org.apache.hadoop.HadoopIllegalArgumentException Java Examples

The following examples show how to use org.apache.hadoop.HadoopIllegalArgumentException. Each snippet is taken from the Apache Hadoop source tree; the originating source file is noted above each example.
Example #1
Source File: ActiveStandbyElector.java    From hadoop with Apache License 2.0
/**
 * To participate in election, the app will call joinElection. The result will
 * be notified by a callback on either the becomeActive or becomeStandby app
 * interfaces. <br/>
 * After this the elector will automatically monitor the leader status and
 * perform re-election if necessary<br/>
 * The app could potentially start off in standby mode and ignore the
 * becomeStandby call.
 * 
 * @param data
 *          to be set by the app. non-null data must be set.
 * @throws HadoopIllegalArgumentException
 *           if valid data is not supplied
 */
public synchronized void joinElection(byte[] data)
    throws HadoopIllegalArgumentException {
  
  if (data == null) {
    throw new HadoopIllegalArgumentException("data cannot be null");
  }
  
  if (wantToBeInElection) {
    LOG.info("Already in election. Not re-connecting.");
    return;
  }

  appData = new byte[data.length];
  System.arraycopy(data, 0, appData, 0, data.length);

  LOG.debug("Attempting active election for " + this);
  joinElectionInternal();
}
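
A minimal caller-side sketch, assuming an ActiveStandbyElector was already constructed (for example as in Example #14 below) and that the registered callbacks handle the outcome; the identity bytes used here are made up:

// Hypothetical usage: the data identifies this node to the other electors.
byte[] appData = "nn1.example.com:8020".getBytes(StandardCharsets.UTF_8);
elector.joinElection(appData);   // throws HadoopIllegalArgumentException only if appData is null
// The result arrives asynchronously through becomeActive()/becomeStandby()
// on the registered callback object, not as a return value here.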
 
Example #2
Source File: LightWeightCache.java    From hadoop with Apache License 2.0
@Override
public E put(final E entry) {
  if (!(entry instanceof Entry)) {
    throw new HadoopIllegalArgumentException(
        "!(entry instanceof Entry), entry.getClass()=" + entry.getClass());
  }

  evictExpiredEntries();

  final E existing = super.put(entry);
  if (existing != null) {
    queue.remove(existing);
  }

  final Entry e = (Entry)entry;
  setExpirationTime(e, creationExpirationPeriod);
  queue.offer(e);
  
  evictEntries();
  return existing;
}
 
Example #3
Source File: RpcClientFactoryPBImpl.java    From hadoop with Apache License 2.0
@Override
public void stopClient(Object proxy) {
  try {
    if (proxy instanceof Closeable) {
      ((Closeable) proxy).close();
      return;
    } else {
      InvocationHandler handler = Proxy.getInvocationHandler(proxy);
      if (handler instanceof Closeable) {
        ((Closeable) handler).close();
        return;
      }
    }
  } catch (Exception e) {
    LOG.error("Cannot call close method due to Exception. " + "Ignoring.", e);
    throw new YarnRuntimeException(e);
  }
  throw new HadoopIllegalArgumentException(
    "Cannot close proxy - is not Closeable or "
        + "does not provide closeable invocation handler " + proxy.getClass());
}
 
Example #4
Source File: RPC.java    From hadoop with Apache License 2.0
/**
 * Build the RPC Server. 
 * @throws IOException on error
 * @throws HadoopIllegalArgumentException when mandatory fields are not set
 */
public Server build() throws IOException, HadoopIllegalArgumentException {
  if (this.conf == null) {
    throw new HadoopIllegalArgumentException("conf is not set");
  }
  if (this.protocol == null) {
    throw new HadoopIllegalArgumentException("protocol is not set");
  }
  if (this.instance == null) {
    throw new HadoopIllegalArgumentException("instance is not set");
  }
  
  return getProtocolEngine(this.protocol, this.conf).getServer(
      this.protocol, this.instance, this.bindAddress, this.port,
      this.numHandlers, this.numReaders, this.queueSizePerHandler,
      this.verbose, this.conf, this.secretManager, this.portRangeConfig);
}
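
Before build() is called, the mandatory fields are normally supplied through the builder's setters; a rough sketch (the protocol class and its implementation below are hypothetical, and the setter names follow the usual org.apache.hadoop.ipc.RPC.Builder API):

// Sketch only: every field checked in build() is set beforehand.
RPC.Server server = new RPC.Builder(conf)
    .setProtocol(MyProtocol.class)    // otherwise: "protocol is not set"
    .setInstance(myProtocolImpl)      // otherwise: "instance is not set"
    .setBindAddress("0.0.0.0")
    .setPort(9090)
    .setNumHandlers(10)
    .build();                         // conf was supplied via the constructor
server.start();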
 
Example #5
Source File: YarnConfiguration.java    From hadoop with Apache License 2.0
@Private
public static int getRMDefaultPortNumber(String addressPrefix,
    Configuration conf) {
  if (addressPrefix.equals(YarnConfiguration.RM_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_SCHEDULER_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_WEBAPP_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_WEBAPP_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT;
  } else if (addressPrefix
      .equals(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_ADMIN_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_ADMIN_PORT;
  } else {
    throw new HadoopIllegalArgumentException(
        "Invalid RM RPC address Prefix: " + addressPrefix
            + ". The valid value should be one of "
            + getServiceAddressConfKeys(conf));
  }
}
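
A brief illustration of how the lookup behaves; the port number in the comment is the stock YARN default and should be treated as an assumption:

Configuration conf = new YarnConfiguration();
int clientPort = YarnConfiguration.getRMDefaultPortNumber(
    YarnConfiguration.RM_ADDRESS, conf);                      // typically 8032
// A key that is not one of the RM address prefixes is rejected:
YarnConfiguration.getRMDefaultPortNumber("yarn.nodemanager.address", conf);
// -> HadoopIllegalArgumentException("Invalid RM RPC address Prefix: ...")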
 
Example #6
Source File: XAttrCommands.java    From hadoop with Apache License 2.0
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
  name = StringUtils.popOptionWithArgument("-n", args);
  String v = StringUtils.popOptionWithArgument("-v", args);
  if (v != null) {
    value = XAttrCodec.decodeValue(v);
  }
  xname = StringUtils.popOptionWithArgument("-x", args);

  if (name != null && xname != null) {
    throw new HadoopIllegalArgumentException(
        "Can not specify both '-n name' and '-x name' option.");
  }
  if (name == null && xname == null) {
    throw new HadoopIllegalArgumentException(
        "Must specify '-n name' or '-x name' option.");
  }

  if (args.isEmpty()) {
    throw new HadoopIllegalArgumentException("<path> is missing.");
  }
  if (args.size() > 1) {
    throw new HadoopIllegalArgumentException("Too many arguments.");
  }
}
 
Example #7
Source File: ReplaceDatanodeOnFailure.java    From hadoop with Apache License 2.0
private static Policy getPolicy(final Configuration conf) {
  final boolean enabled = conf.getBoolean(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_DEFAULT);
  if (!enabled) {
    return Policy.DISABLE;
  }

  final String policy = conf.get(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT);
  for(int i = 1; i < Policy.values().length; i++) {
    final Policy p = Policy.values()[i];
    if (p.name().equalsIgnoreCase(policy)) {
      return p;
    }
  }
  throw new HadoopIllegalArgumentException("Illegal configuration value for "
      + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY
      + ": " + policy);
}
 
Example #8
Source File: DomainSocketFactory.java    From hadoop with Apache License 2.0
public DomainSocketFactory(Conf conf) {
  final String feature;
  if (conf.isShortCircuitLocalReads() && (!conf.isUseLegacyBlockReaderLocal())) {
    feature = "The short-circuit local reads feature";
  } else if (conf.isDomainSocketDataTraffic()) {
    feature = "UNIX domain socket data traffic";
  } else {
    feature = null;
  }

  if (feature == null) {
    PerformanceAdvisory.LOG.debug(
        "Both short-circuit local reads and UNIX domain socket are disabled.");
  } else {
    if (conf.getDomainSocketPath().isEmpty()) {
      throw new HadoopIllegalArgumentException(feature + " is enabled but "
          + DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
    } else if (DomainSocket.getLoadingFailureReason() != null) {
      LOG.warn(feature + " cannot be used because "
          + DomainSocket.getLoadingFailureReason());
    } else {
      LOG.debug(feature + " is enabled.");
    }
  }
}
 
Example #9
Source File: HAUtil.java    From hadoop with Apache License 2.0
/**
 * Get the namenode Id by matching the {@code addressKey}
 * with the address of the local node.
 * 
 * If {@link DFSConfigKeys#DFS_HA_NAMENODE_ID_KEY} is not specifically
 * configured, this method determines the namenode Id by matching the local
 * node's address with the configured addresses. When a match is found, it
 * returns the namenode Id from the corresponding configuration key.
 * 
 * @param conf Configuration
 * @return namenode Id on success, null on failure.
 * @throws HadoopIllegalArgumentException on error
 */
public static String getNameNodeId(Configuration conf, String nsId) {
  String namenodeId = conf.getTrimmed(DFS_HA_NAMENODE_ID_KEY);
  if (namenodeId != null) {
    return namenodeId;
  }
  
  String suffixes[] = DFSUtil.getSuffixIDs(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
      nsId, null, DFSUtil.LOCAL_ADDRESS_MATCHER);
  if (suffixes == null) {
    String msg = "Configuration " + DFS_NAMENODE_RPC_ADDRESS_KEY + 
        " must be suffixed with nameservice and namenode ID for HA " +
        "configuration.";
    throw new HadoopIllegalArgumentException(msg);
  }
  
  return suffixes[1];
}
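
For orientation, a sketch of the configuration shape this method expects; the nameservice, namenode IDs, and host names below are invented:

Configuration conf = new HdfsConfiguration();
conf.set("dfs.nameservices", "mycluster");
conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
// Suffixed RPC addresses; the entry matching the local node's address decides the ID.
conf.set("dfs.namenode.rpc-address.mycluster.nn1", "host1.example.com:8020");
conf.set("dfs.namenode.rpc-address.mycluster.nn2", "host2.example.com:8020");
// On host1 this should return "nn1"; an explicit dfs.ha.namenode.id, if set, wins outright.
String nnId = HAUtil.getNameNodeId(conf, "mycluster");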
 
Example #10
Source File: ByteArrayManager.java    From hadoop with Apache License 2.0
/**
 * @return the least power of two greater than or equal to n, i.e. return
 *         the least integer x with x >= n and x a power of two.
 *
 * @throws HadoopIllegalArgumentException
 *           if n <= 0.
 */
public static int leastPowerOfTwo(final int n) {
  if (n <= 0) {
    throw new HadoopIllegalArgumentException("n = " + n + " <= 0");
  }

  final int highestOne = Integer.highestOneBit(n);
  if (highestOne == n) {
    return n; // n is a power of two.
  }
  final int roundUp = highestOne << 1;
  if (roundUp < 0) {
    final long overflow = ((long) highestOne) << 1;
    throw new ArithmeticException(
        "Overflow: for n = " + n + ", the least power of two (the least"
        + " integer x with x >= n and x a power of two) = "
        + overflow + " > Integer.MAX_VALUE = " + Integer.MAX_VALUE);
  }
  return roundUp;
}
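
A few illustrative inputs and the values the method is expected to produce:

ByteArrayManager.leastPowerOfTwo(1);             // 1    (already a power of two)
ByteArrayManager.leastPowerOfTwo(1000);          // 1024
ByteArrayManager.leastPowerOfTwo(1 << 20);       // 1048576, returned unchanged
ByteArrayManager.leastPowerOfTwo(0);             // HadoopIllegalArgumentException
ByteArrayManager.leastPowerOfTwo((1 << 30) + 1); // ArithmeticException: result would exceed Integer.MAX_VALUE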
 
Example #11
Source File: DFSZKFailoverController.java    From hadoop with Apache License 2.0
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Example #12
Source File: FSDirXAttrOp.java    From hadoop with Apache License 2.0
/**
 * Verifies that the combined size of the name and value of an xattr is within
 * the configured limit. Setting a limit of zero disables this check.
 */
private static void checkXAttrSize(FSDirectory fsd, XAttr xAttr) {
  if (fsd.getXattrMaxSize() == 0) {
    return;
  }
  int size = xAttr.getName().getBytes(Charsets.UTF_8).length;
  if (xAttr.getValue() != null) {
    size += xAttr.getValue().length;
  }
  if (size > fsd.getXattrMaxSize()) {
    throw new HadoopIllegalArgumentException(
        "The XAttr is too big. The maximum combined size of the"
        + " name and value is " + fsd.getXattrMaxSize()
        + ", but the total size is " + size);
  }
}
 
Example #13
Source File: FSImageFormatPBINode.java    From hadoop with Apache License 2.0
private void addToParent(INodeDirectory parent, INode child) {
  if (parent == dir.rootDir && FSDirectory.isReservedName(child)) {
    throw new HadoopIllegalArgumentException("File name \""
        + child.getLocalName() + "\" is reserved. Please "
        + " change the name of the existing file or directory to another "
        + "name before upgrading to this release.");
  }
  // NOTE: This does not update space counts for parents
  if (!parent.addChild(child)) {
    return;
  }
  dir.cacheName(child);

  if (child.isFile()) {
    updateBlocksMap(child.asFile(), fsn.getBlockManager());
  }
}
 
Example #14
Source File: ZKFailoverController.java    From hadoop with Apache License 2.0
private void initZK() throws HadoopIllegalArgumentException, IOException,
    KeeperException {
  zkQuorum = conf.get(ZK_QUORUM_KEY);
  int zkTimeout = conf.getInt(ZK_SESSION_TIMEOUT_KEY,
      ZK_SESSION_TIMEOUT_DEFAULT);
  // Parse ACLs from configuration.
  String zkAclConf = conf.get(ZK_ACL_KEY, ZK_ACL_DEFAULT);
  zkAclConf = ZKUtil.resolveConfIndirection(zkAclConf);
  List<ACL> zkAcls = ZKUtil.parseACLs(zkAclConf);
  if (zkAcls.isEmpty()) {
    zkAcls = Ids.CREATOR_ALL_ACL;
  }
  
  // Parse authentication from configuration.
  String zkAuthConf = conf.get(ZK_AUTH_KEY);
  zkAuthConf = ZKUtil.resolveConfIndirection(zkAuthConf);
  List<ZKAuthInfo> zkAuths;
  if (zkAuthConf != null) {
    zkAuths = ZKUtil.parseAuth(zkAuthConf);
  } else {
    zkAuths = Collections.emptyList();
  }

  // Sanity check configuration.
  Preconditions.checkArgument(zkQuorum != null,
      "Missing required configuration '%s' for ZooKeeper quorum",
      ZK_QUORUM_KEY);
  Preconditions.checkArgument(zkTimeout > 0,
      "Invalid ZK session timeout %s", zkTimeout);
  
  int maxRetryNum = conf.getInt(
      CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_KEY,
      CommonConfigurationKeys.HA_FC_ELECTOR_ZK_OP_RETRIES_DEFAULT);
  elector = new ActiveStandbyElector(zkQuorum,
      zkTimeout, getParentZnode(), zkAcls, zkAuths,
      new ElectorCallbacks(), maxRetryNum);
}
 
Example #15
Source File: AclCommands.java    From hadoop with Apache License 2.0
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
  CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "R");
  cf.parse(args);
  setRecursive(cf.getOpt("R"));
  if (args.isEmpty()) {
    throw new HadoopIllegalArgumentException("<path> is missing");
  }
  if (args.size() > 1) {
    throw new HadoopIllegalArgumentException("Too many arguments");
  }
}
 
Example #16
Source File: XAttrCommands.java    From hadoop with Apache License 2.0
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
  name = StringUtils.popOptionWithArgument("-n", args);
  String en = StringUtils.popOptionWithArgument("-e", args);
  if (en != null) {
    try {
      encoding = enValueOfFunc.apply(StringUtils.toUpperCase(en));
    } catch (IllegalArgumentException e) {
      throw new IllegalArgumentException(
          "Invalid/unsupported encoding option specified: " + en);
    }
    Preconditions.checkArgument(encoding != null,
        "Invalid/unsupported encoding option specified: " + en);
  }

  boolean r = StringUtils.popOption("-R", args);
  setRecursive(r);
  dump = StringUtils.popOption("-d", args);

  if (!dump && name == null) {
    throw new HadoopIllegalArgumentException(
        "Must specify '-n name' or '-d' option.");
  }

  if (args.isEmpty()) {
    throw new HadoopIllegalArgumentException("<path> is missing.");
  }
  if (args.size() > 1) {
    throw new HadoopIllegalArgumentException("Too many arguments.");
  }
}
 
Example #17
Source File: LightWeightGSet.java    From hadoop with Apache License 2.0
@VisibleForTesting
static int computeCapacity(long maxMemory, double percentage,
    String mapName) {
  if (percentage > 100.0 || percentage < 0.0) {
    throw new HadoopIllegalArgumentException("Percentage " + percentage
        + " must be greater than or equal to 0 "
        + " and less than or equal to 100");
  }
  if (maxMemory < 0) {
    throw new HadoopIllegalArgumentException("Memory " + maxMemory
        + " must be greater than or equal to 0");
  }
  if (percentage == 0.0 || maxMemory == 0) {
    return 0;
  }
  //VM detection
  //See http://java.sun.com/docs/hotspot/HotSpotFAQ.html#64bit_detection
  final String vmBit = System.getProperty("sun.arch.data.model");

  //Percentage of max memory
  final double percentDivisor = 100.0/percentage;
  final double percentMemory = maxMemory/percentDivisor;
  
  //compute capacity
  final int e1 = (int)(Math.log(percentMemory)/Math.log(2.0) + 0.5);
  final int e2 = e1 - ("32".equals(vmBit)? 2: 3);
  final int exponent = e2 < 0? 0: e2 > 30? 30: e2;
  final int c = 1 << exponent;

  LOG.info("Computing capacity for map " + mapName);
  LOG.info("VM type       = " + vmBit + "-bit");
  LOG.info(percentage + "% max memory "
      + StringUtils.TraditionalBinaryPrefix.long2String(maxMemory, "B", 1)
      + " = "
      + StringUtils.TraditionalBinaryPrefix.long2String((long) percentMemory,
          "B", 1));
  LOG.info("capacity      = 2^" + exponent + " = " + c + " entries");
  return c;
}
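
To make the arithmetic concrete, a small standalone sketch that reproduces the same computation for 1 GiB of max memory at 2% on a 64-bit VM; the numbers in the comments are worked out from the formulas above, not taken from a real run:

long maxMemory = 1L << 30;                                       // 1 GiB
double percentage = 2.0;
double percentMemory = maxMemory / (100.0 / percentage);         // ~21.5 MB
int e1 = (int) (Math.log(percentMemory) / Math.log(2.0) + 0.5);  // 24
int e2 = e1 - 3;                                                 // 64-bit VM subtracts 3
int capacity = 1 << Math.max(0, Math.min(30, e2));               // 2^21 = 2,097,152 entries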
 
Example #18
Source File: LightWeightGSet.java    From hadoop with Apache License 2.0
@Override
public E put(final E element) {
  //validate element
  if (element == null) {
    throw new NullPointerException("Null element is not supported.");
  }
  if (!(element instanceof LinkedElement)) {
    throw new HadoopIllegalArgumentException(
        "!(element instanceof LinkedElement), element.getClass()="
        + element.getClass());
  }
  final LinkedElement e = (LinkedElement)element;

  //find index
  final int index = getIndex(element);

  //remove if it already exists
  final E existing = remove(index, element);

  //insert the element to the head of the linked list
  modification++;
  size++;
  e.setNext(entries[index]);
  entries[index] = e;

  return existing;
}
 
Example #19
Source File: AbstractFileSystem.java    From hadoop with Apache License 2.0
/**
 * Check that the Uri's scheme matches
 * @param uri
 * @param supportedScheme
 */
public void checkScheme(URI uri, String supportedScheme) {
  String scheme = uri.getScheme();
  if (scheme == null) {
    throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
  }
  if (!scheme.equals(supportedScheme)) {
    throw new HadoopIllegalArgumentException("Uri scheme " + uri
        + " does not match the scheme " + supportedScheme);
  }
}
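
A short sketch of what the check accepts and rejects, assuming fs is some AbstractFileSystem whose supported scheme is "hdfs":

fs.checkScheme(URI.create("hdfs://nn.example.com:8020/data"), "hdfs"); // passes
fs.checkScheme(URI.create("file:///tmp/data"), "hdfs");                // throws: scheme does not match
fs.checkScheme(URI.create("/tmp/data"), "hdfs");                       // throws: URI has no scheme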
 
Example #20
Source File: Path.java    From hadoop with Apache License 2.0
/**
 * Pathnames with scheme and relative path are illegal.
 */
void checkNotSchemeWithRelative() {
  if (toUri().isAbsolute() && !isUriPathAbsolute()) {
    throw new HadoopIllegalArgumentException(
        "Unsupported name: has scheme but relative path-part");
  }
}
 
Example #21
Source File: TestDFSUtil.java    From hadoop with Apache License 2.0
/**
 * Test {@link DFSUtil#getNamenodeNameServiceId(Configuration)} to ensure
 * exception is thrown when multiple rpc addresses match the local node's
 * address
 */
@Test(expected = HadoopIllegalArgumentException.class)
public void testGetNameServiceIdException() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFS_NAMESERVICES, "nn1,nn2");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn1"),
      "localhost:9000");
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
      "localhost:9001");
  DFSUtil.getNamenodeNameServiceId(conf);
  fail("Expected exception is not thrown");
}
 
Example #22
Source File: MapFile.java    From hadoop with Apache License 2.0
@SuppressWarnings("unchecked")
private void open(Path[] inMapFiles, Path outMapFile) throws IOException {
  inReaders = new Reader[inMapFiles.length];
  for (int i = 0; i < inMapFiles.length; i++) {
    Reader reader = new Reader(inMapFiles[i], conf);
    if (keyClass == null || valueClass == null) {
      keyClass = (Class<WritableComparable>) reader.getKeyClass();
      valueClass = (Class<Writable>) reader.getValueClass();
    } else if (keyClass != reader.getKeyClass()
        || valueClass != reader.getValueClass()) {
      throw new HadoopIllegalArgumentException(
          "Input files cannot be merged as they"
              + " have different Key and Value classes");
    }
    inReaders[i] = reader;
  }

  if (comparator == null) {
    Class<? extends WritableComparable> cls;
    cls = keyClass.asSubclass(WritableComparable.class);
    this.comparator = WritableComparator.get(cls, conf);
  } else if (comparator.getKeyClass() != keyClass) {
    throw new HadoopIllegalArgumentException(
        "Input files cannot be merged as they"
            + " have different Key class compared to"
            + " specified comparator");
  }

  outWriter = new MapFile.Writer(conf, outMapFile,
      MapFile.Writer.keyClass(keyClass),
      MapFile.Writer.valueClass(valueClass));
}
 
Example #23
Source File: GetConf.java    From hadoop with Apache License 2.0
@Override
protected void checkArgs(String[] args) {
  if (args.length != 1) {
    throw new HadoopIllegalArgumentException(
        "usage: " + Command.CONFKEY.getUsage());
  }
}
 
Example #24
Source File: ArrayPrimitiveWritable.java    From hadoop with Apache License 2.0
private static void checkPrimitive(Class<?> componentType) {
  if (componentType == null) { 
    throw new HadoopIllegalArgumentException("null component type not allowed"); 
  }
  if (! PRIMITIVE_NAMES.containsKey(componentType.getName())) {
    throw new HadoopIllegalArgumentException("input array component type "
        + componentType.getName() + " is not a candidate primitive type");
  }
}
 
Example #25
Source File: ReplicaMap.java    From hadoop with Apache License 2.0
ReplicaMap(Object mutex) {
  if (mutex == null) {
    throw new HadoopIllegalArgumentException(
        "Object to synchronize on cannot be null");
  }
  this.mutex = mutex;
}
 
Example #26
Source File: DatanodeManager.java    From hadoop with Apache License 2.0
/** Fetch live and dead datanodes. */
public void fetchDatanodes(final List<DatanodeDescriptor> live, 
    final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {
  if (live == null && dead == null) {
    throw new HadoopIllegalArgumentException("Both live and dead lists are null");
  }

  // There is no need to take namesystem reader lock as
  // getDatanodeListForReport will synchronize on datanodeMap
  final List<DatanodeDescriptor> results =
      getDatanodeListForReport(DatanodeReportType.ALL);
  for(DatanodeDescriptor node : results) {
    if (isDatanodeDead(node)) {
      if (dead != null) {
        dead.add(node);
      }
    } else {
      if (live != null) {
        live.add(node);
      }
    }
  }
  
  if (removeDecommissionNode) {
    if (live != null) {
      removeDecomNodeFromList(live);
    }
    if (dead != null) {
      removeDecomNodeFromList(dead);
    }
  }
}
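
A minimal caller sketch, assuming datanodeManager is an initialized DatanodeManager:

List<DatanodeDescriptor> live = new ArrayList<>();
List<DatanodeDescriptor> dead = new ArrayList<>();
datanodeManager.fetchDatanodes(live, dead, false); // fills both lists
datanodeManager.fetchDatanodes(live, null, false); // only live nodes wanted: allowed
datanodeManager.fetchDatanodes(null, null, false); // HadoopIllegalArgumentException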
 
Example #27
Source File: BlockManager.java    From hadoop with Apache License 2.0
/** Set BlockPlacementPolicy */
public void setBlockPlacementPolicy(BlockPlacementPolicy newpolicy) {
  if (newpolicy == null) {
    throw new HadoopIllegalArgumentException("newpolicy == null");
  }
  this.blockplacement = newpolicy;
}
 
Example #28
Source File: ArrayPrimitiveWritable.java    From hadoop with Apache License 2.0
private void checkDeclaredComponentType(Class<?> componentType) {
  if ((declaredComponentType != null) 
      && (componentType != declaredComponentType)) {
    throw new HadoopIllegalArgumentException("input array component type "
        + componentType.getName() + " does not match declared type "
        + declaredComponentType.getName());     
  }
}
 
Example #29
Source File: ArrayPrimitiveWritable.java    From hadoop with Apache License 2.0
private static void checkArray(Object value) {
  if (value == null) { 
    throw new HadoopIllegalArgumentException("null value not allowed"); 
  }
  if (! value.getClass().isArray()) {
    throw new HadoopIllegalArgumentException("non-array value of class "
        + value.getClass() + " not allowed");             
  }
}
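
Taken together with Example #24, these checks constrain what ArrayPrimitiveWritable will wrap; a hedged sketch assuming the constructor that takes the value object:

new ArrayPrimitiveWritable(new int[] {1, 2, 3});     // ok: array with a primitive component type
new ArrayPrimitiveWritable(new Integer[] {1, 2, 3}); // rejected by checkPrimitive: Integer is not primitive
new ArrayPrimitiveWritable("not an array");          // rejected by checkArray: value is not an array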
 
Example #30
Source File: TestFsLimits.java    From hadoop with Apache License 2.0
@Test
public void testNoLimits() throws Exception {
  mkdirs("/1", null);
  mkdirs("/22", null);
  mkdirs("/333", null);
  mkdirs("/4444", null);
  mkdirs("/55555", null);
  mkdirs("/1/" + HdfsConstants.DOT_SNAPSHOT_DIR,
      HadoopIllegalArgumentException.class);
}