Java Code Examples for org.apache.hadoop.HadoopIllegalArgumentException

The following examples show how to use org.apache.hadoop.HadoopIllegalArgumentException. They are extracted from open source projects; the source project, source file, and license are noted above each example.
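
HadoopIllegalArgumentException extends java.lang.IllegalArgumentException, so it is unchecked and is typically thrown from argument-validation code. The minimal sketch below shows the validate-and-throw pattern the examples share; requirePositive is a made-up helper for illustration, not a Hadoop API.

import org.apache.hadoop.HadoopIllegalArgumentException;

public class ValidateDemo {
  // Made-up helper for illustration; not part of Hadoop.
  static int requirePositive(String name, int value) {
    if (value <= 0) {
      throw new HadoopIllegalArgumentException(
          name + " = " + value + " must be positive");
    }
    return value;
  }

  public static void main(String[] args) {
    requirePositive("bufferSize", 4096); // returns 4096
    requirePositive("bufferSize", -1);   // throws HadoopIllegalArgumentException
  }
}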
Example 1
Source Project: hadoop   Source File: RpcClientFactoryPBImpl.java    License: Apache License 2.0
@Override
public void stopClient(Object proxy) {
  try {
    if (proxy instanceof Closeable) {
      ((Closeable) proxy).close();
      return;
    } else {
      InvocationHandler handler = Proxy.getInvocationHandler(proxy);
      if (handler instanceof Closeable) {
        ((Closeable) handler).close();
        return;
      }
    }
  } catch (Exception e) {
    LOG.error("Cannot call close method due to Exception. " + "Ignoring.", e);
    throw new YarnRuntimeException(e);
  }
  throw new HadoopIllegalArgumentException(
    "Cannot close proxy - is not Closeable or "
        + "does not provide closeable invocation handler " + proxy.getClass());
}
 
Example 2
Source Project: hadoop   Source File: YarnConfiguration.java    License: Apache License 2.0
@Private
public static int getRMDefaultPortNumber(String addressPrefix,
    Configuration conf) {
  if (addressPrefix.equals(YarnConfiguration.RM_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_SCHEDULER_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_WEBAPP_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_WEBAPP_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT;
  } else if (addressPrefix
      .equals(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_ADMIN_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_ADMIN_PORT;
  } else {
    throw new HadoopIllegalArgumentException(
        "Invalid RM RPC address Prefix: " + addressPrefix
            + ". The valid value should be one of "
            + getServiceAddressConfKeys(conf));
  }
}
 
Example 3
Source Project: hadoop   Source File: ReplaceDatanodeOnFailure.java    License: Apache License 2.0
private static Policy getPolicy(final Configuration conf) {
  final boolean enabled = conf.getBoolean(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_DEFAULT);
  if (!enabled) {
    return Policy.DISABLE;
  }

  final String policy = conf.get(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT);
  for(int i = 1; i < Policy.values().length; i++) {
    final Policy p = Policy.values()[i];
    if (p.name().equalsIgnoreCase(policy)) {
      return p;
    }
  }
  throw new HadoopIllegalArgumentException("Illegal configuration value for "
      + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY
      + ": " + policy);
}
 
Example 4
Source Project: hadoop   Source File: DomainSocketFactory.java    License: Apache License 2.0
public DomainSocketFactory(Conf conf) {
  final String feature;
  if (conf.isShortCircuitLocalReads() && (!conf.isUseLegacyBlockReaderLocal())) {
    feature = "The short-circuit local reads feature";
  } else if (conf.isDomainSocketDataTraffic()) {
    feature = "UNIX domain socket data traffic";
  } else {
    feature = null;
  }

  if (feature == null) {
    PerformanceAdvisory.LOG.debug(
        "Both short-circuit local reads and UNIX domain socket are disabled.");
  } else {
    if (conf.getDomainSocketPath().isEmpty()) {
      throw new HadoopIllegalArgumentException(feature + " is enabled but "
          + DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
    } else if (DomainSocket.getLoadingFailureReason() != null) {
      LOG.warn(feature + " cannot be used because "
          + DomainSocket.getLoadingFailureReason());
    } else {
      LOG.debug(feature + " is enabled.");
    }
  }
}
 
Example 5
Source Project: hadoop   Source File: FSImageFormatPBINode.java    License: Apache License 2.0
private void addToParent(INodeDirectory parent, INode child) {
  if (parent == dir.rootDir && FSDirectory.isReservedName(child)) {
    throw new HadoopIllegalArgumentException("File name \""
        + child.getLocalName() + "\" is reserved. Please "
        + " change the name of the existing file or directory to another "
        + "name before upgrading to this release.");
  }
  // NOTE: This does not update space counts for parents
  if (!parent.addChild(child)) {
    return;
  }
  dir.cacheName(child);

  if (child.isFile()) {
    updateBlocksMap(child.asFile(), fsn.getBlockManager());
  }
}
 
Example 6
Source Project: hadoop   Source File: FSDirXAttrOp.java    License: Apache License 2.0
/**
 * Verifies that the combined size of the name and value of an xattr is within
 * the configured limit. Setting a limit of zero disables this check.
 */
private static void checkXAttrSize(FSDirectory fsd, XAttr xAttr) {
  if (fsd.getXattrMaxSize() == 0) {
    return;
  }
  int size = xAttr.getName().getBytes(Charsets.UTF_8).length;
  if (xAttr.getValue() != null) {
    size += xAttr.getValue().length;
  }
  if (size > fsd.getXattrMaxSize()) {
    throw new HadoopIllegalArgumentException(
        "The XAttr is too big. The maximum combined size of the"
        + " name and value is " + fsd.getXattrMaxSize()
        + ", but the total size is " + size);
  }
}
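
The size rule above is simple byte arithmetic; this standalone sketch reproduces the check outside of HDFS, with a hypothetical 8-byte limit standing in for fsd.getXattrMaxSize().

import java.nio.charset.StandardCharsets;

public class XAttrSizeDemo {
  public static void main(String[] args) {
    int limit = 8; // hypothetical stand-in for fsd.getXattrMaxSize()
    byte[] name = "user.tag".getBytes(StandardCharsets.UTF_8); // 8 bytes
    byte[] value = {0x01, 0x02};                               // 2 bytes
    int size = name.length + value.length;                     // 10 > 8
    System.out.println(size > limit
        ? "checkXAttrSize would throw HadoopIllegalArgumentException"
        : "within limit");
  }
}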
 
Example 7
Source Project: hadoop   Source File: DFSZKFailoverController.java    License: Apache License 2.0
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);
  
  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
 
Example 8
Source Project: hadoop   Source File: ByteArrayManager.java    License: Apache License 2.0
/**
 * @return the least power of two greater than or equal to n, i.e. return
 *         the least integer x with x >= n and x a power of two.
 *
 * @throws HadoopIllegalArgumentException
 *           if n <= 0.
 */
public static int leastPowerOfTwo(final int n) {
  if (n <= 0) {
    throw new HadoopIllegalArgumentException("n = " + n + " <= 0");
  }

  final int highestOne = Integer.highestOneBit(n);
  if (highestOne == n) {
    return n; // n is a power of two.
  }
  final int roundUp = highestOne << 1;
  if (roundUp < 0) {
    final long overflow = ((long) highestOne) << 1;
    throw new ArithmeticException(
        "Overflow: for n = " + n + ", the least power of two (the least"
        + " integer x with x >= n and x a power of two) = "
        + overflow + " > Integer.MAX_VALUE = " + Integer.MAX_VALUE);
  }
  return roundUp;
}
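
Going by the contract in the Javadoc above, the calls below should behave as the comments indicate (a hedged sketch of expected results, not taken from the Hadoop test suite):

int a = ByteArrayManager.leastPowerOfTwo(1);    // 1 (already a power of two)
int b = ByteArrayManager.leastPowerOfTwo(5);    // 8
int c = ByteArrayManager.leastPowerOfTwo(1024); // 1024
// ByteArrayManager.leastPowerOfTwo(0) throws HadoopIllegalArgumentException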
 
Example 9
Source Project: hadoop   Source File: HAUtil.java    License: Apache License 2.0
/**
 * Get the namenode Id by matching the {@code addressKey}
 * with the address of the local node.
 * 
 * If {@link DFSConfigKeys#DFS_HA_NAMENODE_ID_KEY} is not specifically
 * configured, this method determines the namenode Id by matching the local
 * node's address with the configured addresses. When a match is found, it
 * returns the namenode Id from the corresponding configuration key.
 * 
 * @param conf Configuration
 * @return namenode Id on success, null on failure.
 * @throws HadoopIllegalArgumentException on error
 */
public static String getNameNodeId(Configuration conf, String nsId) {
  String namenodeId = conf.getTrimmed(DFS_HA_NAMENODE_ID_KEY);
  if (namenodeId != null) {
    return namenodeId;
  }
  
  String suffixes[] = DFSUtil.getSuffixIDs(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
      nsId, null, DFSUtil.LOCAL_ADDRESS_MATCHER);
  if (suffixes == null) {
    String msg = "Configuration " + DFS_NAMENODE_RPC_ADDRESS_KEY + 
        " must be suffixed with nameservice and namenode ID for HA " +
        "configuration.";
    throw new HadoopIllegalArgumentException(msg);
  }
  
  return suffixes[1];
}
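
When dfs.ha.namenode.id is set explicitly, the address-matching logic is skipped entirely. A hedged sketch (the nameservice ID "mycluster" and namenode ID "nn1" are made up):

Configuration conf = new Configuration();
conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
String nnId = HAUtil.getNameNodeId(conf, "mycluster"); // returns "nn1" directly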
 
Example 10
Source Project: hadoop   Source File: ActiveStandbyElector.java    License: Apache License 2.0
/**
 * To participate in election, the app will call joinElection. The result will
 * be notified by a callback on either the becomeActive or becomeStandby app
 * interfaces. <br/>
 * After this, the elector will automatically monitor the leader status and
 * perform re-election if necessary.<br/>
 * The app could potentially start off in standby mode and ignore the
 * becomeStandby call.
 * 
 * @param data
 *          to be set by the app. non-null data must be set.
 * @throws HadoopIllegalArgumentException
 *           if valid data is not supplied
 */
public synchronized void joinElection(byte[] data)
    throws HadoopIllegalArgumentException {
  
  if (data == null) {
    throw new HadoopIllegalArgumentException("data cannot be null");
  }
  
  if (wantToBeInElection) {
    LOG.info("Already in election. Not re-connecting.");
    return;
  }

  appData = new byte[data.length];
  System.arraycopy(data, 0, appData, 0, data.length);

  LOG.debug("Attempting active election for " + this);
  joinElectionInternal();
}
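
A hedged usage sketch, assuming an already-constructed ActiveStandbyElector named elector (the ZooKeeper wiring done in its constructor is omitted here):

byte[] appData = "rm1".getBytes(StandardCharsets.UTF_8);
elector.joinElection(appData); // result delivered via becomeActive()/becomeStandby()
// elector.joinElection(null) would throw HadoopIllegalArgumentException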
 
Example 11
Source Project: hadoop   Source File: XAttrCommands.java    License: Apache License 2.0
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
  name = StringUtils.popOptionWithArgument("-n", args);
  String v = StringUtils.popOptionWithArgument("-v", args);
  if (v != null) {
    value = XAttrCodec.decodeValue(v);
  }
  xname = StringUtils.popOptionWithArgument("-x", args);

  if (name != null && xname != null) {
    throw new HadoopIllegalArgumentException(
        "Can not specify both '-n name' and '-x name' option.");
  }
  if (name == null && xname == null) {
    throw new HadoopIllegalArgumentException(
        "Must specify '-n name' or '-x name' option.");
  }

  if (args.isEmpty()) {
    throw new HadoopIllegalArgumentException("<path> is missing.");
  }
  if (args.size() > 1) {
    throw new HadoopIllegalArgumentException("Too many arguments.");
  }
}
 
Example 12
Source Project: hadoop   Source File: LightWeightCache.java    License: Apache License 2.0
@Override
public E put(final E entry) {
  if (!(entry instanceof Entry)) {
    throw new HadoopIllegalArgumentException(
        "!(entry instanceof Entry), entry.getClass()=" + entry.getClass());
  }

  evictExpiredEntries();

  final E existing = super.put(entry);
  if (existing != null) {
    queue.remove(existing);
  }

  final Entry e = (Entry)entry;
  setExpirationTime(e, creationExpirationPeriod);
  queue.offer(e);
  
  evictEntries();
  return existing;
}
 
Example 13
Source Project: hadoop   Source File: RPC.java    License: Apache License 2.0
/**
 * Build the RPC Server. 
 * @throws IOException on error
 * @throws HadoopIllegalArgumentException when mandatory fields are not set
 */
public Server build() throws IOException, HadoopIllegalArgumentException {
  if (this.conf == null) {
    throw new HadoopIllegalArgumentException("conf is not set");
  }
  if (this.protocol == null) {
    throw new HadoopIllegalArgumentException("protocol is not set");
  }
  if (this.instance == null) {
    throw new HadoopIllegalArgumentException("instance is not set");
  }
  
  return getProtocolEngine(this.protocol, this.conf).getServer(
      this.protocol, this.instance, this.bindAddress, this.port,
      this.numHandlers, this.numReaders, this.queueSizePerHandler,
      this.verbose, this.conf, this.secretManager, this.portRangeConfig);
}
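
Since conf, protocol, and instance are all mandatory, the failure path is easy to demonstrate. In this sketch conf is supplied via the builder's constructor but protocol is not, so build() should fail fast (assumption: the protocol check fires before any I/O, as the code above suggests):

Configuration conf = new Configuration();
try {
  RPC.Server server = new RPC.Builder(conf).build();
} catch (HadoopIllegalArgumentException e) {
  System.err.println("Builder rejected: " + e.getMessage()); // "protocol is not set"
} catch (IOException e) {
  // not reached in this sketch; build() fails on the missing protocol first
}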
 
Example 14
Source Project: hadoop-ozone   Source File: NodeSchema.java    License: Apache License 2.0
public NodeSchema build() {
  if (type == null) {
    throw new HadoopIllegalArgumentException("Type is mandatory for a " +
        "network topology node layer definition");
  }
  if (cost == -1) {
    cost = type.getCost();
  }
  return new NodeSchema(type, cost, prefix, defaultName);
}
 
Example 15
Source Project: hadoop-ozone   Source File: HttpConfig.java    License: Apache License 2.0
public static Policy getHttpPolicy(ConfigurationSource conf) {
  String policyStr = conf.get(OzoneConfigKeys.OZONE_HTTP_POLICY_KEY,
      OzoneConfigKeys.OZONE_HTTP_POLICY_DEFAULT);
  HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
  if (policy == null) {
    throw new HadoopIllegalArgumentException("Unregonized value '"
        + policyStr + "' for " + OzoneConfigKeys.OZONE_HTTP_POLICY_KEY);
  }
  conf.set(OzoneConfigKeys.OZONE_HTTP_POLICY_KEY, policy.name());
  return policy;
}
 
Example 16
Source Project: hadoop-ozone   Source File: OzoneGetConf.java    License: Apache License 2.0
@Override
protected void checkArgs(String[] args) {
  if (args.length != 1) {
    throw new HadoopIllegalArgumentException(
        "usage: " + OzoneGetConf.Command.CONFKEY.getUsage());
  }
}
 
Example 17
Source Project: xxhadoop   Source File: HelloServer.java    License: Apache License 2.0
public static void main(String[] args) throws HadoopIllegalArgumentException, IOException {
	Configuration conf = new Configuration();
	Builder builder = new RPC.Builder(conf);
	String bindAddress = "node-01";
	int port = 8888;
	builder.setBindAddress(bindAddress)
		.setPort(port)
		.setProtocol(HelloProtocol.class)
		.setInstance(new HelloServer());
	Server server = builder.build();
	LOGGER.info("Server start to listen on " + port);
	server.start();
}
 
Example 18
Source Project: hadoop   Source File: CleanerService.java    License: Apache License 2.0
private static int getPeriod(Configuration conf) {
  int periodInMinutes =
      conf.getInt(YarnConfiguration.SCM_CLEANER_PERIOD_MINS,
          YarnConfiguration.DEFAULT_SCM_CLEANER_PERIOD_MINS);
  // a non-positive value is invalid
  if (periodInMinutes <= 0) {
    throw new HadoopIllegalArgumentException("Non-positive period value: "
        + periodInMinutes
        + ". The cleaner period must be greater than zero.");
  }
  return periodInMinutes;
}
 
Example 19
Source Project: hadoop   Source File: InMemorySCMStore.java    License: Apache License 2.0
private static int getStalenessPeriod(Configuration conf) {
  int stalenessMinutes =
      conf.getInt(YarnConfiguration.IN_MEMORY_STALENESS_PERIOD_MINS,
          YarnConfiguration.DEFAULT_IN_MEMORY_STALENESS_PERIOD_MINS);
  // a non-positive value is invalid
  if (stalenessMinutes <= 0) {
    throw new HadoopIllegalArgumentException("Non-positive staleness value: "
        + stalenessMinutes
        + ". The staleness value must be greater than zero.");
  }
  return stalenessMinutes;
}
 
Example 20
Source Project: hadoop   Source File: InMemorySCMStore.java    License: Apache License 2.0
private static int getInitialDelay(Configuration conf) {
  int initialMinutes =
      conf.getInt(YarnConfiguration.IN_MEMORY_INITIAL_DELAY_MINS,
          YarnConfiguration.DEFAULT_IN_MEMORY_INITIAL_DELAY_MINS);
  // a non-positive value is invalid
  if (initialMinutes <= 0) {
    throw new HadoopIllegalArgumentException(
        "Non-positive initial delay value: " + initialMinutes
            + ". The initial delay value must be greater than zero.");
  }
  return initialMinutes;
}
 
Example 21
Source Project: hadoop   Source File: InMemorySCMStore.java    License: Apache License 2.0
private static int getCheckPeriod(Configuration conf) {
  int checkMinutes =
      conf.getInt(YarnConfiguration.IN_MEMORY_CHECK_PERIOD_MINS,
          YarnConfiguration.DEFAULT_IN_MEMORY_CHECK_PERIOD_MINS);
  // a non-positive value is invalid
  if (checkMinutes <= 0) {
    throw new HadoopIllegalArgumentException(
        "Non-positive check period value: " + checkMinutes
            + ". The check period value must be greater than zero.");
  }
  return checkMinutes;
}
 
Example 22
Source Project: hadoop   Source File: HAUtil.java    License: Apache License 2.0
/**
 * @param conf Configuration. Please use verifyAndSetRMHAId to check.
 * @return RM Id on success
 */
public static String getRMHAId(Configuration conf) {
  int found = 0;
  String currentRMId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
  if(currentRMId == null) {
    for(String rmId : getRMHAIds(conf)) {
      String key = addSuffix(YarnConfiguration.RM_ADDRESS, rmId);
      String addr = conf.get(key);
      if (addr == null) {
        continue;
      }
      InetSocketAddress s;
      try {
        s = NetUtils.createSocketAddr(addr);
      } catch (Exception e) {
        LOG.warn("Exception in creating socket address " + addr, e);
        continue;
      }
      if (!s.isUnresolved() && NetUtils.isLocalAddress(s.getAddress())) {
        currentRMId = rmId.trim();
        found++;
      }
    }
  }
  if (found > 1) { // at most one address may match the local address
    String msg = "The HA Configuration has multiple addresses that match "
        + "local node's address.";
    throw new HadoopIllegalArgumentException(msg);
  }
  return currentRMId;
}
 
Example 23
Source Project: hadoop   Source File: YarnConfiguration.java    License: Apache License 2.0
@Private
public static String getClusterId(Configuration conf) {
  String clusterId = conf.get(YarnConfiguration.RM_CLUSTER_ID);
  if (clusterId == null) {
    throw new HadoopIllegalArgumentException("Configuration doesn't specify " +
        YarnConfiguration.RM_CLUSTER_ID);
  }
  return clusterId;
}
 
Example 24
Source Project: hadoop   Source File: TestHSAdminServer.java    License: Apache License 2.0
@Before
public void init() throws HadoopIllegalArgumentException, IOException {
  conf = new JobConf();
  conf.set(JHAdminConfig.JHS_ADMIN_ADDRESS, "0.0.0.0:0");
  conf.setClass("hadoop.security.group.mapping", MockUnixGroupsMapping.class,
      GroupMappingServiceProvider.class);
  conf.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec);
  conf.setBoolean(
        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
        securityEnabled);
  Groups.getUserToGroupsMappingService(conf);
  jobHistoryService = mock(JobHistory.class);
  alds = mock(AggregatedLogDeletionService.class);

  hsAdminServer = new HSAdminServer(alds, jobHistoryService) {

    @Override
    protected Configuration createConf() {
      return conf;
    }
  };
  hsAdminServer.init(conf);
  hsAdminServer.start();
  conf.setSocketAddr(JHAdminConfig.JHS_ADMIN_ADDRESS,
      hsAdminServer.clientRpcServer.getListenerAddress());
  hsAdminClient = new HSAdmin(conf);
}
 
Example 25
Source Project: hadoop   Source File: DFSOutputStream.java    License: Apache License 2.0
private DFSOutputStream(DFSClient dfsClient, String src, Progressable progress,
    HdfsFileStatus stat, DataChecksum checksum) throws IOException {
  super(getChecksum4Compute(checksum, stat));
  this.dfsClient = dfsClient;
  this.src = src;
  this.fileId = stat.getFileId();
  this.blockSize = stat.getBlockSize();
  this.blockReplication = stat.getReplication();
  this.fileEncryptionInfo = stat.getFileEncryptionInfo();
  this.progress = progress;
  this.cachingStrategy = new AtomicReference<CachingStrategy>(
      dfsClient.getDefaultWriteCachingStrategy());
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug(
        "Set non-null progress callback on DFSOutputStream " + src);
  }
  
  this.bytesPerChecksum = checksum.getBytesPerChecksum();
  if (bytesPerChecksum <= 0) {
    throw new HadoopIllegalArgumentException(
        "Invalid value: bytesPerChecksum = " + bytesPerChecksum + " <= 0");
  }
  if (blockSize % bytesPerChecksum != 0) {
    throw new HadoopIllegalArgumentException("Invalid values: "
        + DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY + " (=" + bytesPerChecksum
        + ") must divide block size (=" + blockSize + ").");
  }
  this.checksum4WriteBlock = checksum;

  this.dfsclientSlowLogThresholdMs =
    dfsClient.getConf().dfsclientSlowIoWarningThresholdMs;
  this.byteArrayManager = dfsClient.getClientContext().getByteArrayManager();
}
 
Example 26
Source Project: hadoop   Source File: DFSUtil.java    License: Apache License 2.0
/**
 * Get http policy. Http Policy is chosen as follows:
 * <ol>
 * <li>If hadoop.ssl.enabled is set, http endpoints are not started. Only
 * https endpoints are started on configured https ports</li>
 * <li>This is overridden by the dfs.https.enable configuration, if it is set
 * to true. In that case, both http and https endpoints are started.</li>
 * <li>All the above configurations are overridden by dfs.http.policy
 * configuration. With this configuration you can set http-only, https-only
 * and http-and-https endpoints.</li>
 * </ol>
 * See hdfs-default.xml documentation for more details on each of the above
 * configuration settings.
 */
public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
  String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY);
  if (policyStr == null) {
    boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
        DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);

    boolean hadoopSsl = conf.getBoolean(
        CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
        CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);

    if (hadoopSsl) {
      LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
          + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
          + ".");
    }
    if (https) {
      LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
          + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
          + ".");
    }

    return (hadoopSsl || https) ? HttpConfig.Policy.HTTP_AND_HTTPS
        : HttpConfig.Policy.HTTP_ONLY;
  }

  HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
  if (policy == null) {
    throw new HadoopIllegalArgumentException("Unregonized value '"
        + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
  }

  conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
  return policy;
}
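
A hedged sketch of the precedence order described in the Javadoc: the deprecated dfs.https.enable knob is set, but dfs.http.policy takes precedence.

Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);  // deprecated knob
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "HTTP_ONLY");   // overrides it
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);     // HTTP_ONLY
// setting dfs.http.policy to an unknown value such as "BOTH" would make the
// same call throw HadoopIllegalArgumentException instead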
 
Example 27
Source Project: hadoop   Source File: LeaseRenewer.java    License: Apache License 2.0
private Key(final String authority, final UserGroupInformation ugi) {
  if (authority == null) {
    throw new HadoopIllegalArgumentException("authority == null");
  } else if (ugi == null) {
    throw new HadoopIllegalArgumentException("ugi == null");
  }

  this.authority = authority;
  this.ugi = ugi;
}
 
Example 28
Source Project: hadoop   Source File: LeaseRenewer.java    License: Apache License 2.0
private void unsyncSetGraceSleepPeriod(final long gracePeriod) {
  if (gracePeriod < 100L) {
    throw new HadoopIllegalArgumentException(gracePeriod
        + " = gracePeriod < 100ms is too small.");
  }
  this.gracePeriod = gracePeriod;
  final long half = gracePeriod/2;
  this.sleepPeriod = half < LEASE_RENEWER_SLEEP_DEFAULT
      ? half : LEASE_RENEWER_SLEEP_DEFAULT;
}
 
Example 29
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
private DataChecksum createChecksum(ChecksumOpt userOpt) {
  // Fill in any missing field with the default.
  ChecksumOpt myOpt = ChecksumOpt.processChecksumOpt(
      defaultChecksumOpt, userOpt);
  DataChecksum dataChecksum = DataChecksum.newDataChecksum(
      myOpt.getChecksumType(),
      myOpt.getBytesPerChecksum());
  if (dataChecksum == null) {
    throw new HadoopIllegalArgumentException("Invalid checksum type: userOpt="
        + userOpt + ", default=" + defaultChecksumOpt
        + ", effective=null");
  }
  return dataChecksum;
}
 
Example 30
Source Project: hadoop   Source File: DFSClient.java    License: Apache License 2.0
/**
 * Truncate a file to an indicated size
 * See {@link ClientProtocol#truncate}.
 */
public boolean truncate(String src, long newLength) throws IOException {
  checkOpen();
  if (newLength < 0) {
    throw new HadoopIllegalArgumentException(
        "Cannot truncate to a negative file size: " + newLength + ".");
  }
  try {
    return namenode.truncate(src, newLength, clientName);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        UnresolvedPathException.class);
  }
}
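
A hedged usage sketch, assuming an open DFSClient named client; the path is made up:

boolean finished = client.truncate("/logs/app.log", 4096L); // false while block recovery runs
// client.truncate("/logs/app.log", -1L) would throw HadoopIllegalArgumentException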