Java Code Examples for org.apache.hadoop.conf.Configuration.getTrimmed()

The following are Java code examples for showing how to use getTrimmed() of the org.apache.hadoop.conf.Configuration class. You can vote up the examples you like. Your votes will be used in our system to get more good examples.
Example 1
Project: hadoop   File: TestS3AConfiguration.java   Source Code and License Vote up 7 votes
/**
 * Test if custom endpoint is picked up.
 * <p/>
 * The test expects TEST_ENDPOINT to be defined in the Configuration
 * describing the endpoint of the bucket to which TEST_FS_S3A_NAME points
 * (f.i. "s3-eu-west-1.amazonaws.com" if the bucket is located in Ireland).
 * Evidently, the bucket has to be hosted in the region denoted by the
 * endpoint for the test to succeed.
 * <p/>
 * More info and the list of endpoint identifiers:
 * http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
 *
 * @throws Exception on any failure
 */
@Test
public void TestEndpoint() throws Exception {
  conf = new Configuration();
  String endpoint = conf.getTrimmed(TEST_ENDPOINT, "");
  if (endpoint.isEmpty()) {
    // Fix: a space was missing before "config", so the warning rendered as
    // "...skipped as <key>config setting was not detected".
    LOG.warn("Custom endpoint test skipped as " + TEST_ENDPOINT + " config " +
        "setting was not detected");
  } else {
    conf.set(Constants.ENDPOINT, endpoint);
    fs = S3ATestUtils.createTestFileSystem(conf);
    AmazonS3Client s3 = fs.getAmazonS3Client();
    String endPointRegion = "";
    // Differentiate handling of "s3-" and "s3." based endpoint identifiers:
    // "s3-<region>.amazonaws.com" splits into 3 parts (region after "s3-"),
    // "s3.<region>.amazonaws.com" splits into 4 (region is the second part).
    String[] endpointParts = StringUtils.split(endpoint, '.');
    if (endpointParts.length == 3) {
      endPointRegion = endpointParts[0].substring(3);
    } else if (endpointParts.length == 4) {
      endPointRegion = endpointParts[1];
    } else {
      fail("Unexpected endpoint");
    }
    // The bucket's actual location must match the region implied by the endpoint.
    assertEquals("Endpoint config setting and bucket location differ: ",
        endPointRegion, s3.getBucketLocation(fs.getUri().getHost()));
  }
}
 
Example 2
Project: hadoop   File: WebAppUtils.java   Source Code and License Vote up 6 votes
/**
 * Get the URL to use for binding where bind hostname can be specified
 * to override the hostname in the webAppURLWithoutScheme. Port specified in the
 * webAppURLWithoutScheme will be used.
 *
 * @param conf the configuration
 * @param hostProperty bind host property name
 * @param webAppURLWithoutScheme web app URL without scheme String
 * @return String representing bind URL
 */
public static String getWebAppBindURL(
    Configuration conf,
    String hostProperty,
    String webAppURLWithoutScheme) {

  String bindHost = conf.getTrimmed(hostProperty);
  if (bindHost == null || bindHost.isEmpty()) {
    // No bind-host override configured; use the URL exactly as given.
    return webAppURLWithoutScheme;
  }
  // A bind-host is set: it replaces the hostname portion, keeping the port.
  if (!webAppURLWithoutScheme.contains(":")) {
    throw new YarnRuntimeException("webAppURLWithoutScheme must include port specification but doesn't: " +
                                   webAppURLWithoutScheme);
  }
  String port = webAppURLWithoutScheme.split(":")[1];
  return bindHost + ":" + port;
}
 
Example 3
Project: hadoop   File: CuratorService.java   Source Code and License Vote up 6 votes
/**
 * Init the service.
 * This is where the security bindings are set up.
 * @param conf configuration of the service
 * @throws Exception on any failure during initialization
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
  // Resolve the registry's ZK root path, falling back to the default.
  registryRoot = conf.getTrimmed(KEY_REGISTRY_ZK_ROOT, DEFAULT_ZK_REGISTRY_ROOT);

  // Create and add the registry security child service.
  registrySecurity = new RegistrySecurity("registry security");
  addService(registrySecurity);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Creating Registry with root {}", registryRoot);
  }

  super.serviceInit(conf);
}
 
Example 4
Project: hadoop   File: RegistrySecurity.java   Source Code and License Vote up 6 votes
/**
 * Init the service: this sets up security based on the configuration.
 * @param conf configuration
 * @throws Exception on any failure, including an unrecognized
 * client authentication mechanism
 */
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  // Client auth mechanism defaults to anonymous when unset.
  String auth = conf.getTrimmed(KEY_REGISTRY_CLIENT_AUTH,
      REGISTRY_CLIENT_AUTH_ANONYMOUS);

  if (REGISTRY_CLIENT_AUTH_KERBEROS.equals(auth)) {
    access = AccessPolicy.sasl;
  } else if (REGISTRY_CLIENT_AUTH_DIGEST.equals(auth)) {
    access = AccessPolicy.digest;
  } else if (REGISTRY_CLIENT_AUTH_ANONYMOUS.equals(auth)) {
    access = AccessPolicy.anon;
  } else {
    throw new ServiceStateException(E_UNKNOWN_AUTHENTICATION_MECHANISM
                                    + "\"" + auth + "\"");
  }
  initSecurity();
}
 
Example 5
Project: hadoop   File: MicroZookeeperService.java   Source Code and License Vote up 6 votes
/**
 * Set up security. This must be done prior to creating
 * the ZK instance, as it sets up JAAS if that has not been done already.
 *
 * @return true if the cluster has security enabled.
 */
public boolean setupSecurity() throws IOException {
  Configuration conf = getConfig();
  String jaasContext = conf.getTrimmed(KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT);
  // Security is considered enabled iff a JAAS context is configured.
  secureServer = StringUtils.isNotEmpty(jaasContext);
  if (!secureServer) {
    return false;
  }

  RegistrySecurity.validateContext(jaasContext);
  RegistrySecurity.bindZKToServerJAASContext(jaasContext);
  // policy on failed auth
  System.setProperty(PROP_ZK_ALLOW_FAILED_SASL_CLIENTS,
    conf.get(KEY_ZKSERVICE_ALLOW_FAILED_SASL_CLIENTS,
        "true"));

  //needed so that you can use sasl: strings in the registry
  System.setProperty(RegistryInternalConstants.ZOOKEEPER_AUTH_PROVIDER +".1",
      RegistryInternalConstants.SASLAUTHENTICATION_PROVIDER);
  String serverContext =
      System.getProperty(PROP_ZK_SERVER_SASL_CONTEXT);
  addDiagnostics("Server JAAS context s = %s", serverContext);
  return true;
}
 
Example 6
Project: hadoop   File: HAUtil.java   Source Code and License Vote up 6 votes
/**
 * Ensure the RM RPC address for the given RM id is set: if the
 * address key (prefix + RM id) has no value, derive one from the
 * configured RM hostname plus the default port for the prefix.
 *
 * @param prefix base configuration key of the RPC address
 * @param RMId resource manager identifier (suffix)
 * @param conf configuration to read and update
 */
private static void checkAndSetRMRPCAddress(String prefix, String RMId,
    Configuration conf) {
  String rpcAddressConfKey = null;
  try {
    rpcAddressConfKey = addSuffix(prefix, RMId);
    if (conf.getTrimmed(rpcAddressConfKey) == null) {
      String hostNameConfKey = addSuffix(YarnConfiguration.RM_HOSTNAME, RMId);
      String confVal = conf.getTrimmed(hostNameConfKey);
      if (confVal == null) {
        // Neither the address nor the hostname is configured: fail.
        // Reuse rpcAddressConfKey rather than recomputing addSuffix.
        throwBadConfigurationException(getNeedToSetValueMessage(
            hostNameConfKey + " or " + rpcAddressConfKey));
      } else {
        // Synthesize the RPC address from hostname + default port.
        conf.set(rpcAddressConfKey, confVal + ":"
            + YarnConfiguration.getRMDefaultPortNumber(prefix, conf));
      }
    }
  } catch (IllegalArgumentException iae) {
    String errmsg = iae.getMessage();
    if (rpcAddressConfKey == null) {
      // Error at addSuffix: the RM id itself is invalid.
      errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID, RMId);
    }
    throwBadConfigurationException(errmsg);
  }
}
 
Example 7
Project: hadoop   File: HAUtil.java   Source Code and License Vote up 6 votes
/**
 * Get the namenode Id by matching the {@code addressKey}
 * with the address of the local node.
 *
 * If {@link DFSConfigKeys#DFS_HA_NAMENODE_ID_KEY} is not specifically
 * configured, this method determines the namenode Id by matching the local
 * node's address with the configured addresses. When a match is found, it
 * returns the namenode Id from the corresponding configuration key.
 *
 * @param conf Configuration
 * @param nsId nameservice whose namenode addresses are searched
 * @return namenode Id on success, null on failure.
 * @throws HadoopIllegalArgumentException on error
 */
public static String getNameNodeId(Configuration conf, String nsId) {
  // An explicitly configured id wins over address matching.
  String configuredId = conf.getTrimmed(DFS_HA_NAMENODE_ID_KEY);
  if (configuredId != null) {
    return configuredId;
  }

  // Match the local address against the configured RPC addresses;
  // getSuffixIDs returns {nameserviceId, namenodeId} on a match.
  String[] ids = DFSUtil.getSuffixIDs(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
      nsId, null, DFSUtil.LOCAL_ADDRESS_MATCHER);
  if (ids == null) {
    String msg = "Configuration " + DFS_NAMENODE_RPC_ADDRESS_KEY + 
        " must be suffixed with nameservice and namenode ID for HA " +
        "configuration.";
    throw new HadoopIllegalArgumentException(msg);
  }

  return ids[1];
}
 
Example 8
Project: hadoop   File: S3ATestUtils.java   Source Code and License Vote up 6 votes
/**
 * Create the S3A filesystem named by the test configuration, skipping
 * the test (via AssumptionViolatedException) when no s3a:// test
 * filesystem is configured.
 *
 * @param conf configuration holding the test filesystem name
 * @return an initialized S3A filesystem with multipart purging enabled
 * @throws IOException on initialization failure
 */
public static S3AFileSystem createTestFileSystem(Configuration conf) throws
    IOException {
  String fsname = conf.getTrimmed(TestS3AFileSystemContract.TEST_FS_S3A_NAME, "");

  // Only a live test when a filesystem name is set AND its scheme is s3a.
  boolean liveTest = !StringUtils.isEmpty(fsname);
  URI testURI = null;
  if (liveTest) {
    testURI = URI.create(fsname);
    liveTest = testURI.getScheme().equals(Constants.FS_S3A);
  }
  if (!liveTest) {
    // This doesn't work with our JUnit 3 style test cases, so instead we'll
    // make this whole class not run by default
    throw new AssumptionViolatedException(
        "No test filesystem in " + TestS3AFileSystemContract.TEST_FS_S3A_NAME);
  }

  S3AFileSystem testFs = new S3AFileSystem();
  //enable purging in tests
  conf.setBoolean(Constants.PURGE_EXISTING_MULTIPART, true);
  conf.setInt(Constants.PURGE_EXISTING_MULTIPART_AGE, 0);
  testFs.initialize(testURI, conf);
  return testFs;
}
 
Example 9
Project: hadoop   File: HAUtil.java   Source Code and License Vote up 5 votes
/**
 * Determine the RM HA id: use the configured value, or else find the
 * RM whose configured address resolves to this node.
 *
 * @param conf Configuration. Please use verifyAndSetRMHAId to check.
 * @return RM Id on success
 */
public static String getRMHAId(Configuration conf) {
  String currentRMId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
  int matches = 0;
  if (currentRMId == null) {
    // No explicit id: scan every RM address for one local to this node.
    for (String rmId : getRMHAIds(conf)) {
      String addrKey = addSuffix(YarnConfiguration.RM_ADDRESS, rmId);
      String addr = conf.get(addrKey);
      if (addr == null) {
        continue;
      }
      InetSocketAddress sockAddr;
      try {
        sockAddr = NetUtils.createSocketAddr(addr);
      } catch (Exception e) {
        LOG.warn("Exception in creating socket address " + addr, e);
        continue;
      }
      if (!sockAddr.isUnresolved()
          && NetUtils.isLocalAddress(sockAddr.getAddress())) {
        currentRMId = rmId.trim();
        matches++;
      }
    }
  }
  // Only one address must match the local address
  if (matches > 1) {
    String msg = "The HA Configuration has multiple addresses that match "
        + "local node's address.";
    throw new HadoopIllegalArgumentException(msg);
  }
  return currentRMId;
}
 
Example 10
Project: hadoop   File: HAUtil.java   Source Code and License Vote up 5 votes
/**
 * Look up the value of an RM-instance-suffixed configuration key.
 *
 * @param prefix base configuration key
 * @param conf configuration to read
 * @return the trimmed value for this RM instance, possibly null
 */
public static String getConfValueForRMInstance(String prefix,
                                               Configuration conf) {
  String instanceKey = getConfKeyForRMInstance(prefix, conf);
  String value = conf.getTrimmed(instanceKey);
  if (LOG.isTraceEnabled()) {
    LOG.trace("getConfValueForRMInstance: prefix = " + prefix +
        "; confKey being looked up = " + instanceKey +
        "; value being set to = " + value);
  }
  return value;
}
 
Example 11
Project: hadoop   File: BackupNode.java   Source Code and License Vote up 5 votes
/**
 * Resolve the backup node's service RPC address from configuration.
 *
 * @param conf configuration to read
 * @return the socket address, or null when the key is unset/blank
 */
@Override
protected InetSocketAddress getServiceRpcServerAddress(Configuration conf) {
  String addr = conf.getTrimmed(BN_SERVICE_RPC_ADDRESS_KEY);
  return (addr == null || addr.isEmpty())
      ? null
      : NetUtils.createSocketAddr(addr);
}
 
Example 12
Project: hadoop   File: NameNode.java   Source Code and License Vote up 5 votes
/**
 * Fetches the address for services to use when connecting to namenode
 * based on the value of fallback returns null if the special
 * address is not specified or returns the default namenode address
 * to be used by both clients and services.
 * Services here are datanodes, backup node, any non client connection
 */
public static InetSocketAddress getServiceAddress(Configuration conf,
                                                      boolean fallback) {
  String serviceAddr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
  boolean unset = (serviceAddr == null || serviceAddr.isEmpty());
  if (!unset) {
    return getAddress(serviceAddr);
  }
  // Fall back to the shared client/service address only when requested.
  return fallback ? getAddress(conf) : null;
}
 
Example 13
Project: hadoop   File: NameNode.java   Source Code and License Vote up 5 votes
/**
 * Gets a trimmed value from configuration, or null if no value is defined.
 *
 * @param conf configuration
 * @param key configuration key to get
 * @return trimmed value, or null if no value is defined
 */
private static String getTrimmedOrNull(Configuration conf, String key) {
  String value = conf.getTrimmed(key);
  // Normalize both "absent" and "blank" to null for callers.
  return (value == null || value.isEmpty()) ? null : value;
}
 
Example 14
Project: hadoop   File: DatanodeHttpServer.java   Source Code and License Vote up 5 votes
/**
 * Derive the hostname for the SPNEGO principal from the datanode's
 * HTTP address, falling back to the HTTPS address (with its default).
 *
 * @param conf configuration to read the addresses from
 * @return host portion of the resolved address
 */
private static String getHostnameForSpnegoPrincipal(Configuration conf) {
  String httpAddr = conf.getTrimmed(DFS_DATANODE_HTTP_ADDRESS_KEY, null);
  String addr = (httpAddr != null)
      ? httpAddr
      : conf.getTrimmed(DFS_DATANODE_HTTPS_ADDRESS_KEY,
                        DFS_DATANODE_HTTPS_ADDRESS_DEFAULT);
  return NetUtils.createSocketAddr(addr).getHostString();
}
 
Example 15
Project: hadoop   File: DataNode.java   Source Code and License Vote up 5 votes
/**
 * Build the UNIX-domain-socket peer server for short-circuit reads,
 * or return null when no domain socket path is configured.
 *
 * @param conf configuration to read
 * @param port port baked into the socket path
 * @return the peer server, or null when disabled
 * @throws IOException on server creation failure
 */
static DomainPeerServer getDomainPeerServer(Configuration conf,
    int port) throws IOException {
  String domainSocketPath =
      conf.getTrimmed(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
  if (domainSocketPath.isEmpty()) {
    boolean shortCircuitEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
        DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT);
    boolean legacyLocalReader = conf.getBoolean(
        DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
        DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
    // Warn when short-circuit reads are requested but cannot work
    // without a domain socket path (legacy reader excepted).
    if (shortCircuitEnabled && !legacyLocalReader) {
      LOG.warn("Although short-circuit local reads are configured, " +
          "they are disabled because you didn't configure " +
          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
    }
    return null;
  }
  if (DomainSocket.getLoadingFailureReason() != null) {
    throw new RuntimeException("Although a UNIX domain socket " +
        "path is configured as " + domainSocketPath + ", we cannot " +
        "start a localDataXceiverServer because " +
        DomainSocket.getLoadingFailureReason());
  }
  DomainPeerServer server = new DomainPeerServer(domainSocketPath, port);
  server.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  return server;
}
 
Example 16
Project: cloudup   File: AbstractS3ACloudupTest.java   Source Code and License Vote up 4 votes
/**
 * Test fixture setup: resolve the configured s3a test filesystem URI,
 * validate it, and bind {@code fileSystem} and {@code root} to it.
 *
 * @throws Exception on any setup failure or missing configuration
 */
@Before
public void setup() throws Exception {
  super.setup();
  String key = String.format(AbstractBondedFSContract.FSNAME_OPTION, "s3a");
  Configuration conf = createConfiguration();
  String fsName = conf.getTrimmed(key);
  assertFalse("No FS set in " + key, StringUtils.isEmpty(fsName));
  URI fsURI = new URI(fsName);
  // The configured filesystem must actually be an s3a:// URI.
  assertEquals("Not an S3A Filesystem: " + fsURI,
      "s3a", fsURI.getScheme());
  fileSystem = (S3AFileSystem) FileSystem.get(fsURI, conf);
  root = new Path(getFileSystem().getUri());
}
 
Example 17
Project: hadoop   File: BackupNode.java   Source Code and License Vote up 4 votes
/**
 * Resolve the backup node's RPC server address, using the default
 * when the key is unset.
 *
 * @param conf configuration to read
 * @return the resolved socket address
 */
@Override // NameNode
protected InetSocketAddress getRpcServerAddress(Configuration conf) {
  return NetUtils.createSocketAddr(
      conf.getTrimmed(BN_ADDRESS_NAME_KEY, BN_ADDRESS_DEFAULT));
}
 
Example 18
Project: hadoop   File: S3Credentials.java   Source Code and License Vote up 4 votes
/**
 * Resolve AWS credentials for the given URI: first from the URI's
 * user-info ("access:secret@host"), then from the scheme-specific
 * configuration properties.
 *
 * @throws IllegalArgumentException if credentials for S3 cannot be
 * determined.
 */
public void initialize(URI uri, Configuration conf) {
  if (uri.getHost() == null) {
    throw new IllegalArgumentException("Invalid hostname in URI " + uri);
  }
  
  // Credentials embedded in the URI take precedence over configuration.
  String userInfo = uri.getUserInfo();
  if (userInfo != null) {
    int index = userInfo.indexOf(':');
    if (index != -1) {
      // "access:secret" form: split on the first colon.
      accessKey = userInfo.substring(0, index);
      secretAccessKey = userInfo.substring(index + 1);
    } else {
      // Only the access key is present in the URI.
      accessKey = userInfo;
    }
  }
  
  // Property names are scheme-specific, e.g. fs.s3n.awsAccessKeyId.
  String scheme = uri.getScheme();
  String accessKeyProperty = String.format("fs.%s.awsAccessKeyId", scheme);
  String secretAccessKeyProperty =
    String.format("fs.%s.awsSecretAccessKey", scheme);
  // Fall back to configuration for whichever pieces the URI did not supply.
  if (accessKey == null) {
    accessKey = conf.getTrimmed(accessKeyProperty);
  }
  if (secretAccessKey == null) {
    secretAccessKey = conf.getTrimmed(secretAccessKeyProperty);
  }
  // Fail with a message tailored to exactly what is missing.
  if (accessKey == null && secretAccessKey == null) {
    throw new IllegalArgumentException("AWS " +
                                       "Access Key ID and Secret Access " +
                                       "Key must be specified as the " +
                                       "username or password " +
                                       "(respectively) of a " + scheme +
                                       " URL, or by setting the " +
                                       accessKeyProperty + " or " +
                                       secretAccessKeyProperty +
                                       " properties (respectively).");
  } else if (accessKey == null) {
    throw new IllegalArgumentException("AWS " +
                                       "Access Key ID must be specified " +
                                       "as the username of a " + scheme +
                                       " URL, or by setting the " +
                                       accessKeyProperty + " property.");
  } else if (secretAccessKey == null) {
    throw new IllegalArgumentException("AWS " +
                                       "Secret Access Key must be " +
                                       "specified as the password of a " +
                                       scheme + " URL, or by setting the " +
                                       secretAccessKeyProperty +
                                       " property.");       
  }

}