Java Code Examples for org.apache.hadoop.conf.Configuration.get()

The following are Java code examples showing how to use the get() method of the org.apache.hadoop.conf.Configuration class, collected from a variety of open-source projects.
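
Before reading the examples, it helps to keep the two overloads of get() straight: get(String name) returns the resolved value of a key, or null when the key is unset, while get(String name, String defaultValue) substitutes the default instead of returning null. A minimal sketch of both (the key names here are invented for illustration):

Configuration conf = new Configuration();
conf.set("example.service.host", "node1.example.com");

String host = conf.get("example.service.host");          // "node1.example.com"
String missing = conf.get("example.service.port");       // null: key is unset
String port = conf.get("example.service.port", "8020");  // "8020": default substituted

Most of the examples below reduce to one of these two patterns.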
Example 1
Project: hadoop   File: AdminService.java
@Override
public void serviceInit(Configuration conf) throws Exception {
  if (rmContext.isHAEnabled()) {
    autoFailoverEnabled = HAUtil.isAutomaticFailoverEnabled(conf);
    if (autoFailoverEnabled) {
      if (HAUtil.isAutomaticFailoverEmbedded(conf)) {
        embeddedElector = createEmbeddedElectorService();
        addIfService(embeddedElector);
      }
    }
  }

  masterServiceBindAddress = conf.getSocketAddr(
      YarnConfiguration.RM_BIND_HOST,
      YarnConfiguration.RM_ADMIN_ADDRESS,
      YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
      YarnConfiguration.DEFAULT_RM_ADMIN_PORT);
  daemonUser = UserGroupInformation.getCurrentUser();
  authorizer = YarnAuthorizationProvider.getInstance(conf);
  authorizer.setAdmins(getAdminAclList(conf), UserGroupInformation
      .getCurrentUser());
  rmId = conf.get(YarnConfiguration.RM_HA_ID);
  super.serviceInit(conf);
}
 
Example 2
Project: angel   File: ConfUtils.java
private static void addResourceFiles(Configuration conf, String fileNames)
  throws MalformedURLException {
  String[] fileNameArray = fileNames.split(",");
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < fileNameArray.length; i++) {
    if (i != 0) {
      sb.append(",");
    }
    URL url = new File(fileNameArray[i]).toURI().toURL();
    sb.append(url.toString());
  }

  String addJars = conf.get(AngelConf.ANGEL_JOB_LIBJARS);

  if (addJars == null || addJars.trim().isEmpty()) {
    conf.set(AngelConf.ANGEL_JOB_LIBJARS, sb.toString());
  } else {
    conf.set(AngelConf.ANGEL_JOB_LIBJARS, sb.toString() + "," + addJars);
  }
}
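
The example above appends to a comma-separated list value by reading it with get() and writing the merged string back. For reading such list-valued keys, Configuration also offers getTrimmedStrings(), which splits on commas and trims whitespace; a one-line sketch using the same key:

String[] libJars = conf.getTrimmedStrings(AngelConf.ANGEL_JOB_LIBJARS); // empty array if unset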
 
Example 3
Project: hadoop-oss   File: KMSACLs.java
private void setKMSACLs(Configuration conf) {
  Map<Type, AccessControlList> tempAcls = new HashMap<Type, AccessControlList>();
  Map<Type, AccessControlList> tempBlacklist = new HashMap<Type, AccessControlList>();
  for (Type aclType : Type.values()) {
    String aclStr = conf.get(aclType.getAclConfigKey(), ACL_DEFAULT);
    tempAcls.put(aclType, new AccessControlList(aclStr));
    String blacklistStr = conf.get(aclType.getBlacklistConfigKey());
    if (blacklistStr != null) {
      // Only add if blacklist is present
      tempBlacklist.put(aclType, new AccessControlList(blacklistStr));
      LOG.info("'{}' Blacklist '{}'", aclType, blacklistStr);
    }
    LOG.info("'{}' ACL '{}'", aclType, aclStr);
  }
  acls = tempAcls;
  blacklistedAcls = tempBlacklist;
}
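
Note how this loop uses both overloads side by side: the ACL string is read with a default (ACL_DEFAULT) so every type always gets an entry, while the blacklist is read with the single-argument get() so that a null return can signal "no blacklist configured".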
 
Example 4
Project: hadoop   File: FileSystemRMStateStore.java
@Override
protected synchronized void startInternal() throws Exception {
  // create filesystem only now, as part of service-start. By this time, RM is
  // authenticated with kerberos so we are good to create a file-system
  // handle.
  Configuration conf = new Configuration(getConfig());
  conf.setBoolean("dfs.client.retry.policy.enabled", true);
  String retryPolicy =
      conf.get(YarnConfiguration.FS_RM_STATE_STORE_RETRY_POLICY_SPEC,
        YarnConfiguration.DEFAULT_FS_RM_STATE_STORE_RETRY_POLICY_SPEC);
  conf.set("dfs.client.retry.policy.spec", retryPolicy);

  fs = fsWorkingPath.getFileSystem(conf);
  mkdirsWithRetries(rmDTSecretManagerRoot);
  mkdirsWithRetries(rmAppRoot);
  mkdirsWithRetries(amrmTokenSecretManagerRoot);
}
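
A detail worth copying: the retry settings are applied to a fresh Configuration(getConfig()) copy rather than the service's shared configuration, so the DFS-client overrides stay local to this file-system handle.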
 
Example 5
Project: aliyun-maxcompute-data-collectors   File: UpdateOutputFormat.java
@Override
/** {@inheritDoc} */
public void checkOutputSpecs(JobContext context)
    throws IOException, InterruptedException {
  Configuration conf = context.getConfiguration();
  DBConfiguration dbConf = new DBConfiguration(conf);

  // Sanity check all the configuration values we need.
  if (null == conf.get(DBConfiguration.URL_PROPERTY)) {
    throw new IOException("Database connection URL is not set.");
  } else if (null == dbConf.getOutputTableName()) {
    throw new IOException("Table name is not set for export.");
  } else if (null == dbConf.getOutputFieldNames()) {
    throw new IOException(
        "Output field names are null.");
  } else if (null == conf.get(ExportJobBase.SQOOP_EXPORT_UPDATE_COL_KEY)) {
    throw new IOException("Update key column is not set for export.");
  }
}
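
This is the standard idiom for required settings: because the single-argument get() returns null for unset keys, each check can fail fast with a descriptive IOException before any work is attempted.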
 
Example 6
Project: hadoop   File: CopyCommitter.java
private void preserveFileAttributesForDirectories(Configuration conf) throws IOException {
  String attrSymbols = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
  final boolean syncOrOverwrite = syncFolder || overwrite;

  LOG.info("About to preserve attributes: " + attrSymbols);

  EnumSet<FileAttribute> attributes = DistCpUtils.unpackAttributes(attrSymbols);
  final boolean preserveRawXattrs =
      conf.getBoolean(DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);

  Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
  FileSystem clusterFS = sourceListing.getFileSystem(conf);
  SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
                                    SequenceFile.Reader.file(sourceListing));
  long totalLen = clusterFS.getFileStatus(sourceListing).getLen();

  Path targetRoot = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));

  long preservedEntries = 0;
  try {
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();

    // Iterate over every source path that was copied.
    while (sourceReader.next(srcRelPath, srcFileStatus)) {
      // File-attributes for files are set at the time of copy,
      // in the map-task.
      if (! srcFileStatus.isDirectory()) continue;

      Path targetFile = new Path(targetRoot.toString() + "/" + srcRelPath);
      //
      // Skip the root folder when syncOrOverwrite is true.
      //
      if (targetRoot.equals(targetFile) && syncOrOverwrite) continue;

      FileSystem targetFS = targetFile.getFileSystem(conf);
      DistCpUtils.preserve(targetFS, targetFile, srcFileStatus, attributes,
          preserveRawXattrs);

      taskAttemptContext.progress();
      taskAttemptContext.setStatus("Preserving status on directory entries. [" +
          sourceReader.getPosition() * 100 / totalLen + "%]");
    }
  } finally {
    IOUtils.closeStream(sourceReader);
  }
  LOG.info("Preserved status on " + preservedEntries + " dir entries on target");
}
 
Example 7
Project: hadoop-oss   File: SecurityUtil.java
/**
 * Retrieve the name of the current host. Multihomed hosts may restrict the
 * hostname lookup to a specific interface and nameserver with {@link
 * org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_SECURITY_DNS_INTERFACE_KEY}
 * and {@link org.apache.hadoop.fs.CommonConfigurationKeysPublic#HADOOP_SECURITY_DNS_NAMESERVER_KEY}
 *
 * @param conf Configuration object. May be null.
 * @return the name of the current host
 * @throws UnknownHostException if the local hostname cannot be resolved
 */
static String getLocalHostName(@Nullable Configuration conf)
    throws UnknownHostException {
  if (conf != null) {
    String dnsInterface = conf.get(HADOOP_SECURITY_DNS_INTERFACE_KEY);
    String nameServer = conf.get(HADOOP_SECURITY_DNS_NAMESERVER_KEY);

    if (dnsInterface != null) {
      return DNS.getDefaultHost(dnsInterface, nameServer, true);
    } else if (nameServer != null) {
      throw new IllegalArgumentException(HADOOP_SECURITY_DNS_NAMESERVER_KEY +
          " requires " + HADOOP_SECURITY_DNS_INTERFACE_KEY + ". Check your" +
          "configuration.");
    }
  }

  // Fallback to querying the default hostname as we did before.
  return InetAddress.getLocalHost().getCanonicalHostName();
}
 
Example 8
Project: hadoop-oss   File: GangliaContext31.java
@Override
public void init(String contextName, ContextFactory factory) {
  super.init(contextName, factory);

  LOG.debug("Initializing the GangliaContext31 for Ganglia 3.1 metrics.");

  // Take the hostname from the DNS class.

  Configuration conf = new Configuration();

  if (conf.get("slave.host.name") != null) {
    hostName = conf.get("slave.host.name");
  } else {
    try {
      hostName = DNS.getDefaultHost(
        conf.get("dfs.datanode.dns.interface","default"),
        conf.get("dfs.datanode.dns.nameserver","default"));
    } catch (UnknownHostException uhe) {
      LOG.error(uhe);
      hostName = "UNKNOWN.example.com";
    }
  }
}
 
Example 9
Project: aliyun-tablestore-hbase-client   File: TestPutRow.java
public TestPutRow() throws IOException, InterruptedException {
    Configuration config = HBaseConfiguration.create();

    Connection connection = ConnectionFactory.createConnection(config);
    familyName = config.get("hbase.client.tablestore.family");

    TableName tableName = TableName.valueOf(config.get("hbase.client.tablestore.table"));
    if (!connection.getAdmin().tableExists(tableName)) {
        HTableDescriptor descriptor = new HTableDescriptor(tableName);
        connection.getAdmin().createTable(descriptor);
        TimeUnit.SECONDS.sleep(1);
    }
    table = connection.getTable(tableName);
}
 
Example 10
Project: scheduling-connector-for-hadoop   File: HPCConfiguration.java
public static String[] getHPCLocalDirs(Configuration conf) {
  String getLocalDirs = conf.get(
      HPCConfiguration.YARN_APPLICATION_HPC_LOCAL_DIRS, conf.get(
          YarnConfiguration.NM_LOCAL_DIRS,
          YarnConfiguration.DEFAULT_NM_LOCAL_DIRS));
  return StringUtils.getTrimmedStrings(getLocalDirs);
}
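
Nesting one get() call as the default of another produces a fallback chain: the HPC-specific key wins if set, then the standard YARN NodeManager key, and finally the hard-coded YARN default.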
 
Example 11
Project: ditb   File: EncryptionUtil.java
/**
 * Protect a key by encrypting it with the secret key of the given subject.
 * The configuration must be set up correctly for key alias resolution.
 * @param conf configuration
 * @param subject subject key alias
 * @param key the key
 * @return the encrypted key bytes
 */
public static byte[] wrapKey(Configuration conf, String subject, Key key)
    throws IOException {
  // Wrap the key with the configured encryption algorithm.
  String algorithm =
      conf.get(HConstants.CRYPTO_KEY_ALGORITHM_CONF_KEY, HConstants.CIPHER_AES);
  Cipher cipher = Encryption.getCipher(conf, algorithm);
  if (cipher == null) {
    throw new RuntimeException("Cipher '" + algorithm + "' not available");
  }
  EncryptionProtos.WrappedKey.Builder builder = EncryptionProtos.WrappedKey.newBuilder();
  builder.setAlgorithm(key.getAlgorithm());
  byte[] iv = null;
  if (cipher.getIvLength() > 0) {
    iv = new byte[cipher.getIvLength()];
    RNG.nextBytes(iv);
    builder.setIv(ByteStringer.wrap(iv));
  }
  byte[] keyBytes = key.getEncoded();
  builder.setLength(keyBytes.length);
  builder.setHash(ByteStringer.wrap(Encryption.hash128(keyBytes)));
  ByteArrayOutputStream out = new ByteArrayOutputStream();
  Encryption.encryptWithSubjectKey(out, new ByteArrayInputStream(keyBytes), subject,
    conf, cipher, iv);
  builder.setData(ByteStringer.wrap(out.toByteArray()));
  // Build and return the protobuf message
  out.reset();
  builder.build().writeDelimitedTo(out);
  return out.toByteArray();
}
 
Example 12
Project: hadoop-oss   File: OsSecureRandom.java
@Override
synchronized public void setConf(Configuration conf) {
  this.conf = conf;
  this.randomDevPath = conf.get(
      HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY,
      HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT);
  close();
}
 
Example 13
Project: hadoop   File: DataNodeMetrics.java
public static DataNodeMetrics create(Configuration conf, String dnName) {
  String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  JvmMetrics jm = JvmMetrics.create("DataNode", sessionId, ms);
  String name = "DataNodeActivity-"+ (dnName.isEmpty()
      ? "UndefinedDataNodeName"+ DFSUtil.getRandom().nextInt() 
          : dnName.replace(':', '-'));

  // Percentile measurement is off by default: no intervals are watched unless configured.
  int[] intervals = 
      conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
  
  return ms.register(name, null, new DataNodeMetrics(name, sessionId,
      intervals, jm));
}
 
Example 14
Project: hadoop   File: SwiftTestUtils.java
/**
 * Get the test URI
 * @param conf configuration
 * @return the test service URI
 * @throws SwiftConfigurationException missing parameter or bad URI
 */
public static URI getServiceURI(Configuration conf) throws
                                                    SwiftConfigurationException {
  String instance = conf.get(TEST_FS_SWIFT);
  if (instance == null) {
    throw new SwiftConfigurationException(
      "Missing configuration entry " + TEST_FS_SWIFT);
  }
  try {
    return new URI(instance);
  } catch (URISyntaxException e) {
    throw new SwiftConfigurationException("Bad URI: " + instance);
  }
}
 
Example 15
Project: hadoop   File: TestDeprecatedKeys.java
public void testDeprecatedKeys() throws Exception {
  Configuration conf = new Configuration();
  conf.set("topology.script.file.name", "xyz");
  conf.set("topology.script.file.name", "xyz");
  String scriptFile = conf.get(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
  assertTrue(scriptFile.equals("xyz"));
}
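
The test works because Configuration transparently maps deprecated keys to their replacements: the value is set under the old name topology.script.file.name, but read back under CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY ("net.topology.script.file.name").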
 
Example 16
Project: hadoop   File: HSAdminServer.java
@Override
public void serviceInit(Configuration conf) throws Exception {
  RPC.setProtocolEngine(conf, RefreshUserMappingsProtocolPB.class,
      ProtobufRpcEngine.class);

  RefreshUserMappingsProtocolServerSideTranslatorPB refreshUserMappingXlator = new RefreshUserMappingsProtocolServerSideTranslatorPB(
      this);
  BlockingService refreshUserMappingService = RefreshUserMappingsProtocolService
      .newReflectiveBlockingService(refreshUserMappingXlator);

  GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = new GetUserMappingsProtocolServerSideTranslatorPB(
      this);
  BlockingService getUserMappingService = GetUserMappingsProtocolService
      .newReflectiveBlockingService(getUserMappingXlator);

  HSAdminRefreshProtocolServerSideTranslatorPB refreshHSAdminProtocolXlator = new HSAdminRefreshProtocolServerSideTranslatorPB(
      this);
  BlockingService refreshHSAdminProtocolService = HSAdminRefreshProtocolService
      .newReflectiveBlockingService(refreshHSAdminProtocolXlator);

  WritableRpcEngine.ensureInitialized();

  clientRpcAddress = conf.getSocketAddr(
      JHAdminConfig.MR_HISTORY_BIND_HOST,
      JHAdminConfig.JHS_ADMIN_ADDRESS,
      JHAdminConfig.DEFAULT_JHS_ADMIN_ADDRESS,
      JHAdminConfig.DEFAULT_JHS_ADMIN_PORT);
  clientRpcServer = new RPC.Builder(conf)
      .setProtocol(RefreshUserMappingsProtocolPB.class)
      .setInstance(refreshUserMappingService)
      .setBindAddress(clientRpcAddress.getHostName())
      .setPort(clientRpcAddress.getPort()).setVerbose(false).build();

  addProtocol(conf, GetUserMappingsProtocolPB.class, getUserMappingService);
  addProtocol(conf, HSAdminRefreshProtocolPB.class,
      refreshHSAdminProtocolService);

  // Enable service authorization?
  if (conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
      false)) {
    clientRpcServer.refreshServiceAcl(conf, new ClientHSPolicyProvider());
  }

  adminAcl = new AccessControlList(conf.get(JHAdminConfig.JHS_ADMIN_ACL,
      JHAdminConfig.DEFAULT_JHS_ADMIN_ACL));

}
 
Example 17
Project: hadoop   File: LdapGroupsMapping.java
@Override
public synchronized void setConf(Configuration conf) {
  ldapUrl = conf.get(LDAP_URL_KEY, LDAP_URL_DEFAULT);
  if (ldapUrl == null || ldapUrl.isEmpty()) {
    throw new RuntimeException("LDAP URL is not configured");
  }
  
  useSsl = conf.getBoolean(LDAP_USE_SSL_KEY, LDAP_USE_SSL_DEFAULT);
  keystore = conf.get(LDAP_KEYSTORE_KEY, LDAP_KEYSTORE_DEFAULT);
  
  keystorePass = getPassword(conf, LDAP_KEYSTORE_PASSWORD_KEY,
      LDAP_KEYSTORE_PASSWORD_DEFAULT);
  if (keystorePass.isEmpty()) {
    keystorePass = extractPassword(conf.get(LDAP_KEYSTORE_PASSWORD_FILE_KEY,
        LDAP_KEYSTORE_PASSWORD_FILE_DEFAULT));
  }
  
  bindUser = conf.get(BIND_USER_KEY, BIND_USER_DEFAULT);
  bindPassword = getPassword(conf, BIND_PASSWORD_KEY, BIND_PASSWORD_DEFAULT);
  if (bindPassword.isEmpty()) {
    bindPassword = extractPassword(
        conf.get(BIND_PASSWORD_FILE_KEY, BIND_PASSWORD_FILE_DEFAULT));
  }
  
  baseDN = conf.get(BASE_DN_KEY, BASE_DN_DEFAULT);
  groupSearchFilter =
      conf.get(GROUP_SEARCH_FILTER_KEY, GROUP_SEARCH_FILTER_DEFAULT);
  userSearchFilter =
      conf.get(USER_SEARCH_FILTER_KEY, USER_SEARCH_FILTER_DEFAULT);
  groupMemberAttr =
      conf.get(GROUP_MEMBERSHIP_ATTR_KEY, GROUP_MEMBERSHIP_ATTR_DEFAULT);
  groupNameAttr =
      conf.get(GROUP_NAME_ATTR_KEY, GROUP_NAME_ATTR_DEFAULT);

  int dirSearchTimeout = conf.getInt(DIRECTORY_SEARCH_TIMEOUT, DIRECTORY_SEARCH_TIMEOUT_DEFAULT);
  SEARCH_CONTROLS.setTimeLimit(dirSearchTimeout);
  // Limit the attributes returned to only those required to speed up the search. See HADOOP-10626 for more details.
  SEARCH_CONTROLS.setReturningAttributes(new String[] {groupNameAttr});

  this.conf = conf;
}
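
Notice the split between plain settings and secrets: ordinary options are read with get() and getBoolean(), while the keystore and bind passwords go through a password-aware helper with a file-based fallback, keeping credentials out of plain configuration values where possible.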
 
Example 18
Project: aliyun-maxcompute-data-collectors   File: HsqldbJobStorage.java
private String getRootTableName() {
  Configuration conf = getConf();
  return conf.get(ROOT_TABLE_NAME_KEY, DEFAULT_ROOT_TABLE_NAME);
}
 
Example 19
Project: hadoop-oss   File: KMSPRELocalProvider.java
public ProxyReEncryptionExtension(Configuration conf,
                                  KeyProviderCryptoExtension keyProvider)
    throws URISyntaxException, IOException, NoSuchAlgorithmException
{
  this.keyProvider = keyProvider;

  final String keyName = conf.get(LOCAL_KMS_KEY_NAME_KEY, LOCAL_KMS_KEY_NAME_DEFAULT);

  final Metadata meta = keyProvider.getMetadata(keyName);

  if (meta == null) {
    localKey = keyProvider.createKeyPair(keyName, new Options(conf));
  } else {
    localKey = keyProvider.rollNewVersionPair(keyName);
  }

  localEDEKGeneration = conf.getBoolean(LOCAL_KMS_KEY_LOCAL_EDEK_GENERATION, LOCAL_KMS_KEY_LOCAL_EDEK_GENERATION_KEY);

  Metadata metadata = keyProvider.getMetadata(localKey.getName());

  localCryptoExtension = KeyProviderCryptoExtension.createKeyProviderCryptoExtension(
      new LocalKeyProvider(localKey, metadata, conf)
  );

  final String renString = conf.get(RE_KEY_PROVIDER_URI);
  final String[] renUrlStringList = renString.split(",");

  if (renUrlStringList.length == 0) {
    throw new IOException("Invalid " + RE_KEY_PROVIDER_URI);
  }

  int index = renUrlStringList.length < 2
      ? 0 : new SecureRandom().nextInt(renUrlStringList.length);

  final URI renURI = new URI(renUrlStringList[index]);

  this.rekProvider = ReEncryptionKeyProviderFactory.get(renURI, conf);

}
 
Example 20
Project: hadoop   File: TotalOrderPartitioner.java
/**
 * Get the path to the SequenceFile storing the sorted partition keyset.
 * @see #setPartitionFile(Configuration, Path)
 */
public static String getPartitionFile(Configuration conf) {
  return conf.get(PARTITIONER_PATH, DEFAULT_PATH);
}