Java Code Examples for org.apache.hadoop.conf.Configuration#get()

The following examples show how to use org.apache.hadoop.conf.Configuration#get(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
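Before the examples, a minimal sketch of the lookup behavior these snippets rely on: get(String) returns null when a key is absent, and the two-argument overload supplies a default. The key names here are illustrative only.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationGetSketch {
    public static void main(String[] args) {
        // start from an empty configuration (skip loading the default resources)
        Configuration conf = new Configuration(false);
        conf.set("example.key", "example-value");               // illustrative key
        String present = conf.get("example.key");               // "example-value"
        String absent = conf.get("missing.key");                // null
        String fallback = conf.get("missing.key", "fallback");  // "fallback"
        System.out.println(present + " / " + absent + " / " + fallback);
    }
}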
Example 1
Source File: MR4CGenericOptions.java    From mr4c with Apache License 2.0
public static MR4CGenericOptions extractFromConfig(Configuration conf) {
	MR4CGenericOptions opts = new MR4CGenericOptions();
	opts.setCluster(Cluster.extractFromConfig(conf));
	String fileList = conf.get(FILE_LIST_PROP);
	if ( fileList!=null ) {
		for ( String file : fileList.split(",") ) {
			opts.addFile(URI.create(file));
		}
	}
	String jarList = conf.get(JAR_LIST_PROP);
	if ( jarList != null ) {
		for ( String jar : jarList.split(",") ) {
			opts.addJar(URI.create(jar));
		}
	}
	return opts;
}
 
Example 2
Source File: NetUtils.java    From hadoop-gpu with Apache License 2.0
/**
 * Get the socket factory for the given class according to its
 * configuration parameter
 * <tt>hadoop.rpc.socket.factory.class.&lt;ClassName&gt;</tt>. When no
 * such parameter exists then fall back on the default socket factory as
 * configured by <tt>hadoop.rpc.socket.factory.class.default</tt>. If
 * this default socket factory is not configured, then fall back on the JVM
 * default socket factory.
 * 
 * @param conf the configuration
 * @param clazz the class (usually a {@link VersionedProtocol})
 * @return a socket factory
 */
public static SocketFactory getSocketFactory(Configuration conf,
    Class<?> clazz) {

  SocketFactory factory = null;

  String propValue =
      conf.get("hadoop.rpc.socket.factory.class." + clazz.getSimpleName());
  if ((propValue != null) && (propValue.length() > 0))
    factory = getSocketFactoryFromProperty(conf, propValue);

  if (factory == null)
    factory = getDefaultSocketFactory(conf);

  return factory;
}
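A hedged usage sketch of the fallback chain described in the javadoc above; MyProtocol is an assumed protocol interface, and StandardSocketFactory is Hadoop's stock implementation (any SocketFactory with a no-argument constructor should be loadable the same way).

Configuration conf = new Configuration();
// the per-class key uses the simple class name and takes precedence
conf.set("hadoop.rpc.socket.factory.class.MyProtocol",
    "org.apache.hadoop.net.StandardSocketFactory");
// otherwise hadoop.rpc.socket.factory.class.default is consulted,
// and finally the JVM default socket factory
SocketFactory factory = NetUtils.getSocketFactory(conf, MyProtocol.class);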
 
Example 3
Source File: ThroughputCounterAppTest.java    From attic-apex-malhar with Apache License 2.0
@Test
public void testThroughputCounterApp() throws FileNotFoundException, IOException
{
  Logger logger = LoggerFactory.getLogger(ThroughputCounterAppTest.class);
  LocalMode lm = LocalMode.newInstance();
  Configuration conf = new Configuration();
  InputStream is = new FileInputStream("src/site/conf/dt-site-testbench.xml");
  conf.addResource(is);
  conf.get("dt.application.ThroughputCounterApp.operator.hmapOper.keys");
  conf.get("dt.application.ThroughputCounterApp.operator.hmapOper.numKeys");
  try {
    lm.prepareDAG(new ThroughputCounterApp(), conf);
    LocalMode.Controller lc = lm.getController();
    lc.run(20000);
  } catch (Exception ex) {
    logger.info(ex.getMessage());
  }
  is.close();
}
 
Example 4
Source File: HadoopUtils.java    From mr4c with Apache License 2.0
/**
 * Generates a human-readable string with the property name, value, and source.
 */
public static String describeConfProp(Configuration conf, String name) {
	String val = conf.get(name);
	String[] srcs = conf.getPropertySources(name);
	String source = srcs==null ? "unknown" : Arrays.toString(srcs);
	return String.format("%s=%s; source: %s", name, val, source);
}
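An illustration of the resulting format; the exact source list depends on which resources the configuration loaded, so the expected output below is only a plausible example.

Configuration conf = new Configuration();
// prints something like: fs.defaultFS=file:///; source: [core-default.xml]
System.out.println(HadoopUtils.describeConfProp(conf, "fs.defaultFS"));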
 
Example 5
Source File: ConfigUtils.java    From attic-apex-core with Apache License 2.0
public static String getRMUsername(Configuration conf)
{
  String principal = null;
  if (UserGroupInformation.isSecurityEnabled()) {
    principal = conf.get(YarnConfiguration.RM_PRINCIPAL);
    int sindex = -1;
    if ((principal != null) && ((sindex = principal.indexOf('/')) != -1)) {
      principal = principal.substring(0, sindex);
    }
  }
  return principal;
}
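The substring logic above strips the host part from a service principal of the form user/host@REALM. A sketch of the expected behavior, with an illustrative principal; note that the method returns null whenever Kerberos security is disabled.

Configuration conf = new Configuration();
conf.set(YarnConfiguration.RM_PRINCIPAL, "rm/rm-host.example.com@EXAMPLE.COM");
// with security enabled this yields "rm"; with security disabled, null
String rmUser = ConfigUtils.getRMUsername(conf);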
 
Example 6
Source File: HDFSUtil.java    From eagle with Apache License 2.0
public static void login(Configuration kConfig) throws IOException {
    if (kConfig.get("hdfs.kerberos.principal") == null || kConfig.get("hdfs.kerberos.principal").isEmpty()) {
        if (kConfig.get("hadoop.job.ugi") != null) {
            System.setProperty("HADOOP_USER_NAME", kConfig.get("hadoop.job.ugi"));
        }
        return;
    }
    kConfig.setBoolean("hadoop.security.authorization", true);
    kConfig.set("hadoop.security.authentication", "kerberos");
    UserGroupInformation.setConfiguration(kConfig);
    UserGroupInformation.loginUserFromKeytab(kConfig.get("hdfs.kerberos.principal"), kConfig.get("hdfs.keytab.file"));
}
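A hedged usage sketch of the method above; the principal and keytab path are placeholders, and the property names are exactly the ones the method reads. When hdfs.kerberos.principal is unset, the method only propagates hadoop.job.ugi (if present) and returns.

Configuration conf = new Configuration();
conf.set("hdfs.kerberos.principal", "hdfs-user@EXAMPLE.COM");           // placeholder
conf.set("hdfs.keytab.file", "/etc/security/keytabs/hdfs-user.keytab"); // placeholder
HDFSUtil.login(conf); // throws IOException if the keytab login fails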
 
Example 7
Source File: OsSecureRandom.java    From big-c with Apache License 2.0
@Override
synchronized public void setConf(Configuration conf) {
  this.conf = conf;
  this.randomDevPath = conf.get(
      HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_KEY,
      HADOOP_SECURITY_SECURE_RANDOM_DEVICE_FILE_PATH_DEFAULT);
  close();
}
 
Example 8
Source File: AbstractRpcClient.java    From hbase with Apache License 2.0
@VisibleForTesting
public static String getDefaultCodec(final Configuration c) {
  // If "hbase.client.default.rpc.codec" is empty string -- you can't set it to null because
  // Configuration will complain -- then no default codec (and we'll pb everything). Else
  // default is KeyValueCodec
  return c.get(DEFAULT_CODEC_CLASS, KeyValueCodec.class.getCanonicalName());
}
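The comment above hides a subtlety worth a sketch: an empty string (null is rejected by Configuration) disables the default codec, while an unset key falls back to KeyValueCodec. This assumes DEFAULT_CODEC_CLASS is the "hbase.client.default.rpc.codec" key named in the comment.

Configuration conf = HBaseConfiguration.create();
// key unset: falls back to KeyValueCodec's canonical class name
String codec = AbstractRpcClient.getDefaultCodec(conf);
// empty string disables the default codec ("pb everything")
conf.set("hbase.client.default.rpc.codec", "");
String none = AbstractRpcClient.getDefaultCodec(conf); // ""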
 
Example 9
Source File: GeoWaveInputConfigurator.java    From geowave with Apache License 2.0
public static Index getIndex(final Class<?> implementingClass, final Configuration config) {
  final String input = config.get(enumToConfKey(implementingClass, GeoWaveConfg.INDEX));
  if (input != null) {
    final byte[] indexBytes = ByteArrayUtils.byteArrayFromString(input);
    return (Index) PersistenceUtils.fromBinary(indexBytes);
  }
  return null;
}
 
Example 10
Source File: HalvadeConf.java    From halvade with GNU General Public License v3.0
public static boolean getUpdateReadGroup(Configuration conf) {
    String s = conf.get(updateRG);
    // conf.get returns null when the key is unset; guard against an NPE
    return s != null && s.equalsIgnoreCase("true");
}
 
Example 11
Source File: DremioFileSystem.java    From dremio-oss with Apache License 2.0
private void updateAzureConfiguration(Configuration conf, URI uri) {
  // default is key based, same as azure sources
  String accountName = getAccountNameFromURI(conf.get("authority"), uri);
  // strip any url information if any
  String accountNameWithoutSuffix = accountName.split("[.]")[0];
  conf.set("dremio.azure.account", accountNameWithoutSuffix);
  String authType = getAuthTypeForAccount(conf, accountName, accountNameWithoutSuffix);
  String key = null;

  String old_scheme = conf.get("old_scheme");
  if (old_scheme.equals(FileSystemUriSchemes.WASB_SCHEME) || old_scheme.equals(FileSystemUriSchemes.WASB_SECURE_SCHEME)) {
    conf.setIfUnset("dremio.azure.mode","STORAGE_V1");
  } else if (old_scheme.equals(FileSystemUriSchemes.ABFS_SCHEME) || old_scheme.equals(FileSystemUriSchemes.ABFS_SECURE_SCHEME)) {
    conf.setIfUnset("dremio.azure.mode","STORAGE_V2");
  }

  if (authType.equals(AuthType.SharedKey.name())) {
    key = getValueForProperty(conf, FS_AZURE_ACCOUNT_KEY_PROPERTY_NAME, accountName,
      accountNameWithoutSuffix, "Account Key not present in the configuration.");
    conf.set("dremio.azure.key", key);
    conf.set("dremio.azure.credentialsType", "ACCESS_KEY");
  } else if (authType.equals(AuthType.OAuth.name())) {
    updateOAuthConfig(conf, accountName, accountNameWithoutSuffix);
    conf.set("dremio.azure.credentialsType", "AZURE_ACTIVE_DIRECTORY");
  } else {
    throw new UnsupportedOperationException("This credentials type is not supported " + authType);
  }

}
 
Example 12
Source File: ClustersNames.java    From incubator-gobblin with Apache License 2.0
/**
 * Returns the cluster name on which the application is running. Uses the Hadoop configuration passed in to get the
 * URL of the resourceManager or jobTracker. The URL is then translated into a human-readable cluster name using
 * {@link #getClusterName(String)}
 *
 * <p>
 * <b>MapReduce mode</b> Uses the value for "yarn.resourcemanager.address" from {@link Configuration} excluding the
 * port number.
 * </p>
 *
 * <p>
 * <b>Standalone mode (outside of hadoop)</b> Uses the Hostname of {@link InetAddress#getLocalHost()}
 * </p>
 *
 * <p>
 * Use {@link #getClusterName(String)} if you already have the cluster URL
 * </p>
 *
 * @see #getClusterName()
 * @param conf Hadoop configuration to use to get resourceManager or jobTracker URLs
 */
public String getClusterName(Configuration conf) {
  // ResourceManager address in Hadoop2
  String clusterIdentifier = conf.get("yarn.resourcemanager.address");
  clusterIdentifier = getClusterName(clusterIdentifier);

  // If job is running outside of Hadoop (Standalone) use hostname
  // If clusterIdentifier is localhost or 0.0.0.0 use hostname
  if (clusterIdentifier == null || StringUtils.startsWithIgnoreCase(clusterIdentifier, "localhost")
      || StringUtils.startsWithIgnoreCase(clusterIdentifier, "0.0.0.0")) {
    try {
      clusterIdentifier = InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException e) {
      // Do nothing. Tag will not be generated
    }
  }

  return clusterIdentifier;
}
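A brief usage sketch; the address is illustrative, and construction of the ClustersNames instance is omitted since it depends on the surrounding project.

Configuration conf = new Configuration();
conf.set("yarn.resourcemanager.address", "rm.example.com:8032"); // illustrative
// given a ClustersNames instance, the port is excluded per the javadoc above
String cluster = clustersNames.getClusterName(conf);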
 
Example 13
Source File: ConfigHelper.java    From stratio-cassandra with Apache License 2.0
public static String getInputKeyspace(Configuration conf)
{
    return conf.get(INPUT_KEYSPACE_CONFIG);
}
 
Example 14
Source File: Master.java    From incubator-tez with Apache License 2.0
public static String getMasterUserName(Configuration conf) {
  return conf.get(YarnConfiguration.RM_PRINCIPAL);
}
 
Example 15
Source File: ConfigUtil.java    From zerowing with MIT License
public static String getTemporaryHFilePath(Configuration conf) {
  return conf.get(TEMPORARY_HFILE_PATH, DEFAULT_TEMPORARY_HFILE_PATH);
}
 
Example 16
Source File: ContainerManagerImpl.java    From big-c with Apache License 2.0
@Override
protected void serviceStart() throws Exception {

  // Enqueue user dirs in deletion context

  Configuration conf = getConfig();
  final InetSocketAddress initialAddress = conf.getSocketAddr(
      YarnConfiguration.NM_BIND_HOST,
      YarnConfiguration.NM_ADDRESS,
      YarnConfiguration.DEFAULT_NM_ADDRESS,
      YarnConfiguration.DEFAULT_NM_PORT);
  boolean usingEphemeralPort = (initialAddress.getPort() == 0);
  if (context.getNMStateStore().canRecover() && usingEphemeralPort) {
    throw new IllegalArgumentException("Cannot support recovery with an "
        + "ephemeral server port. Check the setting of "
        + YarnConfiguration.NM_ADDRESS);
  }
  // If recovering then delay opening the RPC service until the recovery
  // of resources and containers have completed, otherwise requests from
  // clients during recovery can interfere with the recovery process.
  final boolean delayedRpcServerStart =
      context.getNMStateStore().canRecover();

  Configuration serverConf = new Configuration(conf);

  // always enforce it to be token-based.
  serverConf.set(
    CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
    SaslRpcServer.AuthMethod.TOKEN.toString());
  
  YarnRPC rpc = YarnRPC.create(conf);

  server =
      rpc.getServer(ContainerManagementProtocol.class, this, initialAddress, 
          serverConf, this.context.getNMTokenSecretManager(),
          conf.getInt(YarnConfiguration.NM_CONTAINER_MGR_THREAD_COUNT, 
              YarnConfiguration.DEFAULT_NM_CONTAINER_MGR_THREAD_COUNT));
  
  // Enable service authorization?
  if (conf.getBoolean(
      CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
      false)) {
    refreshServiceAcls(conf, new NMPolicyProvider());
  }
  
  LOG.info("Blocking new container-requests as container manager rpc" +
  		" server is still starting.");
  this.setBlockNewContainerRequests(true);

  String bindHost = conf.get(YarnConfiguration.NM_BIND_HOST);
  String nmAddress = conf.getTrimmed(YarnConfiguration.NM_ADDRESS);
  String hostOverride = null;
  if (bindHost != null && !bindHost.isEmpty()
      && nmAddress != null && !nmAddress.isEmpty()) {
    //a bind-host case with an address, to support overriding the first
    //hostname found when querying for our hostname with the specified
    //address, combine the specified address with the actual port listened
    //on by the server
    hostOverride = nmAddress.split(":")[0];
  }

  // setup node ID
  InetSocketAddress connectAddress;
  if (delayedRpcServerStart) {
    connectAddress = NetUtils.getConnectAddress(initialAddress);
  } else {
    server.start();
    connectAddress = NetUtils.getConnectAddress(server);
  }
  NodeId nodeId = buildNodeId(connectAddress, hostOverride);
  ((NodeManager.NMContext)context).setNodeId(nodeId);
  this.context.getNMTokenSecretManager().setNodeId(nodeId);
  this.context.getContainerTokenSecretManager().setNodeId(nodeId);

  // start remaining services
  super.serviceStart();

  if (delayedRpcServerStart) {
    waitForRecoveredContainers();
    server.start();

    // check that the node ID is as previously advertised
    connectAddress = NetUtils.getConnectAddress(server);
    NodeId serverNode = buildNodeId(connectAddress, hostOverride);
    if (!serverNode.equals(nodeId)) {
      throw new IOException("Node mismatch after server started, expected '"
          + nodeId + "' but found '" + serverNode + "'");
    }
  }

  LOG.info("ContainerManager started at " + connectAddress);
  LOG.info("ContainerManager bound to " + initialAddress);
}
 
Example 17
Source File: DecommissionManager.java    From big-c with Apache License 2.0
/**
 * Start the decommission monitor thread.
 * @param conf
 */
void activate(Configuration conf) {
  final int intervalSecs =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY,
          DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_DEFAULT);
  checkArgument(intervalSecs >= 0, "Cannot set a negative " +
      "value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY);

  // By default, the new configuration key overrides the deprecated one.
  // No # node limit is set.
  int blocksPerInterval = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY,
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_DEFAULT);
  int nodesPerInterval = Integer.MAX_VALUE;

  // If the expected key isn't present and the deprecated one is,
  // copy the deprecated value into the new key. This overrides the
  // default.
  //
  // Also print a deprecation warning.
  final String deprecatedKey =
      "dfs.namenode.decommission.nodes.per.interval";
  final String strNodes = conf.get(deprecatedKey);
  if (strNodes != null) {
    nodesPerInterval = Integer.parseInt(strNodes);
    blocksPerInterval = Integer.MAX_VALUE;
    LOG.warn("Using deprecated configuration key {} value of {}.",
        deprecatedKey, nodesPerInterval); 
    LOG.warn("Please update your configuration to use {} instead.", 
        DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);
  }
  checkArgument(blocksPerInterval > 0,
      "Must set a positive value for "
      + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY);

  final int maxConcurrentTrackedNodes = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES,
      DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT);
  checkArgument(maxConcurrentTrackedNodes >= 0, "Cannot set a negative " +
      "value for "
      + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES);

  monitor = new Monitor(blocksPerInterval, 
      nodesPerInterval, maxConcurrentTrackedNodes);
  executor.scheduleAtFixedRate(monitor, intervalSecs, intervalSecs,
      TimeUnit.SECONDS);

  LOG.debug("Activating DecommissionManager with interval {} seconds, " +
          "{} max blocks per interval, {} max nodes per interval, " +
          "{} max concurrently tracked nodes.", intervalSecs,
      blocksPerInterval, nodesPerInterval, maxConcurrentTrackedNodes);
}
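A sketch of the deprecated-key interplay handled above, using the keys named in the method; the values are illustrative.

Configuration conf = new Configuration();
// preferred: cap the blocks scanned per interval
conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_BLOCKS_PER_INTERVAL_KEY, 500000);
// deprecated: when present, activate() caps nodes per interval instead,
// lifts the block cap to Integer.MAX_VALUE, and logs a deprecation warning
conf.setInt("dfs.namenode.decommission.nodes.per.interval", 4);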
 
Example 18
Source File: CacheConfig.java    From rubix with Apache License 2.0
public static String getGangliaMetricsHost(Configuration conf)
{
  return conf.get(KEY_METRICS_GANGLIA_HOST, DEFAULT_METRICS_GANGLIA_HOST);
}
 
Example 19
Source File: OrcFlowFileWriter.java    From nifi with Apache License 2.0
public OrcFlowFileWriter(OutputStream flowFileOutputStream,
                         Path path,
                         Configuration conf,
                         ObjectInspector inspector,
                         long stripeSize,
                         CompressionKind compress,
                         int bufferSize,
                         int rowIndexStride,
                         MemoryManager memoryManager,
                         boolean addBlockPadding,
                         OrcFile.Version version,
                         OrcFile.WriterCallback callback,
                         EncodingStrategy encodingStrategy,
                         CompressionStrategy compressionStrategy,
                         float paddingTolerance,
                         long blockSizeValue,
                         String bloomFilterColumnNames,
                         double bloomFilterFpp) throws IOException {
    this.flowFileOutputStream = flowFileOutputStream;
    this.path = path;
    this.conf = conf;
    this.callback = callback;
    callbackContext = (callback != null) ? () -> OrcFlowFileWriter.this : null;
    this.adjustedStripeSize = stripeSize;
    this.defaultStripeSize = stripeSize;
    this.version = version;
    this.encodingStrategy = encodingStrategy;
    this.compressionStrategy = compressionStrategy;
    this.addBlockPadding = addBlockPadding;
    this.blockSize = blockSizeValue;
    this.paddingTolerance = paddingTolerance;
    this.compress = compress;
    this.rowIndexStride = rowIndexStride;
    this.memoryManager = memoryManager;
    buildIndex = rowIndexStride > 0;
    codec = createCodec(compress);
    String allColumns = conf.get(IOConstants.COLUMNS);
    if (allColumns == null) {
        allColumns = getColumnNamesFromInspector(inspector);
    }
    this.bufferSize = getEstimatedBufferSize(allColumns, bufferSize);
    if (version == OrcFile.Version.V_0_11) {
        /* do not write bloom filters for ORC v11 */
        this.bloomFilterColumns =
                OrcUtils.includeColumns(null, allColumns, inspector);
    } else {
        this.bloomFilterColumns =
                OrcUtils.includeColumns(bloomFilterColumnNames, allColumns, inspector);
    }
    this.bloomFilterFpp = bloomFilterFpp;
    treeWriter = createTreeWriter(inspector, streamFactory, false);
    if (buildIndex && rowIndexStride < MIN_ROW_INDEX_STRIDE) {
        throw new IllegalArgumentException("Row stride must be at least " +
                MIN_ROW_INDEX_STRIDE);
    }

    // ensure that we are able to handle callbacks before we register ourselves
    memoryManager.addWriter(path, stripeSize, this);
}
 
Example 20
Source File: DFSUtil.java    From RDFS with Apache License 2.0
/** 
 * Sets the node-specific setting into the generic configuration key. Looks up
 * the value of "key.nameserviceId" and, if found, sets that value into the generic key
 * in the conf. Note that this only modifies the runtime conf.
 * 
 * @param conf
 *          Configuration object to lookup specific key and to set the value
 *          to the key passed. Note the conf object is modified.
 * @param nameserviceId
 *          nameservice Id to construct the node specific key.
 * @param keys
 *          The keys for which node-specific values are looked up
 */
public static void setGenericConf(Configuration conf,
    String nameserviceId, String... keys) {
  for (String key : keys) {
    String value = conf.get(getNameServiceIdKey(key, nameserviceId));
    if (value != null) {
      conf.set(key, value);
    }
  }
}
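A usage sketch of the per-nameservice override described in the javadoc; the key and nameservice id are illustrative, and getNameServiceIdKey is assumed to yield "key.nameserviceId" as documented.

Configuration conf = new Configuration();
conf.set("dfs.namenode.rpc-address", "generic-host:8020");   // generic value
conf.set("dfs.namenode.rpc-address.ns1", "ns1-host:8020");   // node-specific value
DFSUtil.setGenericConf(conf, "ns1", "dfs.namenode.rpc-address");
// the generic key now carries the ns1-specific value
String addr = conf.get("dfs.namenode.rpc-address");          // "ns1-host:8020"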