Java Code Examples for org.apache.hadoop.conf.Configuration.getTrimmedStrings()

The following are Java code examples showing how to use the getTrimmedStrings() method of the org.apache.hadoop.conf.Configuration class, drawn from several open-source projects.
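
Before the project examples, here is a minimal, self-contained sketch of what getTrimmedStrings() does with a comma-delimited property. The demo.* key names are made up for illustration; the behavior shown (splitting on commas, trimming whitespace, returning an empty array for an unset key) is what the examples below rely on.

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;

public class GetTrimmedStringsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // A comma-delimited value with stray whitespace around the tokens.
    conf.set("demo.hosts", " host1.example.com , host2.example.com ,host3.example.com");

    // getTrimmedStrings() splits on commas and trims each token.
    String[] hosts = conf.getTrimmedStrings("demo.hosts");
    System.out.println(Arrays.toString(hosts));
    // [host1.example.com, host2.example.com, host3.example.com]

    // If the key is unset, the varargs defaults are returned instead.
    String[] fallback = conf.getTrimmedStrings("demo.unset", "a", "b");
    System.out.println(Arrays.toString(fallback)); // [a, b]

    // With no defaults, an unset key yields an empty array rather than null.
    System.out.println(conf.getTrimmedStrings("demo.unset").length); // 0
  }
}
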
Example 1
Project: hadoop-oss   File: SerializationFactory.java
/**
 * <p>
 * Serializations are found by reading the <code>io.serializations</code>
 * property from <code>conf</code>, which is a comma-delimited list of
 * classnames.
 * </p>
 */
public SerializationFactory(Configuration conf) {
  super(conf);
  if (conf.get(CommonConfigurationKeys.IO_SERIALIZATIONS_KEY).equals("")) {
    LOG.warn("Serialization for various data types may not be available. Please configure "
        + CommonConfigurationKeys.IO_SERIALIZATIONS_KEY
        + " properly to have serialization support (it is currently not set).");
  } else {
    for (String serializerName : conf.getTrimmedStrings(
        CommonConfigurationKeys.IO_SERIALIZATIONS_KEY, new String[] {
            WritableSerialization.class.getName(),
            AvroSpecificSerialization.class.getName(),
            AvroReflectSerialization.class.getName() })) {
      add(conf, serializerName);
    }
  }
}
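
The io.serializations list that Example 1 reads can also be populated programmatically. A minimal sketch, assuming the stock Writable and Avro serializer class names shipped with Hadoop:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.serializer.SerializationFactory;

public class SerializationConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // setStrings() joins the values with commas; getTrimmedStrings() in the
    // factory constructor splits and trims them again.
    conf.setStrings("io.serializations",
        "org.apache.hadoop.io.serializer.WritableSerialization",
        "org.apache.hadoop.io.serializer.avro.AvroSpecificSerialization",
        "org.apache.hadoop.io.serializer.avro.AvroReflectSerialization");
    SerializationFactory factory = new SerializationFactory(conf);
    // Text is a Writable, so WritableSerialization should be selected for it.
    System.out.println(factory.getSerialization(Text.class));
  }
}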
 
Example 2
Project: hadoop   File: TopConf.java
public TopConf(Configuration conf) {
  isEnabled = conf.getBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY,
      DFSConfigKeys.NNTOP_ENABLED_DEFAULT);
  String[] periodsStr = conf.getTrimmedStrings(
      DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY,
      DFSConfigKeys.NNTOP_WINDOWS_MINUTES_DEFAULT);
  nntopReportingPeriodsMs = new int[periodsStr.length];
  for (int i = 0; i < periodsStr.length; i++) {
    nntopReportingPeriodsMs[i] = Ints.checkedCast(
        TimeUnit.MINUTES.toMillis(Integer.parseInt(periodsStr[i])));
  }
  for (int aPeriodMs: nntopReportingPeriodsMs) {
    Preconditions.checkArgument(aPeriodMs >= TimeUnit.MINUTES.toMillis(1),
        "minimum reporting period is 1 min!");
  }
}
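
For context, the window property parsed above is a plain comma-separated list of minute values. A small sketch with hypothetical settings; the key string used here is, as far as I know, the value behind DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY:

import java.util.Arrays;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;

public class TopWindowsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical reporting windows of 1, 5 and 25 minutes.
    conf.set("dfs.namenode.top.windows.minutes", "1, 5, 25");

    String[] periodsStr = conf.getTrimmedStrings("dfs.namenode.top.windows.minutes");
    long[] periodsMs = Arrays.stream(periodsStr)
        .mapToLong(p -> TimeUnit.MINUTES.toMillis(Integer.parseInt(p)))
        .toArray();
    // TopConf would reject any value below one minute (60000 ms).
    System.out.println(Arrays.toString(periodsMs)); // [60000, 300000, 1500000]
  }
}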
 
Example 3
Project: hadoop-oss   File: SaslPropertiesResolver.java
@Override
public void setConf(Configuration conf) {
  this.conf = conf;
  properties = new TreeMap<String,String>();
  String[] qop = conf.getTrimmedStrings(
      CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION,
      QualityOfProtection.AUTHENTICATION.toString());
  for (int i=0; i < qop.length; i++) {
    qop[i] = QualityOfProtection.valueOf(
        StringUtils.toUpperCase(qop[i])).getSaslQop();
  }
  properties.put(Sasl.QOP, StringUtils.join(",", qop));
  properties.put(Sasl.SERVER_AUTH, "true");
}
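
To see the mapping above in action, hadoop.rpc.protection can be given several comma-separated values. A minimal sketch; the expected SASL tokens ("auth", "auth-conf") follow the standard SASL QOP names and are an assumption here:

import java.util.Map;
import javax.security.sasl.Sasl;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.security.SaslPropertiesResolver;

public class SaslQopDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-separated protection levels; surrounding whitespace is tolerated
    // because setConf() above uses getTrimmedStrings().
    conf.set(CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION,
        "authentication, privacy");

    SaslPropertiesResolver resolver = SaslPropertiesResolver.getInstance(conf);
    Map<String, String> props = resolver.getDefaultProperties();
    System.out.println(props.get(Sasl.QOP));         // expected: auth,auth-conf
    System.out.println(props.get(Sasl.SERVER_AUTH)); // expected: true
  }
}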
 
Example 4
Project: hadoop-oss   File: ProxyServers.java
public static void refresh(Configuration conf){
  Collection<String> tempServers = new HashSet<String>();
  // trusted proxy servers such as http proxies
  for (String host : conf.getTrimmedStrings(CONF_HADOOP_PROXYSERVERS)) {
    InetSocketAddress addr = new InetSocketAddress(host, 0);
    if (!addr.isUnresolved()) {
      tempServers.add(addr.getAddress().getHostAddress());
    }
  }
  proxyServers = tempServers;
}
 
Example 5
Project: hadoop   File: SpanReceiverHost.java
/**
 * Reads the names of classes specified in the
 * "hadoop.htrace.spanreceiver.classes" property and instantiates and registers
 * them with the Tracer as SpanReceiver's.
 *
 * The nullary constructor is called during construction, but if the classes
 * specified implement the Configurable interface, setConfiguration() will be
 * called on them. This allows SpanReceivers to use values from the Hadoop
 * configuration.
 */
public synchronized void loadSpanReceivers(Configuration conf) {
  config = new Configuration(conf);
  String receiverKey = confPrefix + SPAN_RECEIVERS_CONF_SUFFIX;
  String[] receiverNames = config.getTrimmedStrings(receiverKey);
  if (receiverNames == null || receiverNames.length == 0) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("No span receiver names found in " + receiverKey + ".");
    }
    return;
  }
  // It's convenient to have each daemon log to a random trace file when
  // testing.
  String pathKey = confPrefix + LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX;
  if (config.get(pathKey) == null) {
    String uniqueFile = getUniqueLocalTraceFileName();
    config.set(pathKey, uniqueFile);
    if (LOG.isTraceEnabled()) {
      LOG.trace("Set " + pathKey + " to " + uniqueFile);
    }
  }
  for (String className : receiverNames) {
    try {
      SpanReceiver rcvr = loadInstance(className, EMPTY);
      Trace.addReceiver(rcvr);
      receivers.put(highestId++, rcvr);
      LOG.info("Loaded SpanReceiver " + className + " successfully.");
    } catch (IOException e) {
      LOG.error("Failed to load SpanReceiver", e);
    }
  }
}
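
The property named in the javadoc takes a comma-separated list of receiver class names. A hedged sketch of the corresponding configuration; LocalFileSpanReceiver is assumed to be an HTrace receiver available on the classpath, and the snippet only echoes the parsed list rather than loading anything:

import org.apache.hadoop.conf.Configuration;

public class SpanReceiverConfigDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Comma-separated receiver class names, as described in the javadoc above.
    conf.set("hadoop.htrace.spanreceiver.classes",
        "org.apache.htrace.impl.LocalFileSpanReceiver");
    // loadSpanReceivers() trims this list with getTrimmedStrings() and then
    // instantiates each class reflectively.
    for (String name : conf.getTrimmedStrings("hadoop.htrace.spanreceiver.classes")) {
      System.out.println("would load receiver: " + name);
    }
  }
}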
 
Example 6
Project: hadoop   File: SerializationFactory.java
/**
 * <p>
 * Serializations are found by reading the <code>io.serializations</code>
 * property from <code>conf</code>, which is a comma-delimited list of
 * classnames.
 * </p>
 */
public SerializationFactory(Configuration conf) {
  super(conf);
  for (String serializerName : conf.getTrimmedStrings(
    CommonConfigurationKeys.IO_SERIALIZATIONS_KEY,
    new String[]{WritableSerialization.class.getName(),
      AvroSpecificSerialization.class.getName(),
      AvroReflectSerialization.class.getName()})) {
    add(conf, serializerName);
  }
}
 
Example 7
Project: hadoop-oss   File: NuCypherExtClient.java
/**
 * Create a new NuCypherExtClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for
 * {@link HdfsClientConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY}
 * in the configuration, the DFSClient will use
 * {@link LossyRetryInvocationHandler} as its RetryInvocationHandler.
 * Otherwise one of nameNodeUri or rpcNamenode must be null.
 */
@VisibleForTesting
public NuCypherExtClient(URI nameNodeUri, NuCypherExtClientProtocol rpcNamenode,
                         Configuration conf, FileSystem.Statistics stats) throws IOException {
  // this.dfsClientConf = new DfsClientConf(conf);
  this.conf = conf;
  this.stats = stats;
  this.socketFactory = NetUtils.getSocketFactory(conf, NuCypherExtClientProtocol.class);

  this.ugi = UserGroupInformation.getCurrentUser();

  this.authority = nameNodeUri == null ? "null" : nameNodeUri.getAuthority();
  this.clientName = "NuCypherExtClient_" + conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE") + "_" +
      ThreadLocalRandom.current().nextInt() + "_" +
      Thread.currentThread().getId();
  int numResponseToDrop = conf.getInt(
      DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
      DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
  ProxyAndInfo<NuCypherExtClientProtocol> proxyInfo = null;
  AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);

  if (numResponseToDrop > 0) {
    // This case is used for testing.
    LOG.warn(DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
        + " is set to " + numResponseToDrop
        + ", this hacked client will proactively drop responses");
    proxyInfo = NuCypherExtNameNodeProxiesClient.createProxyWithLossyRetryHandler(conf,
        nameNodeUri, NuCypherExtClientProtocol.class, numResponseToDrop,
        nnFallbackToSimpleAuth);
  }

  if (proxyInfo != null) {
    // this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  } else if (rpcNamenode != null) {
    // This case is used for testing.
    Preconditions.checkArgument(nameNodeUri == null);
    this.namenode = rpcNamenode;
    // dtService = null;
  } else {
    Preconditions.checkArgument(nameNodeUri != null,
        "null URI");
    proxyInfo = NuCypherExtNameNodeProxiesClient.createProxyWithNuCypherExtClientProtocol(conf,
        nameNodeUri, nnFallbackToSimpleAuth);
    // this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  }

  String[] localInterfaces =
      conf.getTrimmedStrings(DFS_CLIENT_LOCAL_INTERFACES);
  localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
  if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
    LOG.debug("Using local interfaces [" +
        Joiner.on(',').join(localInterfaces) + "] with addresses [" +
        Joiner.on(',').join(localInterfaceAddrs) + "]");
  }
}
 
Example 8
Project: hadoop   File: MRApps.java
@VisibleForTesting
static String[] getSystemClasses(Configuration conf) {
  return conf.getTrimmedStrings(
      MRJobConfig.MAPREDUCE_JOB_CLASSLOADER_SYSTEM_CLASSES);
}
 
Example 9
Project: hadoop   File: FsDatasetImpl.java
/**
 * An FSDataset has a directory where it loads its data files.
 */
FsDatasetImpl(DataNode datanode, DataStorage storage, Configuration conf
    ) throws IOException {
  this.fsRunning = true;
  this.datanode = datanode;
  this.dataStorage = storage;
  this.conf = conf;
  // The number of volumes required for operation is the total number 
  // of volumes minus the number of failed volumes we can tolerate.
  final int volFailuresTolerated =
    conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
                DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);

  String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
  Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
  List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
      dataLocations, storage);

  int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
  int volsFailed = volumeFailureInfos.size();
  this.validVolsRequired = volsConfigured - volFailuresTolerated;

  if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
    throw new DiskErrorException("Invalid volume failure "
        + " config value: " + volFailuresTolerated);
  }
  if (volsFailed > volFailuresTolerated) {
    throw new DiskErrorException("Too many failed volumes - "
        + "current valid volumes: " + storage.getNumStorageDirs() 
        + ", volumes configured: " + volsConfigured 
        + ", volumes failed: " + volsFailed
        + ", volume failures tolerated: " + volFailuresTolerated);
  }

  storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
  volumeMap = new ReplicaMap(this);
  ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);

  @SuppressWarnings("unchecked")
  final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
      ReflectionUtils.newInstance(conf.getClass(
          DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
          RoundRobinVolumeChoosingPolicy.class,
          VolumeChoosingPolicy.class), conf);
  volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
      blockChooserImpl);
  asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
  asyncLazyPersistService = new RamDiskAsyncLazyPersistService(datanode);
  deletingBlock = new HashMap<String, Set<Long>>();

  for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
    addVolume(dataLocations, storage.getStorageDir(idx));
  }
  setupAsyncLazyPersistThreads();

  cacheManager = new FsDatasetCache(this);

  // Start the lazy writer once we have built the replica maps.
  lazyWriter = new Daemon(new LazyWriter(conf));
  lazyWriter.start();
  registerMBean(datanode.getDatanodeUuid());
  localFS = FileSystem.getLocal(conf);
  blockPinningEnabled = conf.getBoolean(
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT);
}
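
Finally, the datanode directory list read above is just another comma-delimited property. A brief sketch with hypothetical mount points; the key string used here is, as far as I know, the value behind DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY:

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;

public class DataDirsDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical mount points; whitespace around the commas is trimmed away.
    conf.set("dfs.datanode.data.dir",
        "/data/1/dfs/dn, /data/2/dfs/dn, /data/3/dfs/dn");
    String[] dataDirs = conf.getTrimmedStrings("dfs.datanode.data.dir");
    System.out.println(Arrays.toString(dataDirs));
    // [/data/1/dfs/dn, /data/2/dfs/dn, /data/3/dfs/dn]
  }
}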