Java Code Examples for org.apache.hadoop.conf.Configuration#getLong()

The following examples show how to use org.apache.hadoop.conf.Configuration#getLong(). Each example is taken from an open-source project; the source file and project are noted above the code.
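Before the project examples, here is a minimal, self-contained sketch of the method's basic contract: getLong(name, defaultValue) returns the named property parsed as a long, or the supplied default when the property is unset. The property name my.example.timeout.ms below is purely hypothetical.

import org.apache.hadoop.conf.Configuration;

public class GetLongDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Property not set yet: getLong() returns the supplied default.
    long timeoutMs = conf.getLong("my.example.timeout.ms", 30000L);
    System.out.println("timeout = " + timeoutMs); // prints 30000

    // After setLong(), getLong() parses and returns the stored value.
    conf.setLong("my.example.timeout.ms", 60000L);
    System.out.println("timeout = " + conf.getLong("my.example.timeout.ms", 30000L)); // prints 60000
  }
}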
Example 1
Source File: CommitterEventHandler.java    From big-c with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  commitThreadCancelTimeoutMs = conf.getInt(
      MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS,
      MRJobConfig.DEFAULT_MR_AM_COMMITTER_CANCEL_TIMEOUT_MS);
  commitWindowMs = conf.getLong(MRJobConfig.MR_AM_COMMIT_WINDOW_MS,
      MRJobConfig.DEFAULT_MR_AM_COMMIT_WINDOW_MS);
  try {
    fs = FileSystem.get(conf);
    JobID id = TypeConverter.fromYarn(context.getApplicationID());
    JobId jobId = TypeConverter.toYarn(id);
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
    endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, jobId);
    endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, jobId);
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
}
 
Example 2
Source File: AggregatedLogDeletionService.java    From big-c with Apache License 2.0
private void scheduleLogDeletionTask() throws IOException {
  Configuration conf = getConfig();
  if (!conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
      YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
    // Log aggregation is not enabled so don't bother
    return;
  }
  long retentionSecs = conf.getLong(
      YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS,
      YarnConfiguration.DEFAULT_LOG_AGGREGATION_RETAIN_SECONDS);
  if (retentionSecs < 0) {
    LOG.info("Log Aggregation deletion is disabled because retention is"
        + " too small (" + retentionSecs + ")");
    return;
  }
  setLogAggCheckIntervalMsecs(retentionSecs);
  task = new LogDeletionTask(conf, retentionSecs, creatRMClient());
  timer = new Timer();
  timer.scheduleAtFixedRate(task, 0, checkIntervalMsecs);
}
 
Example 3
Source File: CommitterEventHandler.java    From hadoop with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  commitThreadCancelTimeoutMs = conf.getInt(
      MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS,
      MRJobConfig.DEFAULT_MR_AM_COMMITTER_CANCEL_TIMEOUT_MS);
  commitWindowMs = conf.getLong(MRJobConfig.MR_AM_COMMIT_WINDOW_MS,
      MRJobConfig.DEFAULT_MR_AM_COMMIT_WINDOW_MS);
  try {
    fs = FileSystem.get(conf);
    JobID id = TypeConverter.fromYarn(context.getApplicationID());
    JobId jobId = TypeConverter.toYarn(id);
    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    startCommitFile = MRApps.getStartJobCommitFile(conf, user, jobId);
    endCommitSuccessFile = MRApps.getEndJobCommitSuccessFile(conf, user, jobId);
    endCommitFailureFile = MRApps.getEndJobCommitFailureFile(conf, user, jobId);
  } catch (IOException e) {
    throw new YarnRuntimeException(e);
  }
}
 
Example 4
Source File: IncreasingToUpperBoundRegionSplitPolicy.java    From hbase with Apache License 2.0
@Override
protected void configureForRegion(HRegion region) {
  super.configureForRegion(region);
  Configuration conf = getConf();
  initialSize = conf.getLong("hbase.increasing.policy.initial.size", -1);
  if (initialSize > 0) {
    return;
  }
  TableDescriptor desc = region.getTableDescriptor();
  if (desc != null) {
    initialSize = 2 * desc.getMemStoreFlushSize();
  }
  if (initialSize <= 0) {
    initialSize = 2 * conf.getLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE,
                                   TableDescriptorBuilder.DEFAULT_MEMSTORE_FLUSH_SIZE);
  }
}
 
Example 5
Source File: UpstreamManager.java    From nnproxy with Apache License 2.0
public UpstreamManager(NNProxy nnProxy, Configuration conf) {
    this.nnProxy = nnProxy;
    this.conf = conf;
    final long cacheExpire =
            conf.getLong(ProxyConfig.USER_PROXY_EXPIRE_MS, ProxyConfig.USER_PROXY_EXPIRE_MS_DEFAULT);
    maxConrruentRequestPerFs =
            conf.getLong(ProxyConfig.MAX_CONCURRENT_REQUEST_PER_FS, ProxyConfig.MAX_CONCURRENT_REQUEST_PER_FS_DEFAULT);
    this.upstreamCache = CacheBuilder.<UpstreamTicket, Upstream>newBuilder()
            .expireAfterAccess(cacheExpire, TimeUnit.MILLISECONDS)
            .build(new CacheLoader<UpstreamTicket, Upstream>() {
                @Override
                public Upstream load(UpstreamTicket ticket) throws Exception {
                    return makeUpstream(ticket);
                }
            });
    this.fsRequests = new ConcurrentHashMap<>();
}
 
Example 6
Source File: GenerateData.java    From big-c with Apache License 2.0
public ChunkWriter(Path outDir, Configuration conf) throws IOException {
  this.outDir = outDir;
  fs = outDir.getFileSystem(conf);
  blocksize = conf.getInt(GRIDMIX_GEN_BLOCKSIZE, 1 << 28);
  replicas = (short) conf.getInt(GRIDMIX_GEN_REPLICATION, 3);
  maxFileBytes = conf.getLong(GRIDMIX_GEN_CHUNK, 1L << 30);
  nextDestination();
}
 
Example 7
Source File: JobHistory.java    From hadoop with Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  LOG.info("JobHistory Init");
  this.conf = conf;
  this.appID = ApplicationId.newInstance(0, 0);
  this.appAttemptID = RecordFactoryProvider.getRecordFactory(conf)
      .newRecordInstance(ApplicationAttemptId.class);

  moveThreadInterval = conf.getLong(
      JHAdminConfig.MR_HISTORY_MOVE_INTERVAL_MS,
      JHAdminConfig.DEFAULT_MR_HISTORY_MOVE_INTERVAL_MS);

  hsManager = createHistoryFileManager();
  hsManager.init(conf);
  try {
    hsManager.initExisting();
  } catch (IOException e) {
    throw new YarnRuntimeException("Failed to intialize existing directories", e);
  }

  storage = createHistoryStorage();
  
  if (storage instanceof Service) {
    ((Service) storage).init(conf);
  }
  storage.setHistoryFileManager(hsManager);

  super.serviceInit(conf);
}
 
Example 8
Source File: DefaultOperationQuota.java    From hbase with Apache License 2.0
/**
 * NOTE: The order matters. It should be something like [user, table, namespace, global]
 */
public DefaultOperationQuota(final Configuration conf, final List<QuotaLimiter> limiters) {
  this.writeCapacityUnit =
      conf.getLong(QuotaUtil.WRITE_CAPACITY_UNIT_CONF_KEY, QuotaUtil.DEFAULT_WRITE_CAPACITY_UNIT);
  this.readCapacityUnit =
      conf.getLong(QuotaUtil.READ_CAPACITY_UNIT_CONF_KEY, QuotaUtil.DEFAULT_READ_CAPACITY_UNIT);
  this.limiters = limiters;
  int size = OperationType.values().length;
  operationSize = new long[size];

  for (int i = 0; i < size; ++i) {
    operationSize[i] = 0;
  }
}
 
Example 9
Source File: AerospikeConfigUtil.java    From aerospike-hadoop with Apache License 2.0
public static long getInputNumRangeEnd(Configuration conf) {
    long end = conf.getLong(AerospikeConfigEnum.INPUT_NUMRANGE_END.value, INVALID_LONG);
    if (end == INVALID_LONG && getInputOperation(conf).equals("numrange"))
        throw new UnsupportedOperationException
            ("missing input numrange end");
    log.info("using " + AerospikeConfigEnum.INPUT_NUMRANGE_END.value + " = " + end);
    return end;
}
 
Example 10
Source File: CacheConfig.java    From hbase with Apache License 2.0
private long getCacheCompactedBlocksOnWriteThreshold(Configuration conf) {
  long cacheCompactedBlocksOnWriteThreshold = conf
    .getLong(CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD_KEY,
      DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD);

  if (cacheCompactedBlocksOnWriteThreshold < 0) {
    LOG.warn(
      "cacheCompactedBlocksOnWriteThreshold value : {} is less than 0, resetting it to: {}",
      cacheCompactedBlocksOnWriteThreshold, DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD);
    cacheCompactedBlocksOnWriteThreshold = DEFAULT_CACHE_COMPACTED_BLOCKS_ON_WRITE_THRESHOLD;
  }

  return cacheCompactedBlocksOnWriteThreshold;
}
 
Example 11
Source File: Groups.java    From big-c with Apache License 2.0
public Groups(Configuration conf, final Timer timer) {
  impl = 
    ReflectionUtils.newInstance(
        conf.getClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING, 
                      ShellBasedUnixGroupsMapping.class, 
                      GroupMappingServiceProvider.class), 
        conf);

  cacheTimeout = 
    conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS, 
        CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_SECS_DEFAULT) * 1000;
  negativeCacheTimeout =
    conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS,
        CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_NEGATIVE_CACHE_SECS_DEFAULT) * 1000;
  warningDeltaMs =
    conf.getLong(CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS,
      CommonConfigurationKeys.HADOOP_SECURITY_GROUPS_CACHE_WARN_AFTER_MS_DEFAULT);
  parseStaticMapping(conf);

  this.timer = timer;
  this.cache = CacheBuilder.newBuilder()
    .refreshAfterWrite(cacheTimeout, TimeUnit.MILLISECONDS)
    .ticker(new TimerToTickerAdapter(timer))
    .expireAfterWrite(10 * cacheTimeout, TimeUnit.MILLISECONDS)
    .build(new GroupCacheLoader());

  if(negativeCacheTimeout > 0) {
    Cache<String, Boolean> tempMap = CacheBuilder.newBuilder()
      .expireAfterWrite(negativeCacheTimeout, TimeUnit.MILLISECONDS)
      .ticker(new TimerToTickerAdapter(timer))
      .build();
    negativeCache = Collections.newSetFromMap(tempMap.asMap());
  }

  if(LOG.isDebugEnabled())
    LOG.debug("Group mapping impl=" + impl.getClass().getName() + 
        "; cacheTimeout=" + cacheTimeout + "; warningDeltaMs=" +
        warningDeltaMs);
}
 
Example 12
Source File: RandomWriter.java    From tez with Apache License 2.0
/**
 * Save the values out of the configuration that we need to write
 * the data.
 */
@Override
public void setup(Context context) {
  Configuration conf = context.getConfiguration();
  numBytesToWrite = conf.getLong(BYTES_PER_MAP,
                                1*1024*1024*1024);
  minKeySize = conf.getInt(MIN_KEY, 10);
  keySizeRange = 
    conf.getInt(MAX_KEY, 1000) - minKeySize;
  minValueSize = conf.getInt(MIN_VALUE, 0);
  valueSizeRange = 
    conf.getInt(MAX_VALUE, 20000) - minValueSize;
}
 
Example 13
Source File: LocalDirsHandlerService.java    From big-c with Apache License 2.0
/**
 * Method which initializes the timer task and its interval time.
 * 
 */
@Override
protected void serviceInit(Configuration config) throws Exception {
  // Clone the configuration as we may do modifications to dirs-list
  Configuration conf = new Configuration(config);
  diskHealthCheckInterval = conf.getLong(
      YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS,
      YarnConfiguration.DEFAULT_NM_DISK_HEALTH_CHECK_INTERVAL_MS);
  monitoringTimerTask = new MonitoringTimerTask(conf);
  isDiskHealthCheckerEnabled = conf.getBoolean(
      YarnConfiguration.NM_DISK_HEALTH_CHECK_ENABLE, true);
  minNeededHealthyDisksFactor = conf.getFloat(
      YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION,
      YarnConfiguration.DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION);
  lastDisksCheckTime = System.currentTimeMillis();
  super.serviceInit(conf);

  FileContext localFs;
  try {
    localFs = FileContext.getLocalFSFileContext(config);
  } catch (IOException e) {
    throw new YarnRuntimeException("Unable to get the local filesystem", e);
  }
  FsPermission perm = new FsPermission((short)0755);
  boolean createSucceeded = localDirs.createNonExistentDirs(localFs, perm);
  createSucceeded &= logDirs.createNonExistentDirs(localFs, perm);
  if (!createSucceeded) {
    updateDirsAfterTest();
  }

  // Check the disk health immediately to weed out bad directories
  // before other init code attempts to use them.
  checkDirs();
}
 
Example 14
Source File: NameNodeResourceChecker.java    From big-c with Apache License 2.0
/**
 * Create a NameNodeResourceChecker, which will check the edits dirs and any
 * additional dirs to check set in <code>conf</code>.
 */
public NameNodeResourceChecker(Configuration conf) throws IOException {
  this.conf = conf;
  volumes = new HashMap<String, CheckedVolume>();

  duReserved = conf.getLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY,
      DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_DEFAULT);
  
  Collection<URI> extraCheckedVolumes = Util.stringCollectionAsURIs(conf
      .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY));
  
  Collection<URI> localEditDirs = Collections2.filter(
      FSNamesystem.getNamespaceEditsDirs(conf),
      new Predicate<URI>() {
        @Override
        public boolean apply(URI input) {
          if (input.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
            return true;
          }
          return false;
        }
      });

  // Add all the local edits dirs, marking some as required if they are
  // configured as such.
  for (URI editsDirToCheck : localEditDirs) {
    addDirToCheck(editsDirToCheck,
        FSNamesystem.getRequiredNamespaceEditsDirs(conf).contains(
            editsDirToCheck));
  }

  // All extra checked volumes are marked "required"
  for (URI extraDirToCheck : extraCheckedVolumes) {
    addDirToCheck(extraDirToCheck, true);
  }
  
  minimumRedundantVolumes = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY,
      DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_DEFAULT);
}
 
Example 15
Source File: LocalDirsHandlerService.java    From hadoop with Apache License 2.0
/**
 * Method which initializes the timer task and its interval time.
 * 
 */
@Override
protected void serviceInit(Configuration config) throws Exception {
  // Clone the configuration as we may do modifications to dirs-list
  Configuration conf = new Configuration(config);
  diskHealthCheckInterval = conf.getLong(
      YarnConfiguration.NM_DISK_HEALTH_CHECK_INTERVAL_MS,
      YarnConfiguration.DEFAULT_NM_DISK_HEALTH_CHECK_INTERVAL_MS);
  monitoringTimerTask = new MonitoringTimerTask(conf);
  isDiskHealthCheckerEnabled = conf.getBoolean(
      YarnConfiguration.NM_DISK_HEALTH_CHECK_ENABLE, true);
  minNeededHealthyDisksFactor = conf.getFloat(
      YarnConfiguration.NM_MIN_HEALTHY_DISKS_FRACTION,
      YarnConfiguration.DEFAULT_NM_MIN_HEALTHY_DISKS_FRACTION);
  lastDisksCheckTime = System.currentTimeMillis();
  super.serviceInit(conf);

  FileContext localFs;
  try {
    localFs = FileContext.getLocalFSFileContext(config);
  } catch (IOException e) {
    throw new YarnRuntimeException("Unable to get the local filesystem", e);
  }
  FsPermission perm = new FsPermission((short)0755);
  boolean createSucceeded = localDirs.createNonExistentDirs(localFs, perm);
  createSucceeded &= logDirs.createNonExistentDirs(localFs, perm);
  if (!createSucceeded) {
    updateDirsAfterTest();
  }

  // Check the disk health immediately to weed out bad directories
  // before other init code attempts to use them.
  checkDirs();
}
 
Example 16
Source File: RandomWriter.java    From hadoop with Apache License 2.0
/**
 * Save the values out of the configuration that we need to write
 * the data.
 */
@Override
public void setup(Context context) {
  Configuration conf = context.getConfiguration();
  numBytesToWrite = conf.getLong(BYTES_PER_MAP,
                                1*1024*1024*1024);
  minKeySize = conf.getInt(MIN_KEY, 10);
  keySizeRange = 
    conf.getInt(MAX_KEY, 1000) - minKeySize;
  minValueSize = conf.getInt(MIN_VALUE, 0);
  valueSizeRange = 
    conf.getInt(MAX_VALUE, 20000) - minValueSize;
}
 
Example 17
Source File: JvmPauseMonitor.java    From hadoop with Apache License 2.0
public JvmPauseMonitor(Configuration conf) {
  this.warnThresholdMs = conf.getLong(WARN_THRESHOLD_KEY, WARN_THRESHOLD_DEFAULT);
  this.infoThresholdMs = conf.getLong(INFO_THRESHOLD_KEY, INFO_THRESHOLD_DEFAULT);
}
 
Example 18
Source File: DFSClient.java    From hadoop with Apache License 2.0
/** 
 * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
 * If HA is enabled and a positive value is set for 
 * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
 * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
 * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode 
 * must be null.
 */
@VisibleForTesting
public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats)
  throws IOException {
  SpanReceiverHost.get(conf, DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
  traceSampler = new SamplerBuilder(TraceUtils.
      wrapHadoopConf(DFSConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf)).build();
  // Copy only the required DFSClient configuration
  this.dfsClientConf = new Conf(conf);
  if (this.dfsClientConf.useLegacyBlockReaderLocal) {
    LOG.debug("Using legacy short-circuit local reads.");
  }
  this.conf = conf;
  this.stats = stats;
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);

  this.ugi = UserGroupInformation.getCurrentUser();
  
  this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
  this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" + 
      DFSUtil.getRandom().nextInt()  + "_" + Thread.currentThread().getId();
  int numResponseToDrop = conf.getInt(
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
      DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
  NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
  AtomicBoolean nnFallbackToSimpleAuth = new AtomicBoolean(false);
  if (numResponseToDrop > 0) {
    // This case is used for testing.
    LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
        + " is set to " + numResponseToDrop
        + ", this hacked client will proactively drop responses");
    proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf,
        nameNodeUri, ClientProtocol.class, numResponseToDrop,
        nnFallbackToSimpleAuth);
  }
  
  if (proxyInfo != null) {
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  } else if (rpcNamenode != null) {
    // This case is used for testing.
    Preconditions.checkArgument(nameNodeUri == null);
    this.namenode = rpcNamenode;
    dtService = null;
  } else {
    Preconditions.checkArgument(nameNodeUri != null,
        "null URI");
    proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
        ClientProtocol.class, nnFallbackToSimpleAuth);
    this.dtService = proxyInfo.getDelegationTokenService();
    this.namenode = proxyInfo.getProxy();
  }

  String localInterfaces[] =
    conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
  localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
  if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
    LOG.debug("Using local interfaces [" +
    Joiner.on(',').join(localInterfaces)+ "] with addresses [" +
    Joiner.on(',').join(localInterfaceAddrs) + "]");
  }
  
  Boolean readDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_READS) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_READS, false);
  Long readahead = (conf.get(DFS_CLIENT_CACHE_READAHEAD) == null) ?
      null : conf.getLong(DFS_CLIENT_CACHE_READAHEAD, 0);
  Boolean writeDropBehind = (conf.get(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES) == null) ?
      null : conf.getBoolean(DFS_CLIENT_CACHE_DROP_BEHIND_WRITES, false);
  this.defaultReadCachingStrategy =
      new CachingStrategy(readDropBehind, readahead);
  this.defaultWriteCachingStrategy =
      new CachingStrategy(writeDropBehind, readahead);
  this.clientContext = ClientContext.get(
      conf.get(DFS_CLIENT_CONTEXT, DFS_CLIENT_CONTEXT_DEFAULT),
      dfsClientConf);
  this.hedgedReadThresholdMillis = conf.getLong(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THRESHOLD_MILLIS);
  int numThreads = conf.getInt(
      DFSConfigKeys.DFS_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE,
      DFSConfigKeys.DEFAULT_DFSCLIENT_HEDGED_READ_THREADPOOL_SIZE);
  if (numThreads > 0) {
    this.initThreadsNumForHedgedReads(numThreads);
  }
  this.saslClient = new SaslDataTransferClient(
    conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
    TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
}
 
Example 19
Source File: DF.java    From hadoop-gpu with Apache License 2.0
public DF(File path, Configuration conf) throws IOException {
  this(path, conf.getLong("dfs.df.interval", DF.DF_INTERVAL_DEFAULT));
}
 
Example 20
Source File: TinyLfuBlockCache.java    From hbase with Apache License 2.0
/**
 * Creates a block cache.
 *
 * @param maximumSizeInBytes maximum size of this cache, in bytes
 * @param avgBlockSize expected average size of blocks, in bytes
 * @param executor the cache's executor
 * @param conf additional configuration
 */
public TinyLfuBlockCache(long maximumSizeInBytes, long avgBlockSize,
    Executor executor, Configuration conf) {
  this(maximumSizeInBytes, avgBlockSize,
      conf.getLong(MAX_BLOCK_SIZE, DEFAULT_MAX_BLOCK_SIZE), executor);
}