Java Code Examples for org.apache.hadoop.conf.Configuration#getFloat()

The following examples show how to use org.apache.hadoop.conf.Configuration#getFloat(). These examples are extracted from open source projects; each one is preceded by its source project, file, and license.
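
Before the project examples, here is a minimal self-contained sketch of the pattern they all share. The key name my.example.ratio and its values are made up for illustration; getFloat(name, defaultValue) returns the configured value parsed as a float, or defaultValue when the key is unset (hadoop-common must be on the classpath).

import org.apache.hadoop.conf.Configuration;

public class GetFloatSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Key absent: the supplied default is returned.
    float ratio = conf.getFloat("my.example.ratio", 0.75f);   // 0.75
    // Key present: the stored value wins over the default.
    conf.setFloat("my.example.ratio", 0.25f);
    ratio = conf.getFloat("my.example.ratio", 0.75f);         // 0.25
    System.out.println(ratio);
  }
}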
Example 1
Source Project: big-c   File: TrashPolicyDefault.java    License: Apache License 2.0
@Override
public void initialize(Configuration conf, FileSystem fs, Path home) {
  this.fs = fs;
  this.trash = new Path(home, TRASH);
  this.homesParent = home.getParent();
  this.current = new Path(trash, CURRENT);
  this.deletionInterval = (long)(conf.getFloat(
      FS_TRASH_INTERVAL_KEY, FS_TRASH_INTERVAL_DEFAULT)
      * MSECS_PER_MINUTE);
  this.emptierInterval = (long)(conf.getFloat(
      FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
      * MSECS_PER_MINUTE);
  LOG.info("Namenode trash configuration: Deletion interval = " +
           (this.deletionInterval / MSECS_PER_MINUTE) + " minutes, Emptier interval = " +
           (this.emptierInterval / MSECS_PER_MINUTE) + " minutes.");
 }
 
Example 2
Source Project: hbase   File: MemorySizeUtil.java    License: Apache License 2.0
/**
 * @param conf used to read cache configs
 * @return the number of bytes to use for LRU, negative if disabled.
 * @throws IllegalArgumentException if HFILE_BLOCK_CACHE_SIZE_KEY is > 1.0
 */
public static long getOnHeapCacheSize(final Configuration conf) {
  float cachePercentage = conf.getFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY,
    HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT);
  if (cachePercentage <= 0.0001f) {
    return -1;
  }
  if (cachePercentage > 1.0) {
    throw new IllegalArgumentException(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY +
      " must be between 0.0 and 1.0, and not > 1.0");
  }
  long max = -1L;
  final MemoryUsage usage = safeGetHeapMemoryUsage();
  if (usage != null) {
    max = usage.getMax();
  }

  // Calculate the number of bytes of heap to give the block cache.
  return (long) (max * cachePercentage);
}
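
A hypothetical call site (the variable name is illustrative) shows why the negative return value matters:

long cacheBytes = MemorySizeUtil.getOnHeapCacheSize(conf);
if (cacheBytes < 0) {
  // block cache disabled; skip constructing it
} else {
  // size the on-heap LRU cache to cacheBytes
}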
 
Example 3
public CryptoExtension(Configuration conf,
    KeyProviderCryptoExtension keyProviderCryptoExtension) {
  this.keyProviderCryptoExtension = keyProviderCryptoExtension;
  encKeyVersionQueue =
      new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
          conf.getInt(KMS_KEY_CACHE_SIZE,
              KMS_KEY_CACHE_SIZE_DEFAULT),
          conf.getFloat(KMS_KEY_CACHE_LOW_WATERMARK,
              KMS_KEY_CACHE_LOW_WATERMARK_DEFAULT),
          conf.getInt(KMS_KEY_CACHE_EXPIRY_MS,
              KMS_KEY_CACHE_EXPIRY_DEFAULT),
          conf.getInt(KMS_KEY_CACHE_NUM_REFILL_THREADS,
              KMS_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
          SyncGenerationPolicy.LOW_WATERMARK, new EncryptedQueueRefiller()
      );
}
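
Only the second constructor argument above comes from getFloat(): the cache low watermark, a fraction of the cache size below which the queue is refilled (typically 0.3f by default, i.e. refill once the queue drops below 30%); the remaining tunables are plain ints.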
 
Example 4
Source Project: RDFS   File: DatanodeBenThread.java    License: Apache License 2.0
public DatanodeBenThread(Configuration conf, Path input, Path output, int id,
    RUNNING_TYPE init_type, DatanodeBenRunTimeConstants rtc) throws IOException{
  super(conf, input, output, rtc);
  this.rtc = rtc;
  this.replication = (short)conf.getInt(REPLICATION_KEY, DEFAULT_REPLICATION_NUM);
  this.max_size = conf.getLong(FILE_SIZE_KEY, DEFAULT_FILE_SIZE) * 1024 * 1024;
  this.pread = conf.getFloat(READ_PERCENT_KEY, DEFAULT_READ_PERCENT);
  this.tb = new TokenBucket(rtc.data_rate);
  this.id = id;
  this.thread_name = rtc.task_name + "_" + id;
  this.running_type = init_type;
  if (running_type.equals(RUNNING_TYPE.PREPARE)) {
    this.file_prefix = rtc.cur_datanode + thread_name +  "_part";
  } else {
    this.file_prefix = thread_name + "_part";
    this.nsPickLists = rtc.pickLists.get(conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
    this.dfs = (DistributedFileSystem)fs;
    float f = rb.nextFloat();
    if (f < pread + 1e-9) {
      this.running_type = RUNNING_TYPE.READ;
    } else {
      this.outputPath = new Path(outputPath, thread_name);
      this.running_type = RUNNING_TYPE.WRITE;
    }
  }
  fs.mkdirs(this.outputPath); 
}
 
Example 5
Source Project: RDFS   File: DF.java    License: Apache License 2.0
public static long getReservedSpace(File path, Configuration conf) {
  long reservedBytes = conf.getLong("dfs.datanode.du.reserved", 0);
  double reservedPercent = conf
      .getFloat("dfs.datanode.du.reserved.percent", 0);
  return Math.max(reservedBytes, (long) (path.getTotalSpace()
      * (reservedPercent / 100.0)));
}
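
As a worked example with made-up numbers: on a 100 GiB volume, with dfs.datanode.du.reserved set to 1073741824 (1 GiB) and dfs.datanode.du.reserved.percent set to 5, the percentage term comes to 5 GiB, so the method returns max(1 GiB, 5 GiB) = 5 GiB.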
 
Example 6
Source Project: anthelion   File: OPICScoringFilter.java    License: Apache License 2.0
public void setConf(Configuration conf) {
  this.conf = conf;
  scorePower = conf.getFloat("indexer.score.power", 0.5f);
  internalScoreFactor = conf.getFloat("db.score.link.internal", 1.0f);
  externalScoreFactor = conf.getFloat("db.score.link.external", 1.0f);
  countFiltered = conf.getBoolean("db.score.count.filtered", false);
}
 
Example 7
Source Project: hadoop   File: RMContainerAllocator.java    License: Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  reduceSlowStart = conf.getFloat(
      MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 
      DEFAULT_COMPLETED_MAPS_PERCENT_FOR_REDUCE_SLOWSTART);
  maxReduceRampupLimit = conf.getFloat(
      MRJobConfig.MR_AM_JOB_REDUCE_RAMPUP_UP_LIMIT, 
      MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_RAMP_UP_LIMIT);
  maxReducePreemptionLimit = conf.getFloat(
      MRJobConfig.MR_AM_JOB_REDUCE_PREEMPTION_LIMIT,
      MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_PREEMPTION_LIMIT);
  allocationDelayThresholdMs = conf.getInt(
      MRJobConfig.MR_JOB_REDUCER_PREEMPT_DELAY_SEC,
      MRJobConfig.DEFAULT_MR_JOB_REDUCER_PREEMPT_DELAY_SEC) * 1000;//sec -> ms
  maxRunningMaps = conf.getInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT,
      MRJobConfig.DEFAULT_JOB_RUNNING_MAP_LIMIT);
  maxRunningReduces = conf.getInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT,
      MRJobConfig.DEFAULT_JOB_RUNNING_REDUCE_LIMIT);
  RackResolver.init(conf);
  retryInterval = getConfig().getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS,
                              MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS);
  mapNodeLabelExpression = conf.get(MRJobConfig.MAP_NODE_LABEL_EXP);
  reduceNodeLabelExpression = conf.get(MRJobConfig.REDUCE_NODE_LABEL_EXP);
  // Init startTime to current time. If all goes well, it will be reset after
  // first attempt to contact RM.
  retrystartTime = System.currentTimeMillis();
}
 
Example 8
Source Project: hadoop-gpu   File: NewJobWeightBooster.java    License: Apache License 2.0
public void setConf(Configuration conf) {
  if (conf != null) {
    factor = conf.getFloat("mapred.newjobweightbooster.factor",
        DEFAULT_FACTOR);
    duration = conf.getLong("mapred.newjobweightbooster.duration",
        DEFAULT_DURATION);
  }
  super.setConf(conf);
}
 
Example 9
Source Project: hadoop   File: DFSUtil.java    License: Apache License 2.0
/**
 * Get DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION from configuration.
 * 
 * @param conf Configuration
 * @return Value of DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION
 */
public static float getInvalidateWorkPctPerIteration(Configuration conf) {
  float blocksInvalidateWorkPct = conf.getFloat(
      DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT);
  Preconditions.checkArgument(
      (blocksInvalidateWorkPct > 0 && blocksInvalidateWorkPct <= 1.0f),
      DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION +
      " = '" + blocksInvalidateWorkPct + "' is invalid. " +
      "It should be a positive, non-zero float value, not greater than 1.0f, " +
      "to indicate a percentage.");
  return blocksInvalidateWorkPct;
}
 
Example 10
Source Project: hadoop   File: CacheManager.java    License: Apache License 2.0
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  float cachedBlocksPercent = conf.getFloat(
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (cachedBlocksPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    cachedBlocksPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  this.cachedBlocks = new LightWeightGSet<CachedBlock, CachedBlock>(
        LightWeightGSet.computeCapacity(cachedBlocksPercent,
            "cachedBlocks"));

}
 
Example 11
Source Project: RDFS   File: TrashPolicyBase.java    License: Apache License 2.0
Emptier(Configuration conf) throws IOException {
  this.conf = conf;
  this.emptierInterval = (long)
                         (conf.getFloat("fs.trash.checkpoint.interval", 0) *
                          MSECS_PER_MINUTE);
  if (this.emptierInterval > deletionInterval ||
      this.emptierInterval == 0) {
    LOG.warn("The configured interval for checkpoint is " +
             (this.emptierInterval / MSECS_PER_MINUTE) + " minutes." +
             " Using interval of " +
             (deletionInterval / MSECS_PER_MINUTE) +
             " minutes that is used for deletion instead");
    this.emptierInterval = deletionInterval;
  }
}
 
Example 12
Source Project: tez   File: SkewAnalyzer.java    License: Apache License 2.0
public SkewAnalyzer(Configuration config) {
  this.config = config;
  maxRatio = config.getFloat(ATTEMPT_SHUFFLE_KEY_GROUP_MAX_RATIO,
      ATTEMPT_SHUFFLE_KEY_GROUP_MAX_RATIO_DEFAULT);
  minRatio = config.getFloat(ATTEMPT_SHUFFLE_KEY_GROUP_MIN_RATIO,
      ATTEMPT_SHUFFLE_KEY_GROUP_MIN_RATIO_DEFAULT);
  maxShuffleBytesPerSource = config.getLong(SHUFFLE_BYTES_PER_ATTEMPT_PER_SOURCE,
      SHUFFLE_BYTES_PER_ATTEMPT_PER_SOURCE_DEFAULT);
}
 
Example 13
Source Project: big-c   File: JobImpl.java    License: Apache License 2.0
public JobImpl(JobId jobId, ApplicationAttemptId applicationAttemptId,
    Configuration conf, EventHandler eventHandler,
    TaskAttemptListener taskAttemptListener,
    JobTokenSecretManager jobTokenSecretManager,
    Credentials jobCredentials, Clock clock,
    Map<TaskId, TaskInfo> completedTasksFromPreviousRun, MRAppMetrics metrics,
    OutputCommitter committer, boolean newApiCommitter, String userName,
    long appSubmitTime, List<AMInfo> amInfos, AppContext appContext,
    JobStateInternal forcedState, String forcedDiagnostic) {
  this.applicationAttemptId = applicationAttemptId;
  this.jobId = jobId;
  this.jobName = conf.get(JobContext.JOB_NAME, "<missing job name>");
  this.conf = new JobConf(conf);
  this.metrics = metrics;
  this.clock = clock;
  this.completedTasksFromPreviousRun = completedTasksFromPreviousRun;
  this.amInfos = amInfos;
  this.appContext = appContext;
  this.userName = userName;
  this.queueName = conf.get(MRJobConfig.QUEUE_NAME, "default");
  this.appSubmitTime = appSubmitTime;
  this.oldJobId = TypeConverter.fromYarn(jobId);
  this.committer = committer;
  this.newApiCommitter = newApiCommitter;

  this.taskAttemptListener = taskAttemptListener;
  this.eventHandler = eventHandler;
  ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
  this.readLock = readWriteLock.readLock();
  this.writeLock = readWriteLock.writeLock();

  this.jobCredentials = jobCredentials;
  this.jobTokenSecretManager = jobTokenSecretManager;

  this.aclsManager = new JobACLsManager(conf);
  this.username = System.getProperty("user.name");
  this.jobACLs = aclsManager.constructJobACLs(conf);

  ThreadFactory threadFactory = new ThreadFactoryBuilder()
    .setNameFormat("Job Fail Wait Timeout Monitor #%d")
    .setDaemon(true)
    .build();
  this.executor = new ScheduledThreadPoolExecutor(1, threadFactory);

  // This "this leak" is okay because the retained pointer is in an
  //  instance variable.
  stateMachine = stateMachineFactory.make(this);
  this.forcedState  = forcedState;
  if(forcedDiagnostic != null) {
    this.diagnostics.add(forcedDiagnostic);
  }
  
  this.maxAllowedFetchFailuresFraction = conf.getFloat(
      MRJobConfig.MAX_ALLOWED_FETCH_FAILURES_FRACTION,
      MRJobConfig.DEFAULT_MAX_ALLOWED_FETCH_FAILURES_FRACTION);
  this.maxFetchFailuresNotifications = conf.getInt(
      MRJobConfig.MAX_FETCH_FAILURES_NOTIFICATIONS,
      MRJobConfig.DEFAULT_MAX_FETCH_FAILURES_NOTIFICATIONS);
}
 
Example 14
Source Project: big-c   File: NodeStatusUpdaterImpl.java    License: Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  int memoryMb = 
      conf.getInt(
          YarnConfiguration.NM_PMEM_MB, YarnConfiguration.DEFAULT_NM_PMEM_MB);
  float vMemToPMem =             
      conf.getFloat(
          YarnConfiguration.NM_VMEM_PMEM_RATIO, 
          YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO); 
  int virtualMemoryMb = (int)Math.ceil(memoryMb * vMemToPMem);
  
  int virtualCores =
      conf.getInt(
          YarnConfiguration.NM_VCORES, YarnConfiguration.DEFAULT_NM_VCORES);

  this.totalResource = Resource.newInstance(memoryMb, virtualCores);
  metrics.addResource(totalResource);
  this.tokenKeepAliveEnabled = isTokenKeepAliveEnabled(conf);
  this.tokenRemovalDelayMs =
      conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
          YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);

  this.minimumResourceManagerVersion = conf.get(
      YarnConfiguration.NM_RESOURCEMANAGER_MINIMUM_VERSION,
      YarnConfiguration.DEFAULT_NM_RESOURCEMANAGER_MINIMUM_VERSION);
  
  // Default duration to track stopped containers on the nodemanager is 10
  // minutes. This should not be assigned a very large value, as it will
  // remember all the containers stopped during that time.
  durationToTrackStoppedContainers =
      conf.getLong(YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS,
        600000);
  if (durationToTrackStoppedContainers < 0) {
    String message = "Invalid configuration for "
      + YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS + " default "
        + "value is 10Min(600000).";
    LOG.error(message);
    throw new YarnException(message);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(YARN_NODEMANAGER_DURATION_TO_TRACK_STOPPED_CONTAINERS + " :"
      + durationToTrackStoppedContainers);
  }
  super.serviceInit(conf);
  LOG.info("Initialized nodemanager for " + nodeId + ":" +
      " physical-memory=" + memoryMb + " virtual-memory=" + virtualMemoryMb +
      " virtual-cores=" + virtualCores);
}
 
Example 15
Source Project: hbase   File: BaseLoadBalancer.java    License: Apache License 2.0
protected void setSlop(Configuration conf) {
  this.slop = conf.getFloat("hbase.regions.slop", (float) 0.2);
  this.overallSlop = conf.getFloat("hbase.regions.overallSlop", slop);
}
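
Note the (float) cast on the first default: 0.2 is a double literal in Java, and getFloat() expects a float, so either the cast or the float literal 0.2f (as used in the other examples) is required for the call to compile.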
 
Example 16
Source Project: hadoop   File: DatanodeManager.java    License: Apache License 2.0
DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
    final Configuration conf) throws IOException {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  
  this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf);
  this.decomManager = new DecommissionManager(namesystem, blockManager,
      heartbeatManager);
  this.fsClusterStats = newFSClusterStats();

  networktopology = NetworkTopology.getInstance(conf);

  this.defaultXferPort = NetUtils.createSocketAddr(
        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
  this.defaultInfoPort = NetUtils.createSocketAddr(
        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort();
  this.defaultInfoSecurePort = NetUtils.createSocketAddr(
      conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
          DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
  this.defaultIpcPort = NetUtils.createSocketAddr(
        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
  try {
    this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
      conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
  } catch (IOException e) {
    LOG.error("error reading hosts files: ", e);
  }

  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
          ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);
  
  this.rejectUnresolvedTopologyDN = conf.getBoolean(
      DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY,
      DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_DEFAULT);
  
  // If the dns to switch mapping supports cache, resolve network
  // locations of those hosts in the include list and store the mapping
  // in the cache; so future calls to resolve will be fast.
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    final ArrayList<String> locations = new ArrayList<String>();
    for (InetSocketAddress addr : hostFileManager.getIncludes()) {
      locations.add(addr.getAddress().getHostAddress());
    }
    dnsToSwitchMapping.resolve(locations);
  }

  final long heartbeatIntervalSeconds = conf.getLong(
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
  final int heartbeatRecheckInterval = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
      + 10 * 1000 * heartbeatIntervalSeconds;
  final int blockInvalidateLimit = Math.max(20*(int)(heartbeatIntervalSeconds),
      DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
  this.blockInvalidateLimit = conf.getInt(
      DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
  LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
      + "=" + this.blockInvalidateLimit);

  this.checkIpHostnameInRegistration = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
      DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT);
  LOG.info(DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY
      + "=" + checkIpHostnameInRegistration);

  this.avoidStaleDataNodesForRead = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT);
  this.avoidStaleDataNodesForWrite = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
  this.staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval);
  this.ratioUseStaleDataNodesForWrite = conf.getFloat(
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
  Preconditions.checkArgument(
      (ratioUseStaleDataNodesForWrite > 0 && 
          ratioUseStaleDataNodesForWrite <= 1.0f),
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
      " = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
      "It should be a positive non-zero float value, not greater than 1.0f.");
  this.timeBetweenResendingCachingDirectivesMs = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS,
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS_DEFAULT);
  this.blocksPerPostponedMisreplicatedBlocksRescan = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_BLOCKS_PER_POSTPONEDBLOCKS_RESCAN_KEY,
      DFSConfigKeys.DFS_NAMENODE_BLOCKS_PER_POSTPONEDBLOCKS_RESCAN_KEY_DEFAULT);
}
 
Example 17
Source Project: big-c   File: ContainersMonitorImpl.java    License: Apache License 2.0
@Override
protected void serviceInit(Configuration conf) throws Exception {
  this.monitoringInterval =
      conf.getLong(YarnConfiguration.NM_CONTAINER_MON_INTERVAL_MS,
          YarnConfiguration.DEFAULT_NM_CONTAINER_MON_INTERVAL_MS);

  Class<? extends ResourceCalculatorPlugin> clazz =
      conf.getClass(YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR, null,
          ResourceCalculatorPlugin.class);
  this.resourceCalculatorPlugin =
      ResourceCalculatorPlugin.getResourceCalculatorPlugin(clazz, conf);
  LOG.info(" Using ResourceCalculatorPlugin : "
      + this.resourceCalculatorPlugin);
  processTreeClass = conf.getClass(YarnConfiguration.NM_CONTAINER_MON_PROCESS_TREE, null,
          ResourceCalculatorProcessTree.class);
  this.conf = conf;
  LOG.info(" Using ResourceCalculatorProcessTree : "
      + this.processTreeClass);

  this.containerMetricsEnabled =
      conf.getBoolean(YarnConfiguration.NM_CONTAINER_METRICS_ENABLE,
          YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_ENABLE);
  this.containerMetricsPeriodMs =
      conf.getLong(YarnConfiguration.NM_CONTAINER_METRICS_PERIOD_MS,
          YarnConfiguration.DEFAULT_NM_CONTAINER_METRICS_PERIOD_MS);

  long configuredPMemForContainers = conf.getLong(
      YarnConfiguration.NM_PMEM_MB,
      YarnConfiguration.DEFAULT_NM_PMEM_MB) * 1024 * 1024l;

  long configuredVCoresForContainers = conf.getLong(
      YarnConfiguration.NM_VCORES,
      YarnConfiguration.DEFAULT_NM_VCORES);


  // Setting these irrespective of whether checks are enabled. Required in
  // the UI.
  // ///////// Physical memory configuration //////
  this.maxPmemAllottedForContainers = configuredPMemForContainers;
  this.maxVCoresAllottedForContainers = configuredVCoresForContainers;

  // ///////// Virtual memory configuration //////
  float vmemRatio = conf.getFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO,
      YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO);
  Preconditions.checkArgument(vmemRatio > 0.99f,
      YarnConfiguration.NM_VMEM_PMEM_RATIO + " should be at least 1.0");
  this.maxVmemAllottedForContainers =
      (long) (vmemRatio * configuredPMemForContainers);

  pmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED,
      YarnConfiguration.DEFAULT_NM_PMEM_CHECK_ENABLED);
  vmemCheckEnabled = conf.getBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED,
      YarnConfiguration.DEFAULT_NM_VMEM_CHECK_ENABLED);
  LOG.info("Physical memory check enabled: " + pmemCheckEnabled);
  LOG.info("Virtual memory check enabled: " + vmemCheckEnabled);

  nodeCpuPercentageForYARN =
      NodeManagerHardwareUtils.getNodeCpuPercentage(conf);

  if (pmemCheckEnabled) {
    // Logging if actual pmem cannot be determined.
    long totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
    if (this.resourceCalculatorPlugin != null) {
      totalPhysicalMemoryOnNM = this.resourceCalculatorPlugin
          .getPhysicalMemorySize();
      if (totalPhysicalMemoryOnNM <= 0) {
        LOG.warn("NodeManager's totalPmem could not be calculated. "
            + "Setting it to " + UNKNOWN_MEMORY_LIMIT);
        totalPhysicalMemoryOnNM = UNKNOWN_MEMORY_LIMIT;
      }
    }

    if (totalPhysicalMemoryOnNM != UNKNOWN_MEMORY_LIMIT &&
        this.maxPmemAllottedForContainers > totalPhysicalMemoryOnNM * 0.80f) {
      LOG.warn("NodeManager configured with "
          + TraditionalBinaryPrefix.long2String(maxPmemAllottedForContainers,
              "", 1)
          + " physical memory allocated to containers, which is more than "
          + "80% of the total physical memory available ("
          + TraditionalBinaryPrefix.long2String(totalPhysicalMemoryOnNM, "",
              1) + "). Thrashing might happen.");
    }
  }
  super.serviceInit(conf);
}
 
Example 18
Source Project: hadoop   File: KMSClientProvider.java    License: Apache License 2.0
public KMSClientProvider(URI uri, Configuration conf) throws IOException {
  super(conf);
  kmsUrl = createServiceURL(extractKMSPath(uri));
  if ("https".equalsIgnoreCase(new URL(kmsUrl).getProtocol())) {
    sslFactory = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
    try {
      sslFactory.init();
    } catch (GeneralSecurityException ex) {
      throw new IOException(ex);
    }
  }
  int timeout = conf.getInt(TIMEOUT_ATTR, DEFAULT_TIMEOUT);
  authRetry = conf.getInt(AUTH_RETRY, DEFAULT_AUTH_RETRY);
  configurator = new TimeoutConnConfigurator(timeout, sslFactory);
  encKeyVersionQueue =
      new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
          conf.getInt(
              CommonConfigurationKeysPublic.KMS_CLIENT_ENC_KEY_CACHE_SIZE,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_SIZE_DEFAULT),
          conf.getFloat(
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_LOW_WATERMARK_DEFAULT),
          conf.getInt(
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_MS,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_EXPIRY_DEFAULT),
          conf.getInt(
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS,
              CommonConfigurationKeysPublic.
                  KMS_CLIENT_ENC_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
          new EncryptedQueueRefiller());
  authToken = new DelegationTokenAuthenticatedURL.Token();
  actualUgi =
      (UserGroupInformation.getCurrentUser().getAuthenticationMethod() ==
      UserGroupInformation.AuthenticationMethod.PROXY) ? UserGroupInformation
          .getCurrentUser().getRealUser() : UserGroupInformation
          .getCurrentUser();
}
 
Example 19
Source Project: tajo   File: OrcConf.java    License: Apache License 2.0
public static float getFloatVar(Configuration conf, ConfVars var) {
  return conf.getFloat(var.varname, var.defaultFloatVal);
}
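
A hypothetical call site, assuming a ConfVars constant that carries the varname and defaultFloatVal fields referenced above (SOME_FLOAT_VAR is illustrative):

float value = OrcConf.getFloatVar(conf, ConfVars.SOME_FLOAT_VAR);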
 
Example 20
Source Project: hbase   File: Waiter.java    License: Apache License 2.0
/**
 * Returns the 'wait for ratio' used in the {@link #sleep(Configuration, long)},
 * {@link #waitFor(Configuration, long, Predicate)},
 * {@link #waitFor(Configuration, long, long, Predicate)} and
 * {@link #waitFor(Configuration, long, long, boolean, Predicate)} methods of the class.
 * <p/>
 * This is useful to dynamically adjust maximum timeout values when the same test cases run in
 * different test machine settings without recompiling & re-deploying code.
 * <p/>
 * The value is obtained from the Java System property or configuration setting
 * <code>hbase.test.wait.for.ratio</code> which defaults to <code>1</code>.
 * @param conf the configuration
 * @return the 'wait for ratio' for the current test run.
 */
public static float getWaitForRatio(Configuration conf) {
  if (waitForRatio < 0) {
    // System property takes precedence over configuration setting
    if (System.getProperty(HBASE_TEST_WAIT_FOR_RATIO) != null) {
      waitForRatio = Float.parseFloat(System.getProperty(HBASE_TEST_WAIT_FOR_RATIO));
    } else {
      waitForRatio = conf.getFloat(HBASE_TEST_WAIT_FOR_RATIO, HBASE_WAIT_FOR_RATIO_DEFAULT);
    }
  }
  return waitForRatio;
}