Java Code Examples for org.apache.hadoop.conf.Configuration#getInt()

The following examples show how to use org.apache.hadoop.conf.Configuration#getInt(). Each example is drawn from an open-source project; the source file, project, and license are noted above it.
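
For reference, getInt(String name, int defaultValue) parses the named property as an int, returning the supplied default when the property is unset; hexadecimal values prefixed with 0x are also accepted, and a set-but-unparseable value raises a NumberFormatException. The snippet below is a minimal standalone sketch of this behavior; the demo.* property names are made up for illustration:

import org.apache.hadoop.conf.Configuration;

public class GetIntDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Property is unset, so the supplied default (3) is returned.
    int replication = conf.getInt("demo.replication", 3);

    // Property is set, so getInt parses the stored string value.
    conf.setInt("demo.buffer.size", 8192);
    int bufferSize = conf.getInt("demo.buffer.size", 4096);

    System.out.println(replication + " " + bufferSize); // prints: 3 8192
  }
}

Note how several examples below pass another conf.getFloat(...) or conf.getInt(...) call as the default value, layering a specific key over a more general one (Example 1 does this for the block cache and memstore ranges).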
Example 1
Source File: DefaultHeapMemoryTuner.java    From hbase with Apache License 2.0
@Override
public void setConf(Configuration conf) {
  this.conf = conf;
  this.maximumStepSize = conf.getFloat(MAX_STEP_KEY, DEFAULT_MAX_STEP_VALUE);
  this.minimumStepSize = conf.getFloat(MIN_STEP_KEY, DEFAULT_MIN_STEP_VALUE);
  this.step = this.maximumStepSize;
  this.sufficientMemoryLevel = conf.getFloat(SUFFICIENT_MEMORY_LEVEL_KEY,
      DEFAULT_SUFFICIENT_MEMORY_LEVEL_VALUE);
  this.tunerLookupPeriods = conf.getInt(LOOKUP_PERIODS_KEY, DEFAULT_LOOKUP_PERIODS);
  this.blockCachePercentMinRange = conf.getFloat(BLOCK_CACHE_SIZE_MIN_RANGE_KEY,
      conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT));
  this.blockCachePercentMaxRange = conf.getFloat(BLOCK_CACHE_SIZE_MAX_RANGE_KEY,
      conf.getFloat(HFILE_BLOCK_CACHE_SIZE_KEY, HConstants.HFILE_BLOCK_CACHE_SIZE_DEFAULT));
  this.globalMemStorePercentMinRange = conf.getFloat(MEMSTORE_SIZE_MIN_RANGE_KEY,
      MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));
  this.globalMemStorePercentMaxRange = conf.getFloat(MEMSTORE_SIZE_MAX_RANGE_KEY,
      MemorySizeUtil.getGlobalMemStoreHeapPercent(conf, false));
  this.globalMemStoreLimitLowMarkPercent = MemorySizeUtil.getGlobalMemStoreHeapLowerMark(conf,
      true);
  // Default value of periods to ignore is number of lookup periods
  this.numPeriodsToIgnore = conf.getInt(NUM_PERIODS_TO_IGNORE, this.tunerLookupPeriods);
  this.rollingStatsForCacheMisses = new RollingStatCalculator(this.tunerLookupPeriods);
  this.rollingStatsForFlushes = new RollingStatCalculator(this.tunerLookupPeriods);
  this.rollingStatsForEvictions = new RollingStatCalculator(this.tunerLookupPeriods);
  this.rollingStatsForTunerSteps = new RollingStatCalculator(this.tunerLookupPeriods);
}
 
Example 2
Source File: FileSystem.java    From big-c with Apache License 2.0
/**
 * Return a set of server default configuration values
 * @return server default configuration values
 * @throws IOException
 * @deprecated use {@link #getServerDefaults(Path)} instead
 */
@Deprecated
public FsServerDefaults getServerDefaults() throws IOException {
  Configuration conf = getConf();
  // CRC32 is chosen as the default as it is available in all
  // releases that support checksums.
  // The client trash configuration is ignored.
  return new FsServerDefaults(getDefaultBlockSize(), 
      conf.getInt("io.bytes.per.checksum", 512), 
      64 * 1024, 
      getDefaultReplication(),
      conf.getInt("io.file.buffer.size", 4096),
      false,
      CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_DEFAULT,
      DataChecksum.Type.CRC32);
}
 
Example 3
Source File: DelegationTokenRenewer.java    From hadoop with Apache License 2.0
protected ThreadPoolExecutor createNewThreadPoolService(Configuration conf) {
  int nThreads = conf.getInt(
      YarnConfiguration.RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT,
      YarnConfiguration.DEFAULT_RM_DELEGATION_TOKEN_RENEWER_THREAD_COUNT);

  ThreadFactory tf = new ThreadFactoryBuilder()
      .setNameFormat("DelegationTokenRenewer #%d")
      .build();
  ThreadPoolExecutor pool =
      new ThreadPoolExecutor(nThreads, nThreads, 3L,
          TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>());
  pool.setThreadFactory(tf);
  pool.allowCoreThreadTimeOut(true);
  return pool;
}
 
Example 4
Source File: FileFixer.java    From RDFS with Apache License 2.0
FileFixer(Configuration conf) throws IOException {
  this.conf = conf;
  blockFixInterval = conf.getInt("hightide.blockfix.interval",
                                 blockFixInterval);
  numThreads = conf.getInt("hightide.blockfix.numthreads", numThreads);

  pathToPolicy = new LinkedList<PathToPolicy>();
  executor = new ThreadPoolExecutor( numThreads, numThreads,
        THREADS_KEEP_ALIVE_SECONDS, TimeUnit.SECONDS,
        new LinkedBlockingQueue<Runnable>());

  // start a thread to purge entries from this set automatically
  filesBeingFixed = new PendingReplication(conf.getInt(
                          "dfs.hightide.pending.timeout.sec", -1) * 1000L);
}
 
Example 5
Source File: Main.java    From hbase-indexer with Apache License 2.0
private void setupMetrics(Configuration conf) {
    String gangliaHost = conf.get(ConfKeys.GANGLIA_SERVER);
    if (gangliaHost != null) {
        int gangliaPort = conf.getInt(ConfKeys.GANGLIA_PORT, 8649);
        int interval = conf.getInt(ConfKeys.GANGLIA_INTERVAL, 60);
        log.info("Enabling Ganglia reporting to " + gangliaHost + ":" + gangliaPort);
        GangliaReporter.enable(interval, TimeUnit.SECONDS, gangliaHost, gangliaPort);
    }
}
 
Example 6
Source File: HBaseRecordReaderBase.java    From SpyGlass with Apache License 2.0
/**
 * @param htable
 *          the {@link org.apache.hadoop.hbase.client.HTable} to scan.
 */
public void setHTable(HTable htable) {
    Configuration conf = htable.getConfiguration();
    logScannerActivity = conf.getBoolean(ScannerCallable.LOG_SCANNER_ACTIVITY,
            false);
    logPerRowCount = conf.getInt(LOG_PER_ROW_COUNT, 100);
    this.htable = htable;
}
 
Example 7
Source File: DirectoryScanner.java    From hadoop with Apache License 2.0
DirectoryScanner(DataNode datanode, FsDatasetSpi<?> dataset, Configuration conf) {
  this.datanode = datanode;
  this.dataset = dataset;
  int interval = conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
      DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
  scanPeriodMsecs = interval * 1000L; //msec
  int threads = 
      conf.getInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
                  DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);

  reportCompileThreadPool = Executors.newFixedThreadPool(threads, 
      new Daemon.DaemonFactory());
  masterThread = new ScheduledThreadPoolExecutor(1,
      new Daemon.DaemonFactory());
}
 
Example 8
Source File: SequenceFileInputFilter.java    From big-c with Apache License 2.0
/**
 * Configure the filter by checking the configuration.
 *
 * @param conf configuration
 */
public void setConf(Configuration conf) {
  this.frequency = conf.getInt(FILTER_FREQUENCY, 10);
  if (this.frequency <= 0) {
    throw new RuntimeException(
      "Invalid (non-positive) " + FILTER_FREQUENCY + ": " + this.frequency);
  }
  this.conf = conf;
}
 
Example 9
Source File: ChecksumFileSystem.java    From hadoop with Apache License 2.0
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  if (conf != null) {
    bytesPerChecksum = conf.getInt(LocalFileSystemConfigKeys.LOCAL_FS_BYTES_PER_CHECKSUM_KEY,
                     LocalFileSystemConfigKeys.LOCAL_FS_BYTES_PER_CHECKSUM_DEFAULT);
  }
}
 
Example 10
Source File: TestAuxServices.java    From big-c with Apache License 2.0
@Override 
protected void serviceInit(Configuration conf) throws Exception {
  remaining_init = conf.getInt(idef + ".expected.init", 0);
  remaining_stop = conf.getInt(idef + ".expected.stop", 0);
  super.serviceInit(conf);
}
 
Example 11
Source File: SimpleDBProviderBackend.java    From incubator-sentry with Apache License 2.0
public SimpleDBProviderBackend(Configuration conf) throws Exception {
  this.conf = conf;
  this.retryCount = conf.getInt(ServiceConstants.ClientConfig.RETRY_COUNT_CONF, ServiceConstants.ClientConfig.RETRY_COUNT_DEFAULT);
  this.retryIntervalSec = conf.getInt(ServiceConstants.ClientConfig.RETRY_INTERVAL_SEC_CONF, ServiceConstants.ClientConfig.RETRY_INTERVAL_SEC_DEFAULT);
}
 
Example 12
Source File: File.java    From nutch-htmlunit with Apache License 2.0
/**
 * Set the {@link Configuration} object
 */
public void setConf(Configuration conf) {
  this.conf = conf;
  this.maxContentLength = conf.getInt("file.content.limit", 64 * 1024);
  this.crawlParents = conf.getBoolean("file.crawl.parent", true);
}
 
Example 13
Source File: Aegisthus.java    From aegisthus with Apache License 2.0
@Override
public int run(String[] args) throws Exception {
    Job job = Job.getInstance(getConf());
    Configuration configuration = job.getConfiguration();

    job.setJarByClass(Aegisthus.class);
    CommandLine cl = getOptions(args);
    if (cl == null) {
        return 1;
    }

    // Check all of the paths and load the sstable version from the input filenames
    List<Path> paths = Lists.newArrayList();
    if (cl.hasOption(Feature.CMD_ARG_INPUT_FILE)) {
        for (String input : cl.getOptionValues(Feature.CMD_ARG_INPUT_FILE)) {
            paths.add(new Path(input));
        }
    }
    if (cl.hasOption(Feature.CMD_ARG_INPUT_DIR)) {
        paths.addAll(getDataFiles(configuration, cl.getOptionValue(Feature.CMD_ARG_INPUT_DIR)));
    }
    LOG.info("Processing paths: {}", paths);

    // At this point we have the version of sstable that we can use for this run
    Descriptor.Version version = Descriptor.Version.CURRENT;
    if (cl.hasOption(Feature.CMD_ARG_SSTABLE_OUTPUT_VERSION)) {
        version = new Descriptor.Version(cl.getOptionValue(Feature.CMD_ARG_SSTABLE_OUTPUT_VERSION));
    }
    configuration.set(Feature.CONF_SSTABLE_VERSION, version.toString());

    if (configuration.get(Feature.CONF_CQL_SCHEMA) != null) {
        setConfigurationFromCql(configuration);
    }

    if (cl.hasOption(Feature.CMD_ARG_COMBINE_SPLITS)) {
        job.setInputFormatClass(AegisthusCombinedInputFormat.class);
    } else {
        job.setInputFormatClass(AegisthusInputFormat.class);
    }
    job.setMapOutputKeyClass(AegisthusKey.class);
    job.setMapOutputValueClass(AtomWritable.class);
    job.setOutputKeyClass(AegisthusKey.class);
    job.setOutputValueClass(RowWritable.class);
    job.setMapperClass(AegisthusKeyMapper.class);
    job.setReducerClass(CassSSTableReducer.class);
    job.setGroupingComparatorClass(AegisthusKeyGroupingComparator.class);
    job.setPartitionerClass(AegisthusKeyPartitioner.class);
    job.setSortComparatorClass(AegisthusKeySortingComparator.class);

    TextInputFormat.setInputPaths(job, paths.toArray(new Path[paths.size()]));

    if (cl.hasOption(Feature.CMD_ARG_PRODUCE_SSTABLE)) {
        job.setOutputFormatClass(SSTableOutputFormat.class);
    } else {
        job.setOutputFormatClass(JsonOutputFormat.class);
    }
    CustomFileNameFileOutputFormat.setOutputPath(job, new Path(cl.getOptionValue(Feature.CMD_ARG_OUTPUT_DIR)));

    job.submit();
    if (configuration.getBoolean(Feature.CONF_SHUTDOWN_HOOK, true)) {
        Runtime.getRuntime().addShutdownHook(new JobKiller(job));
    }

    System.out.println(job.getJobID());
    System.out.println(job.getTrackingURL());
    boolean success = job.waitForCompletion(true);

    if (success) {
        Counter errorCounter = job.getCounters().findCounter("aegisthus", "error_skipped_input");
        long errorCount = errorCounter != null ? errorCounter.getValue() : 0L;
        int maxAllowed = configuration.getInt(Feature.CONF_MAX_CORRUPT_FILES_TO_SKIP, 0);
        if (errorCounter != null && errorCount > maxAllowed) {
            LOG.error("Found {} corrupt files which is greater than the max allowed {}", errorCount, maxAllowed);
            success = false;
        } else if (errorCount > 0) {
            LOG.warn("Found {} corrupt files but not failing the job because the max allowed is {}",
                    errorCount, maxAllowed);
        }
    }

    return success ? 0 : 1;
}
 
Example 14
Source File: CacheConfig.java    From rubix with Apache License 2.0
public static int getTranportPoolMaxSize(Configuration conf)
{
  return conf.getInt(KEY_POOL_MAX_SIZE, DEFAULT_KEY_POOL_MAX_SIZE);
}
 
Example 15
Source File: DFSClient.java    From RDFS with Apache License 2.0
/**
 * Create a new DFSClient connected to the given nameNodeAddr or rpcNamenode.
 * Exactly one of nameNodeAddr or rpcNamenode must be null.
 */
DFSClient(InetSocketAddress nameNodeAddr, ClientProtocol rpcNamenode,
    Configuration conf, FileSystem.Statistics stats)
  throws IOException {
  this.conf = conf;
  this.stats = stats;
  this.socketTimeout = conf.getInt("dfs.socket.timeout",
                                   HdfsConstants.READ_TIMEOUT);
  this.socketReadExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_READ_EXTENSION,
      HdfsConstants.READ_TIMEOUT_EXTENSION);
  this.timeoutValue = this.socketTimeout;
  this.datanodeWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout",
                                          HdfsConstants.WRITE_TIMEOUT);
  this.datanodeWriteExtentionTimeout = conf.getInt(
      HdfsConstants.DFS_DATANODE_WRITE_EXTENTSION,
      HdfsConstants.WRITE_TIMEOUT_EXTENSION);    
  this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
  // dfs.write.packet.size is an internal config variable
  this.writePacketSize = conf.getInt("dfs.write.packet.size", 64*1024);
  this.minReadSpeedBps = conf.getLong("dfs.min.read.speed.bps", -1);
  this.maxBlockAcquireFailures = getMaxBlockAcquireFailures(conf);
  this.localHost = InetAddress.getLocalHost();
  
  // fetch network location of localhost
  this.pseuDatanodeInfoForLocalhost = new DatanodeInfo(new DatanodeID(
      this.localHost.getHostAddress()));
  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass("topology.node.switch.mapping.impl", ScriptBasedMapping.class,
        DNSToSwitchMapping.class), conf);
  ArrayList<String> tempList = new ArrayList<String>();
  tempList.add(this.localHost.getHostName());
  List<String> retList = dnsToSwitchMapping.resolve(tempList);
  if (retList != null && retList.size() > 0) {
    localhostNetworkLocation = retList.get(0);
    this.pseuDatanodeInfoForLocalhost.setNetworkLocation(localhostNetworkLocation);
  }

  // The hdfsTimeout is currently the same as the ipc timeout
  this.hdfsTimeout = Client.getTimeout(conf);

  this.closeFileTimeout = conf.getLong("dfs.client.closefile.timeout", this.hdfsTimeout);

  try {
    this.ugi = UnixUserGroupInformation.login(conf, true);
  } catch (LoginException e) {
    throw (IOException)(new IOException().initCause(e));
  }

  String taskId = conf.get("mapred.task.id");
  if (taskId != null) {
    this.clientName = "DFSClient_" + taskId + "_" + r.nextInt()
                    + "_" + Thread.currentThread().getId();
  } else {
    this.clientName = "DFSClient_" + r.nextInt();
  }
  defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
  defaultReplication = (short) conf.getInt("dfs.replication", 3);

  if (nameNodeAddr != null && rpcNamenode == null) {
    this.nameNodeAddr = nameNodeAddr;
    getNameNode();
  } else if (nameNodeAddr == null && rpcNamenode != null) {
    //This case is used for testing.
    if (rpcNamenode instanceof NameNode) {
      this.namenodeProtocolProxy = createRPCNamenode(((NameNode)rpcNamenode).getNameNodeAddress(), conf, ugi);
    }
    this.namenode = this.rpcNamenode = rpcNamenode;
  } else {
    throw new IllegalArgumentException(
        "Expecting exactly one of nameNodeAddr and rpcNamenode being null: "
        + "nameNodeAddr=" + nameNodeAddr + ", rpcNamenode=" + rpcNamenode);
  }
  // read directly from the block file if configured.
  this.shortCircuitLocalReads = conf.getBoolean("dfs.read.shortcircuit", false);
  if (this.shortCircuitLocalReads) {
    LOG.debug("Configured to shortcircuit reads to " + localHost);
  }
  this.leasechecker = new LeaseChecker(this.clientName, this.conf);
  // by default, if the ipTosValue is less than 0 (for example -1),
  // we will not set it in the socket.
  this.ipTosValue = conf.getInt("dfs.client.tos.value",
                                NetUtils.NOT_SET_IP_TOS);
  if (this.ipTosValue > NetUtils.IP_TOS_MAX_VALUE) {
    LOG.warn("dfs.client.tos.value " + ipTosValue +
             " exceeds the max allowed value " + NetUtils.IP_TOS_MAX_VALUE +
             ", will not take effect");
    this.ipTosValue = NetUtils.NOT_SET_IP_TOS;
  }
}
 
Example 16
Source File: SecureDataNodeStarter.java    From hadoop with Apache License 2.0
/**
 * Acquire privileged resources (i.e., the privileged ports) for the data
 * node. The privileged resources consist of the port of the RPC server and
 * the port of HTTP (not HTTPS) server.
 */
@VisibleForTesting
public static SecureResources getSecureResources(Configuration conf)
    throws Exception {
  HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
  boolean isSecure = UserGroupInformation.isSecurityEnabled();

  // Obtain secure port for data streaming to datanode
  InetSocketAddress streamingAddr  = DataNode.getStreamingAddr(conf);
  int socketWriteTimeout = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
      HdfsServerConstants.WRITE_TIMEOUT);

  ServerSocket ss = (socketWriteTimeout > 0) ? 
      ServerSocketChannel.open().socket() : new ServerSocket();
  ss.bind(streamingAddr, 0);

  // Check that we got the port we need
  if (ss.getLocalPort() != streamingAddr.getPort()) {
    throw new RuntimeException(
        "Unable to bind on specified streaming port in secure "
            + "context. Needed " + streamingAddr.getPort() + ", got "
            + ss.getLocalPort());
  }

  if (!SecurityUtil.isPrivilegedPort(ss.getLocalPort()) && isSecure) {
    throw new RuntimeException(
      "Cannot start secure datanode with unprivileged RPC ports");
  }

  System.err.println("Opened streaming server at " + streamingAddr);

  // Bind a port for the web server. The code intends to bind HTTP server to
  // privileged port only, as the client can authenticate the server using
  // certificates if they are communicating through SSL.
  final ServerSocketChannel httpChannel;
  if (policy.isHttpEnabled()) {
    httpChannel = ServerSocketChannel.open();
    InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
    httpChannel.socket().bind(infoSocAddr);
    InetSocketAddress localAddr = (InetSocketAddress) httpChannel.socket()
      .getLocalSocketAddress();

    if (localAddr.getPort() != infoSocAddr.getPort()) {
      throw new RuntimeException("Unable to bind on specified info port in secure " +
          "context. Needed " + infoSocAddr.getPort() + ", got " + localAddr.getPort());
    }
    System.err.println("Successfully obtained privileged resources (streaming port = "
        + ss + ") (http listener port = " + localAddr.getPort() + ")");

    if (localAddr.getPort() > 1023 && isSecure) {
      throw new RuntimeException(
          "Cannot start secure datanode with unprivileged HTTP ports");
    }
    System.err.println("Opened info server at " + infoSocAddr);
  } else {
    httpChannel = null;
  }

  return new SecureResources(ss, httpChannel);
}
 
Example 17
Source File: DatanodeManager.java    From hadoop with Apache License 2.0
DatanodeManager(final BlockManager blockManager, final Namesystem namesystem,
    final Configuration conf) throws IOException {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  
  this.heartbeatManager = new HeartbeatManager(namesystem, blockManager, conf);
  this.decomManager = new DecommissionManager(namesystem, blockManager,
      heartbeatManager);
  this.fsClusterStats = newFSClusterStats();

  networktopology = NetworkTopology.getInstance(conf);

  this.defaultXferPort = NetUtils.createSocketAddr(
        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_ADDRESS_DEFAULT)).getPort();
  this.defaultInfoPort = NetUtils.createSocketAddr(
        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_DEFAULT)).getPort();
  this.defaultInfoSecurePort = NetUtils.createSocketAddr(
      conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY,
          DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT)).getPort();
  this.defaultIpcPort = NetUtils.createSocketAddr(
        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
  try {
    this.hostFileManager.refresh(conf.get(DFSConfigKeys.DFS_HOSTS, ""),
      conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
  } catch (IOException e) {
    LOG.error("error reading hosts files: ", e);
  }

  this.dnsToSwitchMapping = ReflectionUtils.newInstance(
      conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
          ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);
  
  this.rejectUnresolvedTopologyDN = conf.getBoolean(
      DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY,
      DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_DEFAULT);
  
  // If the dns to switch mapping supports cache, resolve network
  // locations of those hosts in the include list and store the mapping
  // in the cache; so future calls to resolve will be fast.
  if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
    final ArrayList<String> locations = new ArrayList<String>();
    for (InetSocketAddress addr : hostFileManager.getIncludes()) {
      locations.add(addr.getAddress().getHostAddress());
    }
    dnsToSwitchMapping.resolve(locations);
  }

  final long heartbeatIntervalSeconds = conf.getLong(
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
      DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT);
  final int heartbeatRecheckInterval = conf.getInt(
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval
      + 10 * 1000 * heartbeatIntervalSeconds;
  final int blockInvalidateLimit = Math.max(20 * (int) heartbeatIntervalSeconds,
      DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
  this.blockInvalidateLimit = conf.getInt(
      DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, blockInvalidateLimit);
  LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY
      + "=" + this.blockInvalidateLimit);

  this.checkIpHostnameInRegistration = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY,
      DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT);
  LOG.info(DFSConfigKeys.DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY
      + "=" + checkIpHostnameInRegistration);

  this.avoidStaleDataNodesForRead = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT);
  this.avoidStaleDataNodesForWrite = conf.getBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY,
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_DEFAULT);
  this.staleInterval = getStaleIntervalFromConf(conf, heartbeatExpireInterval);
  this.ratioUseStaleDataNodesForWrite = conf.getFloat(
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY,
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_DEFAULT);
  Preconditions.checkArgument(
      (ratioUseStaleDataNodesForWrite > 0 && 
          ratioUseStaleDataNodesForWrite <= 1.0f),
      DFSConfigKeys.DFS_NAMENODE_USE_STALE_DATANODE_FOR_WRITE_RATIO_KEY +
      " = '" + ratioUseStaleDataNodesForWrite + "' is invalid. " +
      "It should be a positive non-zero float value, not greater than 1.0f.");
  this.timeBetweenResendingCachingDirectivesMs = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS,
      DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_RETRY_INTERVAL_MS_DEFAULT);
  this.blocksPerPostponedMisreplicatedBlocksRescan = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_BLOCKS_PER_POSTPONEDBLOCKS_RESCAN_KEY,
      DFSConfigKeys.DFS_NAMENODE_BLOCKS_PER_POSTPONEDBLOCKS_RESCAN_KEY_DEFAULT);
}
 
Example 18
Source File: ContainerExecutor.java    From hadoop with Apache License 2.0
/** 
 *  Return a command to execute the given command in OS shell.
 *  On Windows, the passed in groupId can be used to launch
 *  and associate the given groupId in a process group. On
 *  non-Windows, groupId is ignored. 
 */
protected String[] getRunCommand(String command, String groupId,
    String userName, Path pidFile, Configuration conf, Resource resource) {
  boolean containerSchedPriorityIsSet = false;
  int containerSchedPriorityAdjustment = 
      YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY;

  if (conf.get(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY) != 
      null) {
    containerSchedPriorityIsSet = true;
    containerSchedPriorityAdjustment = conf 
        .getInt(YarnConfiguration.NM_CONTAINER_EXECUTOR_SCHED_PRIORITY, 
        YarnConfiguration.DEFAULT_NM_CONTAINER_EXECUTOR_SCHED_PRIORITY);
  }

  if (Shell.WINDOWS) {
    int cpuRate = -1;
    int memory = -1;
    if (resource != null) {
      if (conf
          .getBoolean(
              YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED,
              YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED)) {
        memory = resource.getMemory();
      }

      if (conf.getBoolean(
          YarnConfiguration.NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED,
          YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_CPU_LIMIT_ENABLED)) {
        int containerVCores = resource.getVirtualCores();
        int nodeVCores = conf.getInt(YarnConfiguration.NM_VCORES,
            YarnConfiguration.DEFAULT_NM_VCORES);
        // cap overall usage to the number of cores allocated to YARN
        int nodeCpuPercentage = Math
            .min(
                conf.getInt(
                    YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT,
                    YarnConfiguration.DEFAULT_NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT),
                100);
        nodeCpuPercentage = Math.max(0, nodeCpuPercentage);
        if (nodeCpuPercentage == 0) {
          String message = "Illegal value for "
              + YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT
              + ". Value cannot be less than or equal to 0.";
          throw new IllegalArgumentException(message);
        }
        float yarnVCores = (nodeCpuPercentage * nodeVCores) / 100.0f;
        // CPU should be set to a percentage * 100, e.g. 20% cpu rate limit
        // should be set as 20 * 100. The following setting is equal to:
        // 100 * (100 * (vcores / Total # of cores allocated to YARN))
        cpuRate = Math.min(10000,
            (int) ((containerVCores * 10000) / yarnVCores));
      }
    }
    return new String[] { Shell.WINUTILS, "task", "create", "-m",
        String.valueOf(memory), "-c", String.valueOf(cpuRate), groupId,
        "cmd /c " + command };
  } else {
    List<String> retCommand = new ArrayList<String>();
    if (containerSchedPriorityIsSet) {
      retCommand.addAll(Arrays.asList("nice", "-n",
          Integer.toString(containerSchedPriorityAdjustment)));
    }
    retCommand.addAll(Arrays.asList("bash", command));
    return retCommand.toArray(new String[retCommand.size()]);
  }
}
 
Example 19
Source File: CalculateStatsFromBaseCuboidPartitioner.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
public void setConf(Configuration conf) {
    this.conf = conf;
    hllShardBase = conf.getInt(BatchConstants.CFG_HLL_REDUCER_NUM, 1);
    logger.info("shard base for hll is " + hllShardBase);
}
 