Java Code Examples for org.apache.hadoop.conf.Configuration.getInt()

The following are Java code examples showing how to use the getInt() method of the org.apache.hadoop.conf.Configuration class. The examples are drawn from a variety of open-source projects.
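Before the examples, a quick refresher: getInt(String name, int defaultValue) returns the value of the named property parsed as an int, or the supplied default if the property is not set. A minimal sketch (the property names here are made up purely for illustration):

Configuration conf = new Configuration();
conf.setInt("example.worker.threads", 8);                 // set programmatically
int threads = conf.getInt("example.worker.threads", 4);   // -> 8
int retries = conf.getInt("example.retry.count", 3);      // not set -> default 3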
Example 1
Project: easyhbase   File: HBaseAsyncOperationFactory.java
public static HBaseAsyncOperation create(Configuration configuration) throws IOException {
    boolean enableAsyncMethod = configuration.getBoolean(ENABLE_ASYNC_METHOD,
            DEFAULT_ENABLE_ASYNC_METHOD);
    LOGGER.info("hbase.client.async.enable: " + enableAsyncMethod);
    if (!enableAsyncMethod) {
        return DisabledHBaseAsyncOperation.INSTANCE;
    }

    int queueSize = configuration.getInt(ASYNC_IN_QUEUE_SIZE, DEFAULT_ASYNC_IN_QUEUE_SIZE);

    if (configuration.get(ASYNC_PERIODIC_FLUSH_TIME, null) == null) {
        configuration.setInt(ASYNC_PERIODIC_FLUSH_TIME, DEFAULT_ASYNC_PERIODIC_FLUSH_TIME);
    }

    if (configuration.get(ASYNC_RETRY_COUNT, null) == null) {
        configuration.setInt(ASYNC_RETRY_COUNT, DEFAULT_ASYNC_RETRY_COUNT);
    }

    return new HBaseAsyncTemplate(configuration, queueSize);
}
 
Example 2
Project: hadoop-oss   File: EagerKeyGeneratorKeyProviderCryptoExtension.java
public CryptoExtension(Configuration conf, 
    KeyProviderCryptoExtension keyProviderCryptoExtension) {
  this.keyProviderCryptoExtension = keyProviderCryptoExtension;
  encKeyVersionQueue =
      new ValueQueue<KeyProviderCryptoExtension.EncryptedKeyVersion>(
          conf.getInt(KMS_KEY_CACHE_SIZE,
              KMS_KEY_CACHE_SIZE_DEFAULT),
          conf.getFloat(KMS_KEY_CACHE_LOW_WATERMARK,
              KMS_KEY_CACHE_LOW_WATERMARK_DEFAULT),
          conf.getInt(KMS_KEY_CACHE_EXPIRY_MS,
              KMS_KEY_CACHE_EXPIRY_DEFAULT),
          conf.getInt(KMS_KEY_CACHE_NUM_REFILL_THREADS,
              KMS_KEY_CACHE_NUM_REFILL_THREADS_DEFAULT),
          SyncGenerationPolicy.LOW_WATERMARK, new EncryptedQueueRefiller()
      );
}
 
Example 3
Project: hadoop   File: CheckpointConf.java
public CheckpointConf(Configuration conf) {
  checkpointCheckPeriod = conf.getLong(
      DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,
      DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT);
      
  checkpointPeriod = conf.getLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 
                                  DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT);
  checkpointTxnCount = conf.getLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 
                                DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT);
  maxRetriesOnMergeError = conf.getInt(DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY,
                                DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_DEFAULT);
  legacyOivImageDir = conf.get(DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY);
  warnForDeprecatedConfigs(conf);
}
 
Example 4
Project: ditb   File: RESTServlet.java
/**
 * Constructor with existing configuration
 * @param conf existing configuration
 * @param userProvider the login user provider
 * @throws IOException
 */
RESTServlet(final Configuration conf,
    final UserProvider userProvider) throws IOException {
  this.realUser = userProvider.getCurrent().getUGI();
  this.conf = conf;
  registerCustomFilter(conf);

  int cleanInterval = conf.getInt(CLEANUP_INTERVAL, 10 * 1000);
  int maxIdleTime = conf.getInt(MAX_IDLETIME, 10 * 60 * 1000);
  connectionCache = new ConnectionCache(
    conf, userProvider, cleanInterval, maxIdleTime);
  if (supportsProxyuser()) {
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
  }
}
 
Example 5
Project: hadoop   File: AbstractYarnScheduler.java
@Override
public void serviceInit(Configuration conf) throws Exception {
  nmExpireInterval =
      conf.getInt(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS,
        YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS);
  configuredMaximumAllocationWaitTime =
      conf.getLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS,
        YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS);
  createReleaseCache();
  super.serviceInit(conf);
}
 
Example 6
Project: hadoop   File: DistSum.java
/** Partitions sigma into parts */
@Override
protected void map(NullWritable nw, SummationWritable sigma, final Context context
    ) throws IOException, InterruptedException {
  final Configuration conf = context.getConfiguration();
  final int nParts = conf.getInt(N_PARTS, 0);
  final Summation[] parts = sigma.getElement().partition(nParts);
  for(int i = 0; i < parts.length; ++i) {
    context.write(new IntWritable(i), new SummationWritable(parts[i]));
    LOG.info("parts[" + i + "] = " + parts[i]);
  }
}
 
Example 7
Project: ditb   File: BloomFilterFactory.java
/**
 * Creates a new general (Row or RowCol) Bloom filter at the time of
 * {@link org.apache.hadoop.hbase.regionserver.StoreFile} writing.
 *
 * @param conf
 * @param cacheConf
 * @param bloomType
 * @param maxKeys an estimate of the number of keys we expect to insert.
 *        Irrelevant if compound Bloom filters are enabled.
 * @param writer the HFile writer
 * @return the new Bloom filter, or null in case Bloom filters are disabled
 *         or when failed to create one.
 */
public static BloomFilterWriter createGeneralBloomAtWrite(Configuration conf,
    CacheConfig cacheConf, BloomType bloomType, int maxKeys,
    HFile.Writer writer) {
  if (!isGeneralBloomEnabled(conf)) {
    LOG.trace("Bloom filters are disabled by configuration for "
        + writer.getPath()
        + (conf == null ? " (configuration is null)" : ""));
    return null;
  } else if (bloomType == BloomType.NONE) {
    LOG.trace("Bloom filter is turned off for the column family");
    return null;
  }

  float err = getErrorRate(conf);

  // In case of row/column Bloom filter lookups, each lookup is an OR of two
  // separate lookups. Therefore, if each lookup's false positive rate is p,
  // the resulting false positive rate is err = 1 - (1 - p)^2, and
  // p = 1 - sqrt(1 - err).
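  // For example, with err = 0.01 the per-lookup target becomes p = 1 - sqrt(0.99) ≈ 0.005.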
  if (bloomType == BloomType.ROWCOL) {
    err = (float) (1 - Math.sqrt(1 - err));
  }

  int maxFold = conf.getInt(IO_STOREFILE_BLOOM_MAX_FOLD,
      MAX_ALLOWED_FOLD_FACTOR);

  // Do we support compound bloom filters?
  // In case of compound Bloom filters we ignore the maxKeys hint.
  CompoundBloomFilterWriter bloomWriter = new CompoundBloomFilterWriter(getBloomBlockSize(conf),
      err, Hash.getHashType(conf), maxFold, cacheConf.shouldCacheBloomsOnWrite(),
      bloomType == BloomType.ROWCOL ? KeyValue.COMPARATOR : KeyValue.RAW_COMPARATOR);
  writer.addInlineBlockWriter(bloomWriter);
  return bloomWriter;
}
 
Example 8
Project: ditb   File: HeapMemStoreLAB.java
public HeapMemStoreLAB(Configuration conf) {
  chunkSize = conf.getInt(CHUNK_SIZE_KEY, CHUNK_SIZE_DEFAULT);
  maxAlloc = conf.getInt(MAX_ALLOC_KEY, MAX_ALLOC_DEFAULT);
  this.chunkPool = MemStoreChunkPool.getPool(conf);

  // if we don't exclude allocations >CHUNK_SIZE, we'd infinite-loop on one!
  Preconditions.checkArgument(
    maxAlloc <= chunkSize,
    MAX_ALLOC_KEY + " must be less than " + CHUNK_SIZE_KEY);
}
 
Example 9
Project: hadoop   File: TaskHeartbeatHandler.java
@Override
protected void serviceInit(Configuration conf) throws Exception {
  super.serviceInit(conf);
  taskTimeOut = conf.getInt(MRJobConfig.TASK_TIMEOUT, 5 * 60 * 1000);
  taskTimeOutCheckInterval =
      conf.getInt(MRJobConfig.TASK_TIMEOUT_CHECK_INTERVAL_MS, 30 * 1000);
}
 
Example 10
Project: hadoop   File: DynamicInputFormat.java
private static int getMaxChunksTolerable(Configuration conf) {
  int maxChunksTolerable = conf.getInt(
      DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE,
      DistCpConstants.MAX_CHUNKS_TOLERABLE_DEFAULT);
  if (maxChunksTolerable <= 0) {
    LOG.warn(DistCpConstants.CONF_LABEL_MAX_CHUNKS_TOLERABLE +
        " should be positive. Fall back to default value: "
        + DistCpConstants.MAX_CHUNKS_TOLERABLE_DEFAULT);
    maxChunksTolerable = DistCpConstants.MAX_CHUNKS_TOLERABLE_DEFAULT;
  }
  return maxChunksTolerable;
}
 
Example 11
Project: ditb   File: AsyncProcess.java
public AsyncProcess(ClusterConnection hc, Configuration conf, ExecutorService pool,
    RpcRetryingCallerFactory rpcCaller, boolean useGlobalErrors, RpcControllerFactory rpcFactory) {
  if (hc == null) {
    throw new IllegalArgumentException("HConnection cannot be null.");
  }

  this.connection = hc;
  this.pool = pool;
  this.globalErrors = useGlobalErrors ? new BatchErrors() : null;

  this.id = COUNTER.incrementAndGet();

  this.pause = conf.getLong(HConstants.HBASE_CLIENT_PAUSE,
      HConstants.DEFAULT_HBASE_CLIENT_PAUSE);
  this.numTries = conf.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  this.timeout = conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
      HConstants.DEFAULT_HBASE_RPC_TIMEOUT);
  this.primaryCallTimeoutMicroseconds = conf.getInt(PRIMARY_CALL_TIMEOUT_KEY, 10000);

  this.maxTotalConcurrentTasks = conf.getInt(HConstants.HBASE_CLIENT_MAX_TOTAL_TASKS,
    HConstants.DEFAULT_HBASE_CLIENT_MAX_TOTAL_TASKS);
  this.maxConcurrentTasksPerServer = conf.getInt(HConstants.HBASE_CLIENT_MAX_PERSERVER_TASKS,
        HConstants.DEFAULT_HBASE_CLIENT_MAX_PERSERVER_TASKS);
  this.maxConcurrentTasksPerRegion = conf.getInt(HConstants.HBASE_CLIENT_MAX_PERREGION_TASKS,
        HConstants.DEFAULT_HBASE_CLIENT_MAX_PERREGION_TASKS);

  this.startLogErrorsCnt =
      conf.getInt(START_LOG_ERRORS_AFTER_COUNT_KEY, DEFAULT_START_LOG_ERRORS_AFTER_COUNT);

  if (this.maxTotalConcurrentTasks <= 0) {
    throw new IllegalArgumentException("maxTotalConcurrentTasks=" + maxTotalConcurrentTasks);
  }
  if (this.maxConcurrentTasksPerServer <= 0) {
    throw new IllegalArgumentException("maxConcurrentTasksPerServer=" +
        maxConcurrentTasksPerServer);
  }
  if (this.maxConcurrentTasksPerRegion <= 0) {
    throw new IllegalArgumentException("maxConcurrentTasksPerRegion=" +
        maxConcurrentTasksPerRegion);
  }

  // Server tracker allows us to do faster, and yet useful (hopefully), retries.
  // However, if we are too useful, we might fail very quickly due to retry count limit.
  // To avoid this, we are going to cheat for now (see HBASE-7659), and calculate maximum
  // retry time if normal retries were used. Then we will retry until this time runs out.
  // If we keep hitting one server, the net effect will be the incremental backoff, and
  // essentially the same number of retries as planned. If we have to do faster retries,
  // we will do more retries in aggregate, but the user will be none the wiser.
  this.serverTrackerTimeout = 0;
  for (int i = 0; i < this.numTries; ++i) {
    serverTrackerTimeout += ConnectionUtils.getPauseTime(this.pause, i);
  }

  this.rpcCallerFactory = rpcCaller;
  this.rpcFactory = rpcFactory;
}
 
Example 12
Project: hadoop   File: FsDatasetImpl.java
/**
 * An FSDataset has a directory where it loads its data files.
 */
FsDatasetImpl(DataNode datanode, DataStorage storage, Configuration conf
    ) throws IOException {
  this.fsRunning = true;
  this.datanode = datanode;
  this.dataStorage = storage;
  this.conf = conf;
  // The number of volumes required for operation is the total number 
  // of volumes minus the number of failed volumes we can tolerate.
  final int volFailuresTolerated =
    conf.getInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
                DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);

  String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
  Collection<StorageLocation> dataLocations = DataNode.getStorageLocations(conf);
  List<VolumeFailureInfo> volumeFailureInfos = getInitialVolumeFailureInfos(
      dataLocations, storage);

  int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
  int volsFailed = volumeFailureInfos.size();
  this.validVolsRequired = volsConfigured - volFailuresTolerated;

  if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
    throw new DiskErrorException("Invalid volume failure "
        + " config value: " + volFailuresTolerated);
  }
  if (volsFailed > volFailuresTolerated) {
    throw new DiskErrorException("Too many failed volumes - "
        + "current valid volumes: " + storage.getNumStorageDirs() 
        + ", volumes configured: " + volsConfigured 
        + ", volumes failed: " + volsFailed
        + ", volume failures tolerated: " + volFailuresTolerated);
  }

  storageMap = new ConcurrentHashMap<String, DatanodeStorage>();
  volumeMap = new ReplicaMap(this);
  ramDiskReplicaTracker = RamDiskReplicaTracker.getInstance(conf, this);

  @SuppressWarnings("unchecked")
  final VolumeChoosingPolicy<FsVolumeImpl> blockChooserImpl =
      ReflectionUtils.newInstance(conf.getClass(
          DFSConfigKeys.DFS_DATANODE_FSDATASET_VOLUME_CHOOSING_POLICY_KEY,
          RoundRobinVolumeChoosingPolicy.class,
          VolumeChoosingPolicy.class), conf);
  volumes = new FsVolumeList(volumeFailureInfos, datanode.getBlockScanner(),
      blockChooserImpl);
  asyncDiskService = new FsDatasetAsyncDiskService(datanode, this);
  asyncLazyPersistService = new RamDiskAsyncLazyPersistService(datanode);
  deletingBlock = new HashMap<String, Set<Long>>();

  for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
    addVolume(dataLocations, storage.getStorageDir(idx));
  }
  setupAsyncLazyPersistThreads();

  cacheManager = new FsDatasetCache(this);

  // Start the lazy writer once we have built the replica maps.
  lazyWriter = new Daemon(new LazyWriter(conf));
  lazyWriter.start();
  registerMBean(datanode.getDatanodeUuid());
  localFS = FileSystem.getLocal(conf);
  blockPinningEnabled = conf.getBoolean(
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED,
    DFSConfigKeys.DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT);
}
 
Example 13
Project: hadoop   File: Gridmix.java
/**
 * Create each component in the pipeline and start it.
 * @param conf Configuration data, no keys specific to this context
 * @param traceIn Either a Path to the trace data or &quot;-&quot; for
 *                stdin
 * @param ioPath &lt;ioPath&gt;/input/ is the dir from which input data is
 *               read and &lt;ioPath&gt;/distributedCache/ is the gridmix
 *               distributed cache directory.
 * @param scratchDir Path into which job output is written
 * @param startFlag Semaphore for starting job trace pipeline
 */
@SuppressWarnings("unchecked")
private void startThreads(Configuration conf, String traceIn, Path ioPath,
    Path scratchDir, CountDownLatch startFlag, UserResolver userResolver)
    throws IOException {
  try {
    Path inputDir = getGridmixInputDataPath(ioPath);
    GridmixJobSubmissionPolicy policy = getJobSubmissionPolicy(conf);
    LOG.info(" Submission policy is " + policy.name());
    statistics = new Statistics(conf, policy.getPollingInterval(), startFlag);
    monitor = createJobMonitor(statistics, conf);
    int noOfSubmitterThreads = 
      (policy == GridmixJobSubmissionPolicy.SERIAL) 
      ? 1
      : Runtime.getRuntime().availableProcessors() + 1;

    int numThreads = conf.getInt(GRIDMIX_SUB_THR, noOfSubmitterThreads);
    int queueDep = conf.getInt(GRIDMIX_QUE_DEP, 5);
    submitter = createJobSubmitter(monitor, numThreads, queueDep,
                                   new FilePool(conf, inputDir), userResolver, 
                                   statistics);
    distCacheEmulator = new DistributedCacheEmulator(conf, ioPath);

    factory = createJobFactory(submitter, traceIn, scratchDir, conf, 
                               startFlag, userResolver);
    factory.jobCreator.setDistCacheEmulator(distCacheEmulator);

    if (policy == GridmixJobSubmissionPolicy.SERIAL) {
      statistics.addJobStatsListeners(factory);
    } else {
      statistics.addClusterStatsObservers(factory);
    }

    // add the gridmix run summarizer to the statistics
    statistics.addJobStatsListeners(summarizer.getExecutionSummarizer());
    statistics.addClusterStatsObservers(summarizer.getClusterSummarizer());
    
    monitor.start();
    submitter.start();
  } catch (Exception e) {
    LOG.error(" Exception at start " ,e);
    throw new IOException(e);
  }
 }
 
Example 14
Project: hadoop   File: ReplicationParam.java
/** @return the value or, if it is null, return the default from conf. */
public short getValue(final Configuration conf) {
  return getValue() != null? getValue()
      : (short)conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
}
 
Example 15
Project: hadoop   File: RandomTextDataGenerator.java
/**
 * Get the configured random text data generator's list size.
 */
static int getRandomTextDataGeneratorListSize(Configuration conf) {
  return conf.getInt(GRIDMIX_DATAGEN_RANDOMTEXT_LISTSIZE, DEFAULT_LIST_SIZE);
}
 
Example 16
Project: spark_deep   File: Client.java
public static ConnectionId getConnectionId(InetSocketAddress addr, Class<?> protocol, int rpcTimeout,
		Configuration conf) {
	return new ConnectionId(addr, protocol, rpcTimeout, conf.getInt("ipc.client.connection.maxidletime", 10000), // 10s
			conf.getInt("ipc.client.connect.max.retries", 10), conf.getBoolean("ipc.client.tcpnodelay", false),
			Client.getPingInterval(conf));
}
 
Example 17
Project: hadoop-oss   File: MapFile.java
/**
 * This method attempts to fix a corrupt MapFile by re-creating its index.
 * @param fs filesystem
 * @param dir directory containing the MapFile data and index
 * @param keyClass key class (has to be a subclass of Writable)
 * @param valueClass value class (has to be a subclass of Writable)
 * @param dryrun do not perform any changes, just report what needs to be done
 * @return number of valid entries in this MapFile, or -1 if no fixing was needed
 * @throws Exception
 */
public static long fix(FileSystem fs, Path dir,
                       Class<? extends Writable> keyClass,
                       Class<? extends Writable> valueClass, boolean dryrun,
                       Configuration conf) throws Exception {
  String dr = (dryrun ? "[DRY RUN ] " : "");
  Path data = new Path(dir, DATA_FILE_NAME);
  Path index = new Path(dir, INDEX_FILE_NAME);
  int indexInterval = conf.getInt(Writer.INDEX_INTERVAL, 128);
  if (!fs.exists(data)) {
    // there's nothing we can do to fix this!
    throw new Exception(dr + "Missing data file in " + dir + ", impossible to fix this.");
  }
  if (fs.exists(index)) {
    // no fixing needed
    return -1;
  }
  SequenceFile.Reader dataReader = 
    new SequenceFile.Reader(conf, SequenceFile.Reader.file(data));
  if (!dataReader.getKeyClass().equals(keyClass)) {
    throw new Exception(dr + "Wrong key class in " + dir + ", expected" + keyClass.getName() +
                        ", got " + dataReader.getKeyClass().getName());
  }
  if (!dataReader.getValueClass().equals(valueClass)) {
    throw new Exception(dr + "Wrong value class in " + dir + ", expected" + valueClass.getName() +
                        ", got " + dataReader.getValueClass().getName());
  }
  long cnt = 0L;
  Writable key = ReflectionUtils.newInstance(keyClass, conf);
  Writable value = ReflectionUtils.newInstance(valueClass, conf);
  SequenceFile.Writer indexWriter = null;
  if (!dryrun) {
    indexWriter = 
      SequenceFile.createWriter(conf, 
                                SequenceFile.Writer.file(index), 
                                SequenceFile.Writer.keyClass(keyClass), 
                                SequenceFile.Writer.valueClass
                                  (LongWritable.class));
  }
  try {
    long pos = 0L;
    LongWritable position = new LongWritable();
    while(dataReader.next(key, value)) {
      cnt++;
      if (cnt % indexInterval == 0) {
        position.set(pos);
        if (!dryrun) indexWriter.append(key, position);
      }
      pos = dataReader.getPosition();
    }
  } catch(Throwable t) {
    // truncated data file. swallow it.
  }
  dataReader.close();
  if (!dryrun) indexWriter.close();
  return cnt;
}
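For context, here is a minimal, hypothetical call to the fix() method above; the directory path and the Text/LongWritable key and value classes are illustrative assumptions, not part of the original example.

public static void repairIndexExample(Configuration conf) throws Exception {
  FileSystem fs = FileSystem.get(conf);
  Path mapFileDir = new Path("/tmp/example-mapfile");   // hypothetical location
  // dryrun = true: only report what would be done, do not write a new index
  long validEntries = MapFile.fix(fs, mapFileDir, Text.class, LongWritable.class, true, conf);
  System.out.println("Valid entries: " + validEntries);
}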
 
Example 18
Project: ditb   File: FSHDFSUtils.java
boolean recoverDFSFileLease(final DistributedFileSystem dfs, final Path p,
    final Configuration conf, final CancelableProgressable reporter)
throws IOException {
  LOG.info("Recover lease on dfs file " + p);
  long startWaiting = EnvironmentEdgeManager.currentTime();
  // Default is 15 minutes. It's huge, but the idea is that if we have a major issue, HDFS
  // usually needs 10 minutes before marking the nodes as dead. So we're putting ourselves
  // beyond that limit 'to be safe'.
  long recoveryTimeout = conf.getInt("hbase.lease.recovery.timeout", 900000) + startWaiting;
  // This setting should be a little bit above what the cluster dfs heartbeat is set to.
  long firstPause = conf.getInt("hbase.lease.recovery.first.pause", 4000);
  // This should be set to how long it'll take for us to timeout against primary datanode if it
  // is dead.  We set it to 61 seconds, 1 second more than the default READ_TIMEOUT in HDFS, the
  // default value for DFS_CLIENT_SOCKET_TIMEOUT_KEY. If recovery is still failing after this
  // timeout, then further recovery will take linear backoff with this base, to avoid endless
  // preemptions when this value is not properly configured.
  long subsequentPauseBase = conf.getLong("hbase.lease.recovery.dfs.timeout", 61 * 1000);

  Method isFileClosedMeth = null;
  // whether we need to look for isFileClosed method
  boolean findIsFileClosedMeth = true;
  boolean recovered = false;
  // We break the loop if we succeed the lease recovery, timeout, or we throw an exception.
  for (int nbAttempt = 0; !recovered; nbAttempt++) {
    recovered = recoverLease(dfs, nbAttempt, p, startWaiting);
    if (recovered) break;
    checkIfCancelled(reporter);
    if (checkIfTimedout(conf, recoveryTimeout, nbAttempt, p, startWaiting)) break;
    try {
      // On the first time through wait the short 'firstPause'.
      if (nbAttempt == 0) {
        Thread.sleep(firstPause);
      } else {
        // Cycle here until (subsequentPause * nbAttempt) elapses.  While spinning, check
        // isFileClosed if available (should be in hadoop 2.0.5... not in hadoop 1 though).
        long localStartWaiting = EnvironmentEdgeManager.currentTime();
        while ((EnvironmentEdgeManager.currentTime() - localStartWaiting) <
            subsequentPauseBase * nbAttempt) {
          Thread.sleep(conf.getInt("hbase.lease.recovery.pause", 1000));
          if (findIsFileClosedMeth) {
            try {
              isFileClosedMeth = dfs.getClass().getMethod("isFileClosed",
                new Class[]{ Path.class });
            } catch (NoSuchMethodException nsme) {
              LOG.debug("isFileClosed not available");
            } finally {
              findIsFileClosedMeth = false;
            }
          }
          if (isFileClosedMeth != null && isFileClosed(dfs, isFileClosedMeth, p)) {
            recovered = true;
            break;
          }
          checkIfCancelled(reporter);
        }
      }
    } catch (InterruptedException ie) {
      InterruptedIOException iioe = new InterruptedIOException();
      iioe.initCause(ie);
      throw iioe;
    }
  }
  return recovered;
}
 
Example 19
Project: ditb   File: AsyncRpcClient.java
/**
 * Constructor for tests
 *
 * @param configuration      to HBase
 * @param clusterId          for the cluster
 * @param localAddress       local address to connect to
 * @param metrics            the connection metrics
 * @param channelInitializer for custom channel handlers
 */
protected AsyncRpcClient(Configuration configuration, String clusterId,
    SocketAddress localAddress, MetricsConnection metrics,
    ChannelInitializer<SocketChannel> channelInitializer) {
  super(configuration, clusterId, localAddress, metrics);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Starting async Hbase RPC client");
  }

  Pair<EventLoopGroup, Class<? extends Channel>> eventLoopGroupAndChannelClass;
  this.useGlobalEventLoopGroup = conf.getBoolean(USE_GLOBAL_EVENT_LOOP_GROUP, true);
  if (useGlobalEventLoopGroup) {
    eventLoopGroupAndChannelClass = getGlobalEventLoopGroup(configuration);
  } else {
    eventLoopGroupAndChannelClass = createEventLoopGroup(configuration);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Use " + (useGlobalEventLoopGroup ? "global" : "individual") + " event loop group "
        + eventLoopGroupAndChannelClass.getFirst().getClass().getSimpleName());
  }

  this.connections = new PoolMap<>(getPoolType(configuration), getPoolSize(configuration));
  this.failedServers = new FailedServers(configuration);

  int operationTimeout = configuration.getInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT,
      HConstants.DEFAULT_HBASE_CLIENT_OPERATION_TIMEOUT);

  // Configure the default bootstrap.
  this.bootstrap = new Bootstrap();
  bootstrap.group(eventLoopGroupAndChannelClass.getFirst())
      .channel(eventLoopGroupAndChannelClass.getSecond())
      .option(ChannelOption.TCP_NODELAY, tcpNoDelay)
      .option(ChannelOption.SO_KEEPALIVE, tcpKeepAlive)
      .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, operationTimeout);
  if (channelInitializer == null) {
    channelInitializer = DEFAULT_CHANNEL_INITIALIZER;
  }
  bootstrap.handler(channelInitializer);
  if (localAddress != null) {
    bootstrap.localAddress(localAddress);
  }
}
 
Example 20
Project: hadoop   File: RandomTextDataGenerator.java
/**
 * Get the configured random text data generator word size.
 */
static int getRandomTextDataGeneratorWordSize(Configuration conf) {
  return conf.getInt(GRIDMIX_DATAGEN_RANDOMTEXT_WORDSIZE, DEFAULT_WORD_SIZE);
}