org.apache.hadoop.util.LightWeightGSet Java Examples

The following examples show how to use org.apache.hadoop.util.LightWeightGSet, a low-memory-footprint GSet implementation used throughout the HDFS NameNode. Each example notes the source file and project it was taken from.
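Every element stored in a LightWeightGSet must implement LightWeightGSet.LinkedElement, because the set is intrusive: instead of allocating a wrapper object per entry the way java.util.HashMap does, it threads its hash chains through a pointer the element itself carries. Membership is keyed by the element's own equals() and hashCode(). A minimal self-contained sketch; the Item class and the "demo" map name are illustrative, not part of Hadoop:

import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;

// Hypothetical element type, for illustration only.
class Item implements LightWeightGSet.LinkedElement {
  private final long id;
  private LightWeightGSet.LinkedElement next;   // intrusive hash-chain pointer

  Item(long id) { this.id = id; }

  // The set is keyed by the element's own equals()/hashCode().
  @Override public boolean equals(Object o) {
    return o instanceof Item && ((Item) o).id == id;
  }
  @Override public int hashCode() { return (int) (id ^ (id >>> 32)); }

  // Managed by the set; a consequence of the intrusive pointer is that an
  // element can be placed in at most one set at a time.
  @Override public void setNext(LightWeightGSet.LinkedElement n) { next = n; }
  @Override public LightWeightGSet.LinkedElement getNext() { return next; }
}

public class GSetDemo {
  public static void main(String[] args) {
    // Size the table from a share of the JVM heap, as the HDFS examples below do.
    int capacity = LightWeightGSet.computeCapacity(1.0, "demo");
    GSet<Item, Item> set = new LightWeightGSet<>(capacity);
    set.put(new Item(42));
    System.out.println(set.contains(new Item(42)));   // true
  }
}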
Example #1
Source File: BlocksMap.java    From hadoop with Apache License 2.0 (identical copy in big-c)
BlocksMap(int capacity) {
  // Use 2% of total memory to size the GSet capacity
  this.capacity = capacity;
  this.blocks = new LightWeightGSet<Block, BlockInfoContiguous>(capacity) {
    @Override
    public Iterator<BlockInfoContiguous> iterator() {
      SetIterator iterator = new SetIterator();
      /*
       * Not tracking any modifications to set. As this set will be used
       * always under FSNameSystem lock, modifications will not cause any
       * ConcurrentModificationExceptions. But there is a chance of missing
       * newly added elements during iteration.
       */
      iterator.setTrackModification(false);
      return iterator;
    }
  };
}
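The overridden iterator disables modification tracking, so iteration never throws ConcurrentModificationException; in exchange, every reader and writer must hold the same external lock (for BlocksMap, the FSNamesystem lock). A sketch of that discipline, reusing the hypothetical Item type from the introduction:

import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;

class LockedIteration {
  private final Object lock = new Object();   // stand-in for the FSNamesystem lock
  private final GSet<Item, Item> blocks =
      new LightWeightGSet<>(LightWeightGSet.computeCapacity(1.0, "sketch"));

  void add(long id) {
    synchronized (lock) {            // writers take the lock...
      blocks.put(new Item(id));
    }
  }

  int count() {
    synchronized (lock) {            // ...and so does every iteration, so the
      int n = 0;                     // untracked iterator never races a mutation
      for (Item b : blocks) {
        n++;
      }
      return n;
    }
  }
}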
 
Example #2
Source File: INodeMap.java    From hadoop with Apache License 2.0 (identical copy in big-c)
static INodeMap newInstance(INodeDirectory rootDir) {
  // Compute the map capacity by allocating 1% of total memory
  int capacity = LightWeightGSet.computeCapacity(1, "INodeMap");
  GSet<INode, INodeWithAdditionalFields> map
      = new LightWeightGSet<INode, INodeWithAdditionalFields>(capacity);
  map.put(rootDir);
  return new INodeMap(map);
}
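computeCapacity sizes the table from the heap rather than from an expected entry count: it takes the given percentage of Runtime.maxMemory(), divides by the JVM's reference size, and settles on a power of two, logging the result. A quick probe; output depends on -Xmx and on whether compressed oops are in use:

import org.apache.hadoop.util.LightWeightGSet;

public class CapacityProbe {
  public static void main(String[] args) {
    // Capacity grows with the heap share, rounded to a power of two.
    for (double pct : new double[] {0.25, 1.0, 2.0}) {
      System.out.printf("%.2f%% of heap -> capacity %d%n",
          pct, LightWeightGSet.computeCapacity(pct, "probe"));
    }
  }
}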
 
Example #3
Source File: CacheManager.java    From hadoop with Apache License 2.0 (identical copy in big-c)
CacheManager(FSNamesystem namesystem, Configuration conf,
    BlockManager blockManager) {
  this.namesystem = namesystem;
  this.blockManager = blockManager;
  this.nextDirectiveId = 1;
  this.maxListCachePoolsResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
  this.maxListCacheDirectivesNumResponses = conf.getInt(
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
  scanIntervalMs = conf.getLong(
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS,
      DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS_DEFAULT);
  float cachedBlocksPercent = conf.getFloat(
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT,
        DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT);
  if (cachedBlocksPercent < MIN_CACHED_BLOCKS_PERCENT) {
    LOG.info("Using minimum value {} for {}", MIN_CACHED_BLOCKS_PERCENT,
      DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT);
    cachedBlocksPercent = MIN_CACHED_BLOCKS_PERCENT;
  }
  this.cachedBlocks = new LightWeightGSet<CachedBlock, CachedBlock>(
        LightWeightGSet.computeCapacity(cachedBlocksPercent,
            "cachedBlocks"));

}
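The sizing pattern above, read a heap percentage from the Configuration and clamp it to a floor before handing it to computeCapacity, is easy to reuse. A hedged sketch with illustrative key names (not real DFSConfigKeys entries), again using the hypothetical Item type:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;

class SizedSet {
  // Hypothetical configuration key and bounds, for illustration only.
  static final String PCT_KEY = "example.map.allocation.percent";
  static final float PCT_DEFAULT = 0.25f;
  static final float PCT_MIN = 0.001f;

  static GSet<Item, Item> create(Configuration conf) {
    float pct = conf.getFloat(PCT_KEY, PCT_DEFAULT);
    if (pct < PCT_MIN) {
      pct = PCT_MIN;                 // clamp to the floor, as CacheManager does
    }
    return new LightWeightGSet<>(LightWeightGSet.computeCapacity(pct, "example"));
  }
}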
 
Example #4
Source File: RetryCache.java    From hadoop with Apache License 2.0 (identical copy in big-c)
/**
 * Constructor
 * @param cacheName name to identify the cache by
 * @param percentage percentage of total java heap space used by this cache
 * @param expirationTime time for an entry to expire in nanoseconds
 */
public RetryCache(String cacheName, double percentage, long expirationTime) {
  int capacity = LightWeightGSet.computeCapacity(percentage, cacheName);
  capacity = capacity > 16 ? capacity : 16;
  this.set = new LightWeightCache<CacheEntry, CacheEntry>(capacity, capacity,
      expirationTime, 0);
  this.expirationTime = expirationTime;
  this.cacheName = cacheName;
  this.retryCacheMetrics =  RetryCacheMetrics.create(this);
}
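LightWeightCache extends LightWeightGSet with expiration, which is why its entries implement LightWeightCache.Entry (a LinkedElement that also carries an expiration timestamp) rather than plain LinkedElement. A sketch of a compatible entry type; TimedEntry is illustrative, not RetryCache's real CacheEntry:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.util.LightWeightCache;
import org.apache.hadoop.util.LightWeightGSet;

class TimedEntry implements LightWeightCache.Entry {
  private final long id;
  private long expirationTime;                     // stamped by the cache, in ns
  private LightWeightGSet.LinkedElement next;

  TimedEntry(long id) { this.id = id; }

  @Override public boolean equals(Object o) {
    return o instanceof TimedEntry && ((TimedEntry) o).id == id;
  }
  @Override public int hashCode() { return (int) (id ^ (id >>> 32)); }

  @Override public void setExpirationTime(long timeNano) { expirationTime = timeNano; }
  @Override public long getExpirationTime() { return expirationTime; }

  @Override public void setNext(LightWeightGSet.LinkedElement n) { next = n; }
  @Override public LightWeightGSet.LinkedElement getNext() { return next; }

  public static void main(String[] args) {
    // Mirror the constructor above: floor the capacity at 16, then build a
    // cache whose entries expire ten minutes after creation (the final 0
    // means no separate deletion-expiration period).
    int capacity = Math.max(16, LightWeightGSet.computeCapacity(0.1, "timedCache"));
    LightWeightCache<TimedEntry, TimedEntry> cache = new LightWeightCache<>(
        capacity, capacity, TimeUnit.MINUTES.toNanos(10), 0);
    cache.put(new TimedEntry(1));
    System.out.println(cache.size());              // 1
  }
}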
 
Example #5
Source File: RetryCache.java    From hadoop with Apache License 2.0 (identical copy in big-c)
@VisibleForTesting
public LightWeightGSet<CacheEntry, CacheEntry> getCacheSet() {
  return set;
}
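The accessor exists so tests can assert against the backing set directly. A hedged JUnit 4 smoke-test sketch; the test and cache names are assumptions:

import static org.junit.Assert.assertEquals;

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.ipc.RetryCache;
import org.junit.Test;

public class RetryCacheSmokeTest {
  @Test
  public void startsEmpty() {
    // Inspect the backing GSet through the test-only accessor above.
    RetryCache cache = new RetryCache("smoke", 0.01, TimeUnit.SECONDS.toNanos(10));
    assertEquals(0, cache.getCacheSet().size());
  }
}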
 
Example #6
Source File: GSetGeneratorBase.java    From NNAnalytics with Apache License 2.0
public static GSet<INode, INodeWithAdditionalFields> getEmptyGSet() {
  return new LightWeightGSet<>(LightWeightGSet.computeCapacity(1, "test"));
}
 
Example #7
Source File: BlockInfoContiguous.java    From hadoop with Apache License 2.0 (identical copy in big-c)
@Override
public void setNext(LightWeightGSet.LinkedElement next) {
  this.nextLinkedElement = next;
}
 
Example #8
Source File: BlockInfoContiguous.java    From hadoop with Apache License 2.0 (identical copy in big-c)
@Override
public LightWeightGSet.LinkedElement getNext() {
  return nextLinkedElement;
}
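These two overrides are the whole cost of membership: BlockInfoContiguous carries its own chain pointer, so the set allocates no per-entry wrappers. The flip side is that an element can live in at most one LightWeightGSet at a time, and putting an equal element replaces and returns the old one. A small demo, reusing the hypothetical Item type from the introduction:

import org.apache.hadoop.util.GSet;
import org.apache.hadoop.util.LightWeightGSet;

public class IntrusiveDemo {
  public static void main(String[] args) {
    GSet<Item, Item> set = new LightWeightGSet<>(1 << 10);
    Item x = new Item(1);
    set.put(x);
    Item prev = set.put(new Item(1));   // equal key: replaces x and returns it
    System.out.println(prev == x);      // true
    System.out.println(set.size());     // 1
  }
}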
 
Example #9
Source File: BlockManager.java    From hadoop with Apache License 2.0 (identical copy in big-c)
public BlockManager(final Namesystem namesystem, final Configuration conf)
  throws IOException {
  this.namesystem = namesystem;
  datanodeManager = new DatanodeManager(this, namesystem, conf);
  heartbeatManager = datanodeManager.getHeartbeatManager();

  startupDelayBlockDeletionInMs = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
      DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
  invalidateBlocks = new InvalidateBlocks(
      datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs);

  // Compute the map capacity by allocating 2% of total memory
  blocksMap = new BlocksMap(
      LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
  blockplacement = BlockPlacementPolicy.getInstance(
    conf, datanodeManager.getFSClusterStats(),
    datanodeManager.getNetworkTopology(),
    datanodeManager.getHost2DatanodeMap());
  storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite();
  pendingReplications = new PendingReplicationBlocks(conf.getInt(
    DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
    DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);

  blockTokenSecretManager = createBlockTokenSecretManager(conf);

  this.maxCorruptFilesReturned = conf.getInt(
    DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY,
    DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED);
  this.defaultReplication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
                                        DFSConfigKeys.DFS_REPLICATION_DEFAULT);

  final int maxR = conf.getInt(DFSConfigKeys.DFS_REPLICATION_MAX_KEY, 
                               DFSConfigKeys.DFS_REPLICATION_MAX_DEFAULT);
  final int minR = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
                               DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
  if (minR <= 0)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
        + " = " + minR + " <= 0");
  if (maxR > Short.MAX_VALUE)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
        + " = " + maxR + " > " + Short.MAX_VALUE);
  if (minR > maxR)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
        + " = " + minR + " > "
        + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
        + " = " + maxR);
  this.minReplication = (short)minR;
  this.maxReplication = (short)maxR;

  this.maxReplicationStreams =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
  this.replicationStreamsHardLimit =
      conf.getInt(
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
  this.shouldCheckForEnoughRacks =
      conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
          ? false : true;

  this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);

  this.replicationRecheckInterval = 
    conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
                DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
  
  this.encryptDataTransfer =
      conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY,
          DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
  
  this.maxNumBlocksToLog =
      conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
          DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
  this.numBlocksPerIteration = conf.getInt(
      DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT,
      DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);
  
  LOG.info("defaultReplication         = " + defaultReplication);
  LOG.info("maxReplication             = " + maxReplication);
  LOG.info("minReplication             = " + minReplication);
  LOG.info("maxReplicationStreams      = " + maxReplicationStreams);
  LOG.info("shouldCheckForEnoughRacks  = " + shouldCheckForEnoughRacks);
  LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
  LOG.info("encryptDataTransfer        = " + encryptDataTransfer);
  LOG.info("maxNumBlocksToLog          = " + maxNumBlocksToLog);
}
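This constructor is also the call site behind the comment in Example #1: computeCapacity(2.0, "BlocksMap") produces the "2% of total memory" capacity that BlocksMap receives. The three replication checks can be read as a single bounds guard; a sketch with the inlined logic folded into an illustrative helper (BlockManager itself does not define one):

import java.io.IOException;

class ReplicationBounds {
  /** Validate min/max replication the way the constructor above does. */
  static void validate(int minR, int maxR) throws IOException {
    if (minR <= 0) {
      throw new IOException("replication.min = " + minR + " <= 0");
    }
    if (maxR > Short.MAX_VALUE) {
      throw new IOException("replication.max = " + maxR + " > " + Short.MAX_VALUE);
    }
    if (minR > maxR) {
      throw new IOException(
          "replication.min = " + minR + " > replication.max = " + maxR);
    }
  }
}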
 