Java Code Examples for org.apache.hadoop.hdfs.DFSUtil#getInvalidateWorkPctPerIteration()

The following examples show how to use org.apache.hadoop.hdfs.DFSUtil#getInvalidateWorkPctPerIteration(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: TestReplicationPolicy.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that the default value returned by
 * {@code DFSUtil.getInvalidateWorkPctPerIteration()} is positive,
 * that explicitly configured values in (0, 1] are read back correctly,
 * and that an {@code IllegalArgumentException} is thrown when 0.0f is
 * configured.
 */
@Test
public void testGetInvalidateWorkPctPerIteration() {
  Configuration conf = new Configuration();
  float blocksInvalidateWorkPct = DFSUtil
      .getInvalidateWorkPctPerIteration(conf);
  assertTrue(blocksInvalidateWorkPct > 0);

  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "0.5f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  // JUnit's signature is assertEquals(expected, actual, delta); use an
  // absolute delta so a wrong actual value of 0 cannot shrink the
  // tolerance to 0 and mask a failure.
  assertEquals(0.5f, blocksInvalidateWorkPct, 1e-7f);

  conf.set(DFSConfigKeys.
      DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "1.0f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertEquals(1.0f, blocksInvalidateWorkPct, 1e-7f);

  conf.set(DFSConfigKeys.
      DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "0.0f");
  // The ExpectedException rule requires the next statement to throw.
  exception.expect(IllegalArgumentException.class);
  DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
 
Example 2
Source File: TestReplicationPolicy.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that the default value returned by
 * {@code DFSUtil.getInvalidateWorkPctPerIteration()} is positive,
 * that explicitly configured values in (0, 1] are read back correctly,
 * and that an {@code IllegalArgumentException} is thrown when 0.0f is
 * configured.
 */
@Test
public void testGetInvalidateWorkPctPerIteration() {
  Configuration conf = new Configuration();
  float blocksInvalidateWorkPct = DFSUtil
      .getInvalidateWorkPctPerIteration(conf);
  assertTrue(blocksInvalidateWorkPct > 0);

  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "0.5f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  // JUnit's signature is assertEquals(expected, actual, delta); use an
  // absolute delta so a wrong actual value of 0 cannot shrink the
  // tolerance to 0 and mask a failure.
  assertEquals(0.5f, blocksInvalidateWorkPct, 1e-7f);

  conf.set(DFSConfigKeys.
      DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "1.0f");
  blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertEquals(1.0f, blocksInvalidateWorkPct, 1e-7f);

  conf.set(DFSConfigKeys.
      DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION, "0.0f");
  // The ExpectedException rule requires the next statement to throw.
  exception.expect(IllegalArgumentException.class);
  DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
 
Example 3
Source File: TestReplicationPolicy.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Checks that {@code DFSUtil#getInvalidateWorkPctPerIteration} throws an
 * {@code IllegalArgumentException} when a negative value is configured,
 * and that the unconfigured default is positive.
 */
@Test
public void testGetInvalidateWorkPctPerIteration_NegativeValue() {
  Configuration conf = new Configuration();
  final float defaultPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertTrue(defaultPct > 0);

  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "-0.5f");
  // The next call must reject the negative setting.
  exception.expect(IllegalArgumentException.class);
  DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
 
Example 4
Source File: TestReplicationPolicy.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Checks that {@code DFSUtil#getInvalidateWorkPctPerIteration} throws an
 * {@code IllegalArgumentException} when a value greater than 1 is
 * configured, and that the unconfigured default is positive.
 */
@Test
public void testGetInvalidateWorkPctPerIteration_GreaterThanOne() {
  Configuration conf = new Configuration();
  final float defaultPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertTrue(defaultPct > 0);

  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "1.5f");
  // The next call must reject a percentage above 1.
  exception.expect(IllegalArgumentException.class);
  DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
 
Example 5
Source File: TestReplicationPolicy.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Checks that {@code DFSUtil#getInvalidateWorkPctPerIteration} throws an
 * {@code IllegalArgumentException} when a negative value is configured,
 * and that the unconfigured default is positive.
 */
@Test
public void testGetInvalidateWorkPctPerIteration_NegativeValue() {
  Configuration conf = new Configuration();
  final float defaultPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertTrue(defaultPct > 0);

  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "-0.5f");
  // The next call must reject the negative setting.
  exception.expect(IllegalArgumentException.class);
  DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
 
Example 6
Source File: TestReplicationPolicy.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Checks that {@code DFSUtil#getInvalidateWorkPctPerIteration} throws an
 * {@code IllegalArgumentException} when a value greater than 1 is
 * configured, and that the unconfigured default is positive.
 */
@Test
public void testGetInvalidateWorkPctPerIteration_GreaterThanOne() {
  Configuration conf = new Configuration();
  final float defaultPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  assertTrue(defaultPct > 0);

  conf.set(DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
      "1.5f");
  // The next call must reject a percentage above 1.
  exception.expect(IllegalArgumentException.class);
  DFSUtil.getInvalidateWorkPctPerIteration(conf);
}
 
Example 7
Source File: BlockManager.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Constructs the BlockManager, wiring up the datanode/heartbeat managers
 * and reading every replication, invalidation, and block-report tunable
 * from the given configuration. Replication bounds are validated before
 * being narrowed to short.
 *
 * @param namesystem the namesystem this block manager serves
 * @param conf       configuration supplying all tunables read below
 * @throws IOException if the configured min/max replication values are
 *                     out of range or inconsistent with each other
 */
public BlockManager(final Namesystem namesystem, final Configuration conf)
  throws IOException {
  this.namesystem = namesystem;
  datanodeManager = new DatanodeManager(this, namesystem, conf);
  heartbeatManager = datanodeManager.getHeartbeatManager();

  // Seconds in config, milliseconds internally.
  startupDelayBlockDeletionInMs = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
      DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
  invalidateBlocks = new InvalidateBlocks(
      datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs);

  // Compute the map capacity by allocating 2% of total memory
  blocksMap = new BlocksMap(
      LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
  blockplacement = BlockPlacementPolicy.getInstance(
    conf, datanodeManager.getFSClusterStats(),
    datanodeManager.getNetworkTopology(),
    datanodeManager.getHost2DatanodeMap());
  storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite();
  pendingReplications = new PendingReplicationBlocks(conf.getInt(
    DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
    DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);

  blockTokenSecretManager = createBlockTokenSecretManager(conf);

  this.maxCorruptFilesReturned = conf.getInt(
    DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY,
    DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED);
  this.defaultReplication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
                                        DFSConfigKeys.DFS_REPLICATION_DEFAULT);

  // Validate min/max replication before narrowing to short:
  // 0 < minR <= maxR <= Short.MAX_VALUE must hold.
  final int maxR = conf.getInt(DFSConfigKeys.DFS_REPLICATION_MAX_KEY, 
                               DFSConfigKeys.DFS_REPLICATION_MAX_DEFAULT);
  final int minR = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
                               DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
  if (minR <= 0)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
        + " = " + minR + " <= 0");
  if (maxR > Short.MAX_VALUE)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
        + " = " + maxR + " > " + Short.MAX_VALUE);
  if (minR > maxR)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
        + " = " + minR + " > "
        + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
        + " = " + maxR);
  this.minReplication = (short)minR;
  this.maxReplication = (short)maxR;

  this.maxReplicationStreams =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
  this.replicationStreamsHardLimit =
      conf.getInt(
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
  // Rack awareness is only meaningful when a topology script is configured.
  this.shouldCheckForEnoughRacks =
      conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) != null;

  this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);

  // Seconds in config, milliseconds internally.
  this.replicationRecheckInterval = 
    conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
                DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
  
  this.encryptDataTransfer =
      conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY,
          DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
  
  this.maxNumBlocksToLog =
      conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
          DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
  this.numBlocksPerIteration = conf.getInt(
      DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT,
      DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);
  
  LOG.info("defaultReplication         = " + defaultReplication);
  LOG.info("maxReplication             = " + maxReplication);
  LOG.info("minReplication             = " + minReplication);
  LOG.info("maxReplicationStreams      = " + maxReplicationStreams);
  LOG.info("shouldCheckForEnoughRacks  = " + shouldCheckForEnoughRacks);
  LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
  LOG.info("encryptDataTransfer        = " + encryptDataTransfer);
  LOG.info("maxNumBlocksToLog          = " + maxNumBlocksToLog);
}
 
Example 8
Source File: BlockManager.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Constructs the BlockManager, wiring up the datanode/heartbeat managers
 * and reading every replication, invalidation, and block-report tunable
 * from the given configuration. Replication bounds are validated before
 * being narrowed to short.
 *
 * @param namesystem the namesystem this block manager serves
 * @param conf       configuration supplying all tunables read below
 * @throws IOException if the configured min/max replication values are
 *                     out of range or inconsistent with each other
 */
public BlockManager(final Namesystem namesystem, final Configuration conf)
  throws IOException {
  this.namesystem = namesystem;
  datanodeManager = new DatanodeManager(this, namesystem, conf);
  heartbeatManager = datanodeManager.getHeartbeatManager();

  // Seconds in config, milliseconds internally.
  startupDelayBlockDeletionInMs = conf.getLong(
      DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
      DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_DEFAULT) * 1000L;
  invalidateBlocks = new InvalidateBlocks(
      datanodeManager.blockInvalidateLimit, startupDelayBlockDeletionInMs);

  // Compute the map capacity by allocating 2% of total memory
  blocksMap = new BlocksMap(
      LightWeightGSet.computeCapacity(2.0, "BlocksMap"));
  blockplacement = BlockPlacementPolicy.getInstance(
    conf, datanodeManager.getFSClusterStats(),
    datanodeManager.getNetworkTopology(),
    datanodeManager.getHost2DatanodeMap());
  storagePolicySuite = BlockStoragePolicySuite.createDefaultSuite();
  pendingReplications = new PendingReplicationBlocks(conf.getInt(
    DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
    DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);

  blockTokenSecretManager = createBlockTokenSecretManager(conf);

  this.maxCorruptFilesReturned = conf.getInt(
    DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY,
    DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED);
  this.defaultReplication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 
                                        DFSConfigKeys.DFS_REPLICATION_DEFAULT);

  // Validate min/max replication before narrowing to short:
  // 0 < minR <= maxR <= Short.MAX_VALUE must hold.
  final int maxR = conf.getInt(DFSConfigKeys.DFS_REPLICATION_MAX_KEY, 
                               DFSConfigKeys.DFS_REPLICATION_MAX_DEFAULT);
  final int minR = conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,
                               DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
  if (minR <= 0)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
        + " = " + minR + " <= 0");
  if (maxR > Short.MAX_VALUE)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
        + " = " + maxR + " > " + Short.MAX_VALUE);
  if (minR > maxR)
    throw new IOException("Unexpected configuration parameters: "
        + DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY
        + " = " + minR + " > "
        + DFSConfigKeys.DFS_REPLICATION_MAX_KEY
        + " = " + maxR);
  this.minReplication = (short)minR;
  this.maxReplication = (short)maxR;

  this.maxReplicationStreams =
      conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY,
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_DEFAULT);
  this.replicationStreamsHardLimit =
      conf.getInt(
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
          DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
  // Rack awareness is only meaningful when a topology script is configured.
  this.shouldCheckForEnoughRacks =
      conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) != null;

  this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
  this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);

  // Seconds in config, milliseconds internally.
  this.replicationRecheckInterval = 
    conf.getInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 
                DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000L;
  
  this.encryptDataTransfer =
      conf.getBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY,
          DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEFAULT);
  
  this.maxNumBlocksToLog =
      conf.getLong(DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
          DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
  this.numBlocksPerIteration = conf.getInt(
      DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT,
      DFSConfigKeys.DFS_BLOCK_MISREPLICATION_PROCESSING_LIMIT_DEFAULT);
  
  LOG.info("defaultReplication         = " + defaultReplication);
  LOG.info("maxReplication             = " + maxReplication);
  LOG.info("minReplication             = " + minReplication);
  LOG.info("maxReplicationStreams      = " + maxReplicationStreams);
  LOG.info("shouldCheckForEnoughRacks  = " + shouldCheckForEnoughRacks);
  LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
  LOG.info("encryptDataTransfer        = " + encryptDataTransfer);
  LOG.info("maxNumBlocksToLog          = " + maxNumBlocksToLog);
}