Java Code Examples for org.apache.hadoop.hdfs.server.namenode.FSNamesystem#readLock()

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.FSNamesystem#readLock(). Each example is taken from an open-source project; the source file and project are noted above the code. The method acquires the namesystem's read lock, which must be released with a matching readUnlock() when the read is done.
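All of the examples share the same shape, sketched below. The method name and placeholder body here are hypothetical, not taken from any of the projects; only the readLock()/readUnlock() pairing comes from the examples that follow.

// Minimal sketch of the shared pattern: acquire the namesystem read lock,
// read the state you need, and always release the lock in a finally block.
void readNamesystemState(final FSNamesystem ns) {
  ns.readLock();   // blocks writers; other readers may proceed concurrently
  try {
    // ... read FSNamesystem or BlockManager state here ...
  } finally {
    ns.readUnlock();  // released even if the read throws
  }
}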
Example 1
Source File: BlockManagerTestUtil.java    From hadoop with Apache License 2.0
/** @return the datanode descriptor for the given storageID. */
public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
    final String storageID) {
  ns.readLock();
  try {
    return ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
  } finally {
    ns.readUnlock();
  }
}
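The lock scope here is deliberately minimal: it covers only the map lookup in the DatanodeManager, so a concurrent writer (for example, a datanode registering or being removed) cannot change the datanode map mid-lookup.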
 
Example 2
Source File: BlockManagerTestUtil.java    From hadoop with Apache License 2.0
/**
 * @return a tuple describing the replica state of the given block: the
 * number of racks, the number of live replicas, and a 0/1 flag indicating
 * whether the block still needs replication.
 */
public static int[] getReplicaInfo(final FSNamesystem namesystem, final Block b) {
  final BlockManager bm = namesystem.getBlockManager();
  namesystem.readLock();
  try {
    return new int[]{getNumberOfRacks(bm, b),
        bm.countNodes(b).liveReplicas(),
        bm.neededReplications.contains(b) ? 1 : 0};
  } finally {
    namesystem.readUnlock();
  }
}
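The returned array is positional. A caller (illustrative only; these variable names are not from the source) might unpack it like this:

int[] info = getReplicaInfo(namesystem, block);
int racks = info[0];                      // racks holding a replica
int liveReplicas = info[1];               // replicas on live datanodes
boolean needsReplication = info[2] == 1;  // block sits in neededReplications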
 
Example 3
Source File: TestNodeCount.java    From hadoop with Apache License 2.0
NumberReplicas countNodes(Block block, FSNamesystem namesystem) {
  namesystem.readLock();
  try {
    // lastBlock and lastNum are fields of the enclosing test class,
    // recording the most recently inspected block and its replica counts.
    lastBlock = block;
    lastNum = namesystem.getBlockManager().countNodes(block);
    return lastNum;
  } finally {
    namesystem.readUnlock();
  }
}
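Holding the read lock around countNodes() matters because the block-to-datanode mapping is mutated concurrently by block reports and heartbeats; the lock guarantees the returned NumberReplicas reflects a single consistent snapshot.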
 
Example 4
Source File: BlockManagerTestUtil.java    From big-c with Apache License 2.0
/** @return the datanode descriptor for the given storageID. */
public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
    final String storageID) {
  ns.readLock();
  try {
    return ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
  } finally {
    ns.readUnlock();
  }
}
 
Example 5
Source File: BlockManagerTestUtil.java    From big-c with Apache License 2.0
/**
 * @return a tuple describing the replica state of the given block: the
 * number of racks, the number of live replicas, and a 0/1 flag indicating
 * whether the block still needs replication.
 */
public static int[] getReplicaInfo(final FSNamesystem namesystem, final Block b) {
  final BlockManager bm = namesystem.getBlockManager();
  namesystem.readLock();
  try {
    return new int[]{getNumberOfRacks(bm, b),
        bm.countNodes(b).liveReplicas(),
        bm.neededReplications.contains(b) ? 1 : 0};
  } finally {
    namesystem.readUnlock();
  }
}
 
Example 6
Source File: TestNodeCount.java    From big-c with Apache License 2.0
NumberReplicas countNodes(Block block, FSNamesystem namesystem) {
  namesystem.readLock();
  try {
    // lastBlock and lastNum are fields of the enclosing test class,
    // recording the most recently inspected block and its replica counts.
    lastBlock = block;
    lastNum = namesystem.getBlockManager().countNodes(block);
    return lastNum;
  } finally {
    namesystem.readUnlock();
  }
}
 
Example 7
Source File: TestOverReplicatedBlocks.java    From hadoop with Apache License 2.0
/**
 * The test verifies that the replica chosen for deletion is on the node
 * with the oldest heartbeat, when that heartbeat is older than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 *
 * The test waits until the tolerable heartbeat interval expires, then reduces
 * the replication of the file. All replica deletions should be scheduled on
 * the last node. No replicas will actually be deleted, since the last DN
 * doesn't send heartbeats.
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
    cluster.startDataNodes(conf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        lastDN, namesystem.getBlockPoolId());
    String lastDNid = dnReg.getDatanodeUuid();

    final Path fileName = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)4);

    // Wait for tolerable number of heartbeats plus one
    DatanodeDescriptor nodeInfo = null;
    long lastHeartbeat = 0;
    long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
      (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
    do {
      nodeInfo = namesystem.getBlockManager().getDatanodeManager()
          .getDatanode(dnReg);
      lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
    } while (monotonicNow() - lastHeartbeat < waitTime);
    fs.setReplication(fileName, (short)3);

    BlockLocation[] locs = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);

    // All replica deletions should be scheduled on lastDN, and none should
    // actually happen, because lastDN does not send heartbeats.
    namesystem.readLock();
    Collection<Block> dnBlocks =
        namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
    assertEquals("Replicas on node " + lastDNid
        + " should have been scheduled for deletion",
        SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
    namesystem.readUnlock();
    for (BlockLocation location : locs) {
      assertEquals("Block should still have 4 replicas",
          4, location.getNames().length);
    }
  } finally {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
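Note that, unlike the utility methods above, this test does not release the read lock in a finally block: if the assertion between readLock() and readUnlock() fails, the lock is never released. That is tolerable in a test that is about to abort anyway, but production code should always pair readLock() with a readUnlock() in a finally block.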
 
Example 8
Source File: TestOverReplicatedBlocks.java    From big-c with Apache License 2.0
/**
 * The test verifies that the replica chosen for deletion is on the node
 * with the oldest heartbeat, when that heartbeat is older than the
 * tolerable heartbeat interval.
 * It creates a file with several blocks and replication 4.
 * The last DN is configured to send heartbeats rarely.
 *
 * The test waits until the tolerable heartbeat interval expires, then reduces
 * the replication of the file. All replica deletions should be scheduled on
 * the last node. No replicas will actually be deleted, since the last DN
 * doesn't send heartbeats.
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, SMALL_BLOCK_SIZE);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    fs = cluster.getFileSystem();
    final FSNamesystem namesystem = cluster.getNamesystem();

    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 300);
    cluster.startDataNodes(conf, 1, true, null, null, null);
    DataNode lastDN = cluster.getDataNodes().get(3);
    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
        lastDN, namesystem.getBlockPoolId());
    String lastDNid = dnReg.getDatanodeUuid();

    final Path fileName = new Path("/foo2");
    DFSTestUtil.createFile(fs, fileName, SMALL_FILE_LENGTH, (short)4, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)4);

    // Wait for tolerable number of heartbeats plus one
    DatanodeDescriptor nodeInfo = null;
    long lastHeartbeat = 0;
    long waitTime = DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT * 1000 *
      (DFSConfigKeys.DFS_NAMENODE_TOLERATE_HEARTBEAT_MULTIPLIER_DEFAULT + 1);
    do {
      nodeInfo = namesystem.getBlockManager().getDatanodeManager()
          .getDatanode(dnReg);
      lastHeartbeat = nodeInfo.getLastUpdateMonotonic();
    } while (monotonicNow() - lastHeartbeat < waitTime);
    fs.setReplication(fileName, (short)3);

    BlockLocation[] locs = fs.getFileBlockLocations(
        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);

    // All replica deletions should be scheduled on lastDN, and none should
    // actually happen, because lastDN does not send heartbeats.
    namesystem.readLock();
    Collection<Block> dnBlocks =
        namesystem.getBlockManager().excessReplicateMap.get(lastDNid);
    assertEquals("Replicas on node " + lastDNid
        + " should have been scheduled for deletion",
        SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks.size());
    namesystem.readUnlock();
    for (BlockLocation location : locs) {
      assertEquals("Block should still have 4 replicas",
          4, location.getNames().length);
    }
  } finally {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}