Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#createDatanodeStorageInfo()

The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#createDatanodeStorageInfo(). They are taken from the Apache Hadoop test suite; the source file and license are noted above each example.
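Taken together, the call sites below exercise two overloads of this helper: a two-argument form taking a storage ID and an IP address, and a four-argument form that additionally takes a network location (rack path) and a hostname. The sketch below summarizes both call shapes; the parameter roles are inferred from the example call sites rather than from the DFSTestUtil declaration, so treat them as an assumption.

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;

public class CreateDatanodeStorageInfoSketch {
  public static void main(String[] args) {
    // Two-argument form, (storageID, ip), as in the TestBlockInfo examples.
    DatanodeStorageInfo simple =
        DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");

    // Four-argument form, (storageID, ip, rack, hostname), as in the
    // TestReplicationPolicy example; "/d2/r3" reads as a rack path.
    DatanodeStorageInfo withTopology = DFSTestUtil.createDatanodeStorageInfo(
        "DS-xxxx", "7.7.7.7", "/d2/r3", "host7");

    // Each storage created this way is backed by a DatanodeDescriptor for
    // the fake datanode, which the tests below rely on.
    DatanodeDescriptor dn = withTopology.getDatanodeDescriptor();
    System.out.println(simple + " on " + dn);
  }
}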
Example 1
Source File: TestBlockInfo.java    From hadoop with Apache License 2.0
@Test
public void testReplaceStorage() throws Exception {

  // Create two dummy storages.
  final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
  final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
  final int NUM_BLOCKS = 10;
  BlockInfoContiguous[] blockInfos = new BlockInfoContiguous[NUM_BLOCKS];

  // Create a few dummy blocks and add them to the first storage.
  for (int i = 0; i < NUM_BLOCKS; ++i) {
    blockInfos[i] = new BlockInfoContiguous((short) 3);
    storage1.addBlock(blockInfos[i]);
  }

  // Try to move one of the blocks to a different storage.
  boolean added =
      storage2.addBlock(blockInfos[NUM_BLOCKS / 2]) == AddBlockResult.ADDED;
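  // storage2 is a second storage on the same datanode, so addBlock() reports
  // AddBlockResult.REPLACED rather than ADDED: 'added' is false, yet the
  // block's storage entry now points at storage2.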
  Assert.assertThat(added, is(false));
  Assert.assertThat(blockInfos[NUM_BLOCKS / 2].getStorageInfo(0), is(storage2));
}
 
Example 2
Source File: TestBlockInfo.java    From hadoop with Apache License 2.0
@Test
public void testAddStorage() throws Exception {
  BlockInfoContiguous blockInfo = new BlockInfoContiguous((short) 3);

  final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");

  boolean added = blockInfo.addStorage(storage);
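  // addStorage() returns true here: this is the first storage recorded
  // for the block, so the add is a genuine insertion.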

  Assert.assertTrue(added);
  Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
}
 
Example 3
Source File: TestReplicationPolicy.java    From hadoop with Apache License 2.0
/**
 * In this test case there are enough nodes in total, but only
 * one rack is actually available.
 * @throws Exception
 */
@Test
public void testChooseTarget6() throws Exception {
  DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo(
      "DS-xxxx", "7.7.7.7", "/d2/r3", "host7");
  DatanodeDescriptor newDn = storage.getDatanodeDescriptor();
  Set<Node> excludedNodes;
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();

  excludedNodes = new HashSet<Node>();
  excludedNodes.add(dataNodes[0]);
  excludedNodes.add(dataNodes[1]);
  excludedNodes.add(dataNodes[2]);
  excludedNodes.add(dataNodes[3]);

  DatanodeStorageInfo[] targets;
  // Only two nodes available in a rack. Try picking two nodes. Only one
  // should return.
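  // chooseTarget(...) is TestReplicationPolicy's local wrapper around the
  // configured BlockPlacementPolicy; anything in excludedNodes must not be
  // returned as a replica target.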
  targets = chooseTarget(2, chosenNodes, excludedNodes);
  assertEquals(1, targets.length);

  // Make three nodes available in a rack.
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  bm.getDatanodeManager().getNetworkTopology().add(newDn);
  bm.getDatanodeManager().getHeartbeatManager().addDatanode(newDn);
  updateHeartbeatWithUsage(newDn,
      2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
      2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
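  // The two non-zero arguments size capacity and remaining space at
  // 2 * MIN_BLOCKS_FOR_WRITE * BLOCK_SIZE so newDn qualifies as a write
  // target; the trailing zeros are usage/cache/xceiver/failure counters
  // (roles inferred from the values, not from the helper's declaration).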

  // Try picking three nodes. Only two should return.
  excludedNodes.clear();
  excludedNodes.add(dataNodes[0]);
  excludedNodes.add(dataNodes[1]);
  excludedNodes.add(dataNodes[2]);
  excludedNodes.add(dataNodes[3]);
  chosenNodes.clear();
  try {
    targets = chooseTarget(3, chosenNodes, excludedNodes);
    assertEquals(2, targets.length);
  } finally {
    bm.getDatanodeManager().getNetworkTopology().remove(newDn);
  }
  resetHeartbeatForStorages();
}
 
Example 4
Source File: TestBlockInfoUnderConstruction.java    From hadoop with Apache License 2.0
@Test
public void testInitializeBlockRecovery() throws Exception {
  DatanodeStorageInfo s1 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.1", "s1");
  DatanodeDescriptor dd1 = s1.getDatanodeDescriptor();
  DatanodeStorageInfo s2 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.2", "s2");
  DatanodeDescriptor dd2 = s2.getDatanodeDescriptor();
  DatanodeStorageInfo s3 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.3", "s3");
  DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();
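  // Note: compared with the other examples, the (storageID, ip) arguments
  // above appear transposed; this mirrors the original test source and is
  // harmless here, since both parameters are plain strings.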

  dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
  BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
      new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
      (short) 3,
      BlockUCState.UNDER_CONSTRUCTION,
      new DatanodeStorageInfo[] {s1, s2, s3});
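
  // initializeBlockRecovery() selects a primary datanode for lease recovery:
  // it prefers the live replica with the most recent heartbeat, but skips
  // replicas already chosen as primary on earlier attempts until every
  // replica has had a turn (hence dd2, then dd1, then dd3 below).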

  // Recovery attempt #1.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -3 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000);
  blockInfo.initializeBlockRecovery(1);
  BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #2.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(2);
  blockInfoRecovery = dd1.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #3.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);

  // Recovery attempt #4.
  // Reset everything, and again pick the DN with the most recent heartbeat.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfoRecovery[0], blockInfo);
}
 
Example 5
Source File: TestBlockInfo.java    From hadoop with Apache License 2.0
@Test
public void testBlockListMoveToHead() throws Exception {
  LOG.info("BlockInfo moveToHead tests...");

  final int MAX_BLOCKS = 10;

  DatanodeStorageInfo dd = DFSTestUtil.createDatanodeStorageInfo("s1", "1.1.1.1");
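  // Blocks on a storage form an intrusive doubly linked list threaded through
  // each BlockInfo; moveBlockToHead() is the operation block report processing
  // uses to pull a just-reported block to the front of that list.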
  ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
  ArrayList<BlockInfoContiguous> blockInfoList = new ArrayList<BlockInfoContiguous>();
  int headIndex;
  int curIndex;

  LOG.info("Building block list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    blockList.add(new Block(i, 0, GenerationStamp.LAST_RESERVED_STAMP));
    blockInfoList.add(new BlockInfoContiguous(blockList.get(i), (short) 3));
    dd.addBlock(blockInfoList.get(i));

    // index of the datanode should be 0
    assertEquals("Find datanode should be 0", 0, blockInfoList.get(i)
        .findStorageInfo(dd));
  }

  // list length should be equal to the number of blocks we inserted
  LOG.info("Checking list length...");
  assertEquals("Length should be MAX_BLOCK", MAX_BLOCKS, dd.numBlocks());
  Iterator<BlockInfoContiguous> it = dd.getBlockIterator();
  int len = 0;
  while (it.hasNext()) {
    it.next();
    len++;
  }
  assertEquals("There should be MAX_BLOCK blockInfo's", MAX_BLOCKS, len);

  headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);

  LOG.info("Moving each block to the head of the list...");
  for (int i = 0; i < MAX_BLOCKS; i++) {
    curIndex = blockInfoList.get(i).findStorageInfo(dd);
    headIndex = dd.moveBlockToHead(blockInfoList.get(i), curIndex, headIndex);
    // the moved element must be at the head of the list
    assertEquals("Block should be at the head of the list now.",
        blockInfoList.get(i), dd.getBlockListHeadForTesting());
  }

  // move head of the list to the head - this should not change the list
  LOG.info("Moving head to the head...");

  BlockInfoContiguous temp = dd.getBlockListHeadForTesting();
  curIndex = 0;
  headIndex = 0;
  dd.moveBlockToHead(temp, curIndex, headIndex);
  assertEquals(
      "Moving head to the head of the list shopuld not change the list",
      temp, dd.getBlockListHeadForTesting());

  // check all elements of the list against the original blockInfoList
  LOG.info("Checking elements of the list...");
  temp = dd.getBlockListHeadForTesting();
  assertNotNull("Head should not be null", temp);
  int c = MAX_BLOCKS - 1;
  while (temp != null) {
    assertEquals("Expected element is not on the list",
        blockInfoList.get(c--), temp);
    temp = temp.getNext(0);
  }

  LOG.info("Moving random blocks to the head of the list...");
  headIndex = dd.getBlockListHeadForTesting().findStorageInfo(dd);
  Random rand = new Random();
  for (int i = 0; i < MAX_BLOCKS; i++) {
    int j = rand.nextInt(MAX_BLOCKS);
    curIndex = blockInfoList.get(j).findStorageInfo(dd);
    headIndex = dd.moveBlockToHead(blockInfoList.get(j), curIndex, headIndex);
    // the moved element must be at the head of the list
    assertEquals("Block should be at the head of the list now.",
        blockInfoList.get(j), dd.getBlockListHeadForTesting());
  }
}
 