Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#resetLastUpdatesWithOffset()

The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#resetLastUpdatesWithOffset() . You can vote up the examples you find helpful or vote down those you don't, and follow the links above each example to the original project or source file. You can also check out the related API usage in the sidebar.
Example 1
Source File: TestReplicationPolicy.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that chooseTarget avoids a stale datanode: with dataNodes[0]
 * marked stale, the single chosen target must be dataNodes[1] (the
 * non-stale node sharing the writer's rack), and with dataNodes[1]
 * excluded the target must come from a different rack.
 *
 * @throws Exception if namenode state cannot be updated
 */
@Test
public void testChooseTargetWithStaleNodes() throws Exception {
  // Set dataNodes[0] as stale: push its last-update time past the stale
  // interval, then run a heartbeat check so the namenode notices.
  DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[0], -(staleInterval + 1));
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
  assertTrue(namenode.getNamesystem().getBlockManager()
      .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
  DatanodeStorageInfo[] targets;
  // We set the datanode[0] as stale, thus should choose datanode[1] since
  // datanode[1] is on the same rack with datanode[0] (writer)
  targets = chooseTarget(1);
  // assertEquals takes (expected, actual) so failure messages read correctly.
  assertEquals(1, targets.length);
  assertEquals(storages[1], targets[0]);

  // With dataNodes[1] excluded as well, both same-rack candidates are
  // unavailable, so the chosen target must be off-rack.
  Set<Node> excludedNodes = new HashSet<Node>();
  excludedNodes.add(dataNodes[1]);
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  targets = chooseTarget(1, chosenNodes, excludedNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));

  // reset: restore dataNodes[0] to fresh so later tests see no stale nodes
  DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[0], 0);
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
 
Example 2
Source File: TestReplicationPolicy.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that chooseTarget avoids a stale datanode: with dataNodes[0]
 * marked stale, the single chosen target must be dataNodes[1] (the
 * non-stale node sharing the writer's rack), and with dataNodes[1]
 * excluded the target must come from a different rack.
 *
 * @throws Exception if namenode state cannot be updated
 */
@Test
public void testChooseTargetWithStaleNodes() throws Exception {
  // Set dataNodes[0] as stale: push its last-update time past the stale
  // interval, then run a heartbeat check so the namenode notices.
  DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[0], -(staleInterval + 1));
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
  assertTrue(namenode.getNamesystem().getBlockManager()
      .getDatanodeManager().shouldAvoidStaleDataNodesForWrite());
  DatanodeStorageInfo[] targets;
  // We set the datanode[0] as stale, thus should choose datanode[1] since
  // datanode[1] is on the same rack with datanode[0] (writer)
  targets = chooseTarget(1);
  // assertEquals takes (expected, actual) so failure messages read correctly.
  assertEquals(1, targets.length);
  assertEquals(storages[1], targets[0]);

  // With dataNodes[1] excluded as well, both same-rack candidates are
  // unavailable, so the chosen target must be off-rack.
  Set<Node> excludedNodes = new HashSet<Node>();
  excludedNodes.add(dataNodes[1]);
  List<DatanodeStorageInfo> chosenNodes = new ArrayList<DatanodeStorageInfo>();
  targets = chooseTarget(1, chosenNodes, excludedNodes);
  assertEquals(1, targets.length);
  assertFalse(isOnSameRack(targets[0], dataNodes[0]));

  // reset: restore dataNodes[0] to fresh so later tests see no stale nodes
  DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[0], 0);
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
 
Example 3
Source File: TestReplicationPolicy.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * In this testcase, we set 3 nodes (dataNodes[0] ~ dataNodes[2]) as stale,
 * and when the number of replicas is less or equal to 3, all the healthy
 * datanodes should be returned by the chooseTarget method. When the number
 * of replicas is 4, a stale node should be included.
 *
 * @throws Exception if namenode state cannot be updated
 */
@Test
public void testChooseTargetWithHalfStaleNodes() throws Exception {
  // Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale
  for (int i = 0; i < 3; i++) {
    DFSTestUtil
        .resetLastUpdatesWithOffset(dataNodes[i], -(staleInterval + 1));
  }
  // Run a heartbeat check so the datanode manager observes the staleness.
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();

  // assertEquals takes (expected, actual) so failure messages read correctly.
  DatanodeStorageInfo[] targets = chooseTarget(0);
  assertEquals(0, targets.length);

  // Since we have 6 datanodes total, stale nodes should
  // not be returned until we ask for more than 3 targets
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));

  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
  assertFalse(containsWithinRange(targets[1], dataNodes, 0, 2));

  // With exactly 3 targets every choice must be a healthy node (3..5).
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertTrue(containsWithinRange(targets[0], dataNodes, 3, 5));
  assertTrue(containsWithinRange(targets[1], dataNodes, 3, 5));
  assertTrue(containsWithinRange(targets[2], dataNodes, 3, 5));

  // With 4 targets the 3 healthy nodes must all appear; the 4th is stale.
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertTrue(containsWithinRange(dataNodes[3], targets, 0, 3));
  assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
  assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));

  // Restore all datanodes to fresh so later tests see no stale nodes.
  for (int i = 0; i < dataNodes.length; i++) {
    DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
  }
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
 
Example 4
Source File: TestReplicationPolicy.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Verifies that chooseReplicaToDelete picks replicas based on block
 * locality and free space: nodes on an over-replicated rack are considered
 * first, and within a candidate set the replica with the least remaining
 * space is deleted.
 *
 * @throws Exception on unexpected test-harness failures
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
  final Map<String, List<DatanodeStorageInfo>> rackMap
      = new HashMap<String, List<DatanodeStorageInfo>>();

  // Give each replica holder a distinct remaining space so the
  // least-free node within each set is unambiguous.
  dataNodes[0].setRemaining(4*1024*1024);
  replicaList.add(storages[0]);

  dataNodes[1].setRemaining(3*1024*1024);
  replicaList.add(storages[1]);

  dataNodes[2].setRemaining(2*1024*1024);
  replicaList.add(storages[2]);

  dataNodes[5].setRemaining(1*1024*1024);
  replicaList.add(storages[5]);

  // Refresh the last update time for all the datanodes
  for (int i = 0; i < dataNodes.length; i++) {
    DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
  }

  List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
  List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
  replicator.splitNodesWithRack(replicaList, rackMap, first, second);
  // storages[0] and storages[1] are in first set as their rack has two
  // replica nodes, while storages[2] and dataNodes[5] are in second set.
  assertEquals(2, first.size());
  assertEquals(2, second.size());
  List<StorageType> excessTypes = new ArrayList<StorageType>();
  {
    // test returning null: no replica matches the SSD excess type
    excessTypes.add(StorageType.SSD);
    assertNull(replicator.chooseReplicaToDelete(
        null, null, (short)3, first, second, excessTypes));
  }
  excessTypes.add(StorageType.DEFAULT);
  DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
      null, null, (short)3, first, second, excessTypes);
  // Within first set, storages[1] with less free space.
  // assertEquals takes (expected, actual) so failure messages read correctly.
  assertEquals(storages[1], chosen);

  replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
  assertEquals(0, first.size());
  assertEquals(3, second.size());
  // Within second set, storages[5] with less free space.
  // NOTE(review): DEFAULT is added again here — presumably the previous
  // chooseReplicaToDelete call consumed the matched excess type; confirm
  // against BlockPlacementPolicy sources.
  excessTypes.add(StorageType.DEFAULT);
  chosen = replicator.chooseReplicaToDelete(
      null, null, (short)2, first, second, excessTypes);
  assertEquals(storages[5], chosen);
}
 
Example 5
Source File: TestBlockInfoUnderConstruction.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Exercises BlockInfoContiguousUnderConstruction#initializeBlockRecovery
 * across four recovery attempts, checking which replica receives the
 * lease-recovery command as the datanodes' last-heartbeat offsets change.
 *
 * @throws Exception on unexpected test-harness failures
 */
@Test
public void testInitializeBlockRecovery() throws Exception {
  // Three replicas on three distinct datanodes.
  DatanodeStorageInfo s1 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.1", "s1");
  DatanodeDescriptor dd1 = s1.getDatanodeDescriptor();
  DatanodeStorageInfo s2 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.2", "s2");
  DatanodeDescriptor dd2 = s2.getDatanodeDescriptor();
  DatanodeStorageInfo s3 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.3", "s3");
  DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();

  dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
  BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
      new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
      (short) 3,
      BlockUCState.UNDER_CONSTRUCTION,
      new DatanodeStorageInfo[] {s1, s2, s3});

  // Recovery attempt #1: dd2 has the most recent heartbeat and is picked.
  // assertEquals takes (expected, actual) so failure messages read correctly.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -3 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000);
  blockInfo.initializeBlockRecovery(1);
  BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
  assertEquals(blockInfo, blockInfoRecovery[0]);

  // Recovery attempt #2: dd1 receives the command even though dd2's
  // heartbeat is fresher — presumably the primary rotates away from the
  // previously chosen node; confirm against initializeBlockRecovery.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(2);
  blockInfoRecovery = dd1.getLeaseRecoveryCommand(1);
  assertEquals(blockInfo, blockInfoRecovery[0]);

  // Recovery attempt #3: the rotation reaches dd3.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfo, blockInfoRecovery[0]);

  // Recovery attempt #4.
  // Reset everything. And again pick DN with most recent heart beat.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfo, blockInfoRecovery[0]);
}
 
Example 6
Source File: TestReplicationPolicy.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * In this testcase, we set 3 nodes (dataNodes[0] ~ dataNodes[2]) as stale,
 * and when the number of replicas is less or equal to 3, all the healthy
 * datanodes should be returned by the chooseTarget method. When the number
 * of replicas is 4, a stale node should be included.
 *
 * @throws Exception if namenode state cannot be updated
 */
@Test
public void testChooseTargetWithHalfStaleNodes() throws Exception {
  // Set dataNodes[0], dataNodes[1], and dataNodes[2] as stale
  for (int i = 0; i < 3; i++) {
    DFSTestUtil
        .resetLastUpdatesWithOffset(dataNodes[i], -(staleInterval + 1));
  }
  // Run a heartbeat check so the datanode manager observes the staleness.
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();

  // assertEquals takes (expected, actual) so failure messages read correctly.
  DatanodeStorageInfo[] targets = chooseTarget(0);
  assertEquals(0, targets.length);

  // Since we have 6 datanodes total, stale nodes should
  // not be returned until we ask for more than 3 targets
  targets = chooseTarget(1);
  assertEquals(1, targets.length);
  assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));

  targets = chooseTarget(2);
  assertEquals(2, targets.length);
  assertFalse(containsWithinRange(targets[0], dataNodes, 0, 2));
  assertFalse(containsWithinRange(targets[1], dataNodes, 0, 2));

  // With exactly 3 targets every choice must be a healthy node (3..5).
  targets = chooseTarget(3);
  assertEquals(3, targets.length);
  assertTrue(containsWithinRange(targets[0], dataNodes, 3, 5));
  assertTrue(containsWithinRange(targets[1], dataNodes, 3, 5));
  assertTrue(containsWithinRange(targets[2], dataNodes, 3, 5));

  // With 4 targets the 3 healthy nodes must all appear; the 4th is stale.
  targets = chooseTarget(4);
  assertEquals(4, targets.length);
  assertTrue(containsWithinRange(dataNodes[3], targets, 0, 3));
  assertTrue(containsWithinRange(dataNodes[4], targets, 0, 3));
  assertTrue(containsWithinRange(dataNodes[5], targets, 0, 3));

  // Restore all datanodes to fresh so later tests see no stale nodes.
  for (int i = 0; i < dataNodes.length; i++) {
    DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
  }
  namenode.getNamesystem().getBlockManager()
    .getDatanodeManager().getHeartbeatManager().heartbeatCheck();
}
 
Example 7
Source File: TestReplicationPolicy.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Verifies that chooseReplicaToDelete picks replicas based on block
 * locality and free space: nodes on an over-replicated rack are considered
 * first, and within a candidate set the replica with the least remaining
 * space is deleted.
 *
 * @throws Exception on unexpected test-harness failures
 */
@Test
public void testChooseReplicaToDelete() throws Exception {
  List<DatanodeStorageInfo> replicaList = new ArrayList<DatanodeStorageInfo>();
  final Map<String, List<DatanodeStorageInfo>> rackMap
      = new HashMap<String, List<DatanodeStorageInfo>>();

  // Give each replica holder a distinct remaining space so the
  // least-free node within each set is unambiguous.
  dataNodes[0].setRemaining(4*1024*1024);
  replicaList.add(storages[0]);

  dataNodes[1].setRemaining(3*1024*1024);
  replicaList.add(storages[1]);

  dataNodes[2].setRemaining(2*1024*1024);
  replicaList.add(storages[2]);

  dataNodes[5].setRemaining(1*1024*1024);
  replicaList.add(storages[5]);

  // Refresh the last update time for all the datanodes
  for (int i = 0; i < dataNodes.length; i++) {
    DFSTestUtil.resetLastUpdatesWithOffset(dataNodes[i], 0);
  }

  List<DatanodeStorageInfo> first = new ArrayList<DatanodeStorageInfo>();
  List<DatanodeStorageInfo> second = new ArrayList<DatanodeStorageInfo>();
  replicator.splitNodesWithRack(replicaList, rackMap, first, second);
  // storages[0] and storages[1] are in first set as their rack has two
  // replica nodes, while storages[2] and dataNodes[5] are in second set.
  assertEquals(2, first.size());
  assertEquals(2, second.size());
  List<StorageType> excessTypes = new ArrayList<StorageType>();
  {
    // test returning null: no replica matches the SSD excess type
    excessTypes.add(StorageType.SSD);
    assertNull(replicator.chooseReplicaToDelete(
        null, null, (short)3, first, second, excessTypes));
  }
  excessTypes.add(StorageType.DEFAULT);
  DatanodeStorageInfo chosen = replicator.chooseReplicaToDelete(
      null, null, (short)3, first, second, excessTypes);
  // Within first set, storages[1] with less free space.
  // assertEquals takes (expected, actual) so failure messages read correctly.
  assertEquals(storages[1], chosen);

  replicator.adjustSetsWithChosenReplica(rackMap, first, second, chosen);
  assertEquals(0, first.size());
  assertEquals(3, second.size());
  // Within second set, storages[5] with less free space.
  // NOTE(review): DEFAULT is added again here — presumably the previous
  // chooseReplicaToDelete call consumed the matched excess type; confirm
  // against BlockPlacementPolicy sources.
  excessTypes.add(StorageType.DEFAULT);
  chosen = replicator.chooseReplicaToDelete(
      null, null, (short)2, first, second, excessTypes);
  assertEquals(storages[5], chosen);
}
 
Example 8
Source File: TestBlockInfoUnderConstruction.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Exercises BlockInfoContiguousUnderConstruction#initializeBlockRecovery
 * across four recovery attempts, checking which replica receives the
 * lease-recovery command as the datanodes' last-heartbeat offsets change.
 *
 * @throws Exception on unexpected test-harness failures
 */
@Test
public void testInitializeBlockRecovery() throws Exception {
  // Three replicas on three distinct datanodes.
  DatanodeStorageInfo s1 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.1", "s1");
  DatanodeDescriptor dd1 = s1.getDatanodeDescriptor();
  DatanodeStorageInfo s2 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.2", "s2");
  DatanodeDescriptor dd2 = s2.getDatanodeDescriptor();
  DatanodeStorageInfo s3 = DFSTestUtil.createDatanodeStorageInfo("10.10.1.3", "s3");
  DatanodeDescriptor dd3 = s3.getDatanodeDescriptor();

  dd1.isAlive = dd2.isAlive = dd3.isAlive = true;
  BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
      new Block(0, 0, GenerationStamp.LAST_RESERVED_STAMP),
      (short) 3,
      BlockUCState.UNDER_CONSTRUCTION,
      new DatanodeStorageInfo[] {s1, s2, s3});

  // Recovery attempt #1: dd2 has the most recent heartbeat and is picked.
  // assertEquals takes (expected, actual) so failure messages read correctly.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -3 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -2 * 1000);
  blockInfo.initializeBlockRecovery(1);
  BlockInfoContiguousUnderConstruction[] blockInfoRecovery = dd2.getLeaseRecoveryCommand(1);
  assertEquals(blockInfo, blockInfoRecovery[0]);

  // Recovery attempt #2: dd1 receives the command even though dd2's
  // heartbeat is fresher — presumably the primary rotates away from the
  // previously chosen node; confirm against initializeBlockRecovery.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(2);
  blockInfoRecovery = dd1.getLeaseRecoveryCommand(1);
  assertEquals(blockInfo, blockInfoRecovery[0]);

  // Recovery attempt #3: the rotation reaches dd3.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, -3 * 1000);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfo, blockInfoRecovery[0]);

  // Recovery attempt #4.
  // Reset everything. And again pick DN with most recent heart beat.
  DFSTestUtil.resetLastUpdatesWithOffset(dd1, -2 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd2, -1 * 1000);
  DFSTestUtil.resetLastUpdatesWithOffset(dd3, 0);
  blockInfo.initializeBlockRecovery(3);
  blockInfoRecovery = dd3.getLeaseRecoveryCommand(1);
  assertEquals(blockInfo, blockInfoRecovery[0]);
}