Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#getDatanodeDescriptor()

The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#getDatanodeDescriptor(). You can vote up the examples you find helpful or vote down the ones you don't, and you can go to the original project or source file by following the link above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: TestReplicationPolicy.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * In this testcase, the client is a node outside of the file system.
 * So the 1st replica can be placed on any node,
 * the 2nd replica should be placed on a different rack,
 * and the 3rd replica should be placed on the same rack as the 2nd replica.
 * @throws Exception if target selection fails
 */
@Test
public void testChooseTarget5() throws Exception {
  // Writer on rack /d2/r4 is not itself a datanode in the cluster.
  DatanodeDescriptor writerDesc =
    DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");

  DatanodeStorageInfo[] targets;
  // Zero requested replicas must yield an empty target list.
  targets = chooseTarget(0, writerDesc);
  // FIX: JUnit assertEquals takes (expected, actual); the original had them
  // swapped, which produces misleading failure messages.
  assertEquals(0, targets.length);

  targets = chooseTarget(1, writerDesc);
  assertEquals(1, targets.length);

  targets = chooseTarget(2, writerDesc);
  assertEquals(2, targets.length);
  // 1st and 2nd replicas must land on different racks.
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3, writerDesc);
  assertEquals(3, targets.length);
  // 2nd and 3rd replicas share a rack; 1st is on a different rack.
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
}
 
Example 2
Source File: TestNetworkTopology.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that the topology rejects a leaf added at the same level as an
 * existing rack ("/d1" node beside the "/d1/r1" rack).
 */
@Test
public void testCreateInvalidTopology() throws Exception {
  final NetworkTopology topology = new NetworkTopology();
  final DatanodeDescriptor[] nodes = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1")
  };
  topology.add(nodes[0]);
  topology.add(nodes[1]);
  try {
    // "/d1" puts this node at rack level, which must be rejected.
    topology.add(nodes[2]);
    fail("expected InvalidTopologyException");
  } catch (NetworkTopology.InvalidTopologyException e) {
    assertTrue(e.getMessage().startsWith("Failed to add "));
    assertTrue(e.getMessage().contains(
        "You cannot have a rack and a non-rack node at the same " +
        "level of the network topology."));
  }
}
 
Example 3
Source File: TestReplicationPolicy.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/**
 * In this testcase, the client is a node outside of the file system.
 * So the 1st replica can be placed on any node,
 * the 2nd replica should be placed on a different rack,
 * and the 3rd replica should be placed on the same rack as the 2nd replica.
 * @throws Exception if target selection fails
 */
@Test
public void testChooseTarget5() throws Exception {
  // Writer on rack /d2/r4 is not itself a datanode in the cluster.
  DatanodeDescriptor writerDesc =
    DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r4");

  DatanodeStorageInfo[] targets;
  // Zero requested replicas must yield an empty target list.
  targets = chooseTarget(0, writerDesc);
  // FIX: JUnit assertEquals takes (expected, actual); the original had them
  // swapped, which produces misleading failure messages.
  assertEquals(0, targets.length);

  targets = chooseTarget(1, writerDesc);
  assertEquals(1, targets.length);

  targets = chooseTarget(2, writerDesc);
  assertEquals(2, targets.length);
  // 1st and 2nd replicas must land on different racks.
  assertFalse(isOnSameRack(targets[0], targets[1]));

  targets = chooseTarget(3, writerDesc);
  assertEquals(3, targets.length);
  // 2nd and 3rd replicas share a rack; 1st is on a different rack.
  assertTrue(isOnSameRack(targets[1], targets[2]));
  assertFalse(isOnSameRack(targets[0], targets[1]));
}
 
Example 4
Source File: TestNetworkTopology.java    From big-c with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that the topology rejects a leaf added at the same level as an
 * existing rack ("/d1" node beside the "/d1/r1" rack).
 */
@Test
public void testCreateInvalidTopology() throws Exception {
  final NetworkTopology topology = new NetworkTopology();
  final DatanodeDescriptor[] nodes = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1")
  };
  topology.add(nodes[0]);
  topology.add(nodes[1]);
  try {
    // "/d1" puts this node at rack level, which must be rejected.
    topology.add(nodes[2]);
    fail("expected InvalidTopologyException");
  } catch (NetworkTopology.InvalidTopologyException e) {
    assertTrue(e.getMessage().startsWith("Failed to add "));
    assertTrue(e.getMessage().contains(
        "You cannot have a rack and a non-rack node at the same " +
        "level of the network topology."));
  }
}
 
Example 5
Source File: TestNetworkTopology.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** Every node registered in setup is in the cluster; a foreign node is not. */
@Test
public void testContains() throws Exception {
  final DatanodeDescriptor outsider =
      DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r4");
  for (DatanodeDescriptor dn : dataNodes) {
    assertTrue(cluster.contains(dn));
  }
  assertFalse(cluster.contains(outsider));
}
 
Example 6
Source File: TestHost2NodesMap.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Removes nodes one at a time and checks that: a node never added cannot be
 * removed, host lookups stop resolving once the last node on that host is
 * gone, and both null removal and double removal return false.
 */
@Test
public void testRemove() throws Exception {
  DatanodeDescriptor nodeNotInMap =
    DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  assertFalse(map.remove(nodeNotInMap));

  assertTrue(map.remove(dataNodes[0]));
  // FIX: the original queried "1.1.1.1." (stray trailing dot), so the null
  // check passed trivially; query the host that was actually removed.
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertSame(dataNodes[1], map.getDatanodeByHost("2.2.2.2"));
  DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
  // Two nodes share host 3.3.3.3; either may be returned while both remain.
  assertTrue(node == dataNodes[2] || node == dataNodes[3]);
  assertNull(map.getDatanodeByHost("4.4.4.4"));

  assertTrue(map.remove(dataNodes[2]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertEquals(map.getDatanodeByHost("3.3.3.3"), dataNodes[3]);

  assertTrue(map.remove(dataNodes[3]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertNull(map.getDatanodeByHost("3.3.3.3"));

  // Null removal is a no-op; removing an already-removed node fails.
  assertFalse(map.remove(null));
  assertTrue(map.remove(dataNodes[1]));
  assertFalse(map.remove(dataNodes[1]));
}
 
Example 7
Source File: TestHost2NodesMap.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** All registered nodes are contained; null and a foreign node are not. */
@Test
public void testContains() throws Exception {
  final DatanodeDescriptor outsider =
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  for (DatanodeDescriptor dn : dataNodes) {
    assertTrue(map.contains(dn));
  }
  assertFalse(map.contains(null));
  assertFalse(map.contains(outsider));
}
 
Example 8
Source File: TestHost2NodesMap.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Populates the map with four datanodes, two of which share host 3.3.3.3
 * on different ports, then verifies that adding null is a harmless no-op.
 */
@Before
public void setup() {
  dataNodes = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", 5021, "/d1/r2"),
  };
  for (int i = 0; i < dataNodes.length; i++) {
    map.add(dataNodes[i]);
  }
  // Adding null must not throw and must not change the map.
  map.add(null);
}
 
Example 9
Source File: BlockManagerTestUtil.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Builds a DatanodeDescriptor on the default datanode port and, when a
 * storage is supplied, registers that storage on the descriptor.
 *
 * @param ipAddr IP address of the datanode
 * @param rackLocation network location (rack path) of the datanode
 * @param storage storage to register on the descriptor, or null for none
 * @param hostname hostname of the datanode
 * @return the configured descriptor
 */
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
    String rackLocation, DatanodeStorage storage, String hostname) {
  final DatanodeDescriptor descriptor = DFSTestUtil.getDatanodeDescriptor(
      ipAddr, DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation,
      hostname);
  if (storage != null) {
    descriptor.updateStorage(storage);
  }
  return descriptor;
}
 
Example 10
Source File: TestNetworkTopology.java    From big-c with Apache License 2.0 5 votes vote down vote up
/** Every node registered in setup is in the cluster; a foreign node is not. */
@Test
public void testContains() throws Exception {
  final DatanodeDescriptor outsider =
      DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r4");
  for (DatanodeDescriptor dn : dataNodes) {
    assertTrue(cluster.contains(dn));
  }
  assertFalse(cluster.contains(outsider));
}
 
Example 11
Source File: TestNetworkTopology.java    From big-c with Apache License 2.0 5 votes vote down vote up
/**
 * Registers twenty datanodes spread over four data centers (/d1-/d4) in the
 * cluster topology, then marks two of the /d3/r1 nodes as decommissioned.
 */
@Before
public void setupDatanodes() {
  dataNodes = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
      DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3"),
      DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3"),
      DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d3/r1"),
      DFSTestUtil.getDatanodeDescriptor("10.10.10.10", "/d3/r1"),
      DFSTestUtil.getDatanodeDescriptor("11.11.11.11", "/d3/r1"),
      DFSTestUtil.getDatanodeDescriptor("12.12.12.12", "/d3/r2"),
      DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2"),
      DFSTestUtil.getDatanodeDescriptor("14.14.14.14", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("15.15.15.15", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("16.16.16.16", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("17.17.17.17", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("18.18.18.18", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("19.19.19.19", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("20.20.20.20", "/d4/r1"),
  };
  for (DatanodeDescriptor dn : dataNodes) {
    cluster.add(dn);
  }
  // 10.10.10.10 and 11.11.11.11 start every test decommissioned.
  dataNodes[9].setDecommissioned();
  dataNodes[10].setDecommissioned();
}
 
Example 12
Source File: TestHost2NodesMap.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Removes nodes one at a time and checks that: a node never added cannot be
 * removed, host lookups stop resolving once the last node on that host is
 * gone, and both null removal and double removal return false.
 */
@Test
public void testRemove() throws Exception {
  DatanodeDescriptor nodeNotInMap =
    DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  assertFalse(map.remove(nodeNotInMap));

  assertTrue(map.remove(dataNodes[0]));
  // FIX: the original queried "1.1.1.1." (stray trailing dot), so the null
  // check passed trivially; query the host that was actually removed.
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertSame(dataNodes[1], map.getDatanodeByHost("2.2.2.2"));
  DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
  // Two nodes share host 3.3.3.3; either may be returned while both remain.
  assertTrue(node == dataNodes[2] || node == dataNodes[3]);
  assertNull(map.getDatanodeByHost("4.4.4.4"));

  assertTrue(map.remove(dataNodes[2]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertEquals(map.getDatanodeByHost("3.3.3.3"), dataNodes[3]);

  assertTrue(map.remove(dataNodes[3]));
  assertNull(map.getDatanodeByHost("1.1.1.1"));
  assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
  assertNull(map.getDatanodeByHost("3.3.3.3"));

  // Null removal is a no-op; removing an already-removed node fails.
  assertFalse(map.remove(null));
  assertTrue(map.remove(dataNodes[1]));
  assertFalse(map.remove(dataNodes[1]));
}
 
Example 13
Source File: TestHost2NodesMap.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/** All registered nodes are contained; null and a foreign node are not. */
@Test
public void testContains() throws Exception {
  final DatanodeDescriptor outsider =
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r4");
  for (DatanodeDescriptor dn : dataNodes) {
    assertTrue(map.contains(dn));
  }
  assertFalse(map.contains(null));
  assertFalse(map.contains(outsider));
}
 
Example 14
Source File: TestHost2NodesMap.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Populates the map with four datanodes, two of which share host 3.3.3.3
 * on different ports, then verifies that adding null is a harmless no-op.
 */
@Before
public void setup() {
  dataNodes = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", 5021, "/d1/r2"),
  };
  for (int i = 0; i < dataNodes.length; i++) {
    map.add(dataNodes[i]);
  }
  // Adding null must not throw and must not change the map.
  map.add(null);
}
 
Example 15
Source File: BlockManagerTestUtil.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Builds a DatanodeDescriptor on the default datanode port and, when a
 * storage is supplied, registers that storage on the descriptor.
 *
 * @param ipAddr IP address of the datanode
 * @param rackLocation network location (rack path) of the datanode
 * @param storage storage to register on the descriptor, or null for none
 * @param hostname hostname of the datanode
 * @return the configured descriptor
 */
public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
    String rackLocation, DatanodeStorage storage, String hostname) {
  final DatanodeDescriptor descriptor = DFSTestUtil.getDatanodeDescriptor(
      ipAddr, DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, rackLocation,
      hostname);
  if (storage != null) {
    descriptor.updateStorage(storage);
  }
  return descriptor;
}
 
Example 16
Source File: TestNetworkTopology.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Registers twenty datanodes spread over four data centers (/d1-/d4) in the
 * cluster topology, then marks two of the /d3/r1 nodes as decommissioned.
 */
@Before
public void setupDatanodes() {
  dataNodes = new DatanodeDescriptor[] {
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
      DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d1/r2"),
      DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3"),
      DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/d2/r3"),
      DFSTestUtil.getDatanodeDescriptor("8.8.8.8", "/d2/r3"),
      DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d3/r1"),
      DFSTestUtil.getDatanodeDescriptor("10.10.10.10", "/d3/r1"),
      DFSTestUtil.getDatanodeDescriptor("11.11.11.11", "/d3/r1"),
      DFSTestUtil.getDatanodeDescriptor("12.12.12.12", "/d3/r2"),
      DFSTestUtil.getDatanodeDescriptor("13.13.13.13", "/d3/r2"),
      DFSTestUtil.getDatanodeDescriptor("14.14.14.14", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("15.15.15.15", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("16.16.16.16", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("17.17.17.17", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("18.18.18.18", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("19.19.19.19", "/d4/r1"),
      DFSTestUtil.getDatanodeDescriptor("20.20.20.20", "/d4/r1"),
  };
  for (DatanodeDescriptor dn : dataNodes) {
    cluster.add(dn);
  }
  // 10.10.10.10 and 11.11.11.11 start every test decommissioned.
  dataNodes[9].setDecommissioned();
  dataNodes[10].setDecommissioned();
}
 
Example 17
Source File: TestBlockManager.java    From hadoop with Apache License 2.0 4 votes vote down vote up
/**
 * Shared test body: a block initially on nodes A1, A2 and B1 has all of
 * rack A decommissioned; verifies the replication pipeline's source held the
 * block, its targets avoid decommissioning and original nodes and include
 * rack B, and that a node added on a third rack then receives the remaining
 * off-rack replica.
 *
 * @param testIndex distinguishes the block created for this invocation
 * @throws Exception propagated from the scheduling/replication helpers
 */
private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
  // Block originally on A1, A2, B1
  List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
  List<DatanodeDescriptor> origNodes = getNodes(origStorages);
  BlockInfoContiguous blockInfo = addBlockOnNodes(testIndex, origNodes);
  
  // Decommission all of the nodes in rack A
  List<DatanodeDescriptor> decomNodes = startDecommission(0, 1, 2);
  
  DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
  // pipeline[0] is the replication source and must already hold the block.
  assertTrue("Source of replication should be one of the nodes the block " +
      "was on. Was: " + pipeline[0],
      origStorages.contains(pipeline[0]));
  // Only up to two nodes can be picked per rack when there are two racks.
  assertEquals("Should have two targets", 2, pipeline.length);
  
  boolean foundOneOnRackB = false;
  // Targets (indices >= 1) must avoid decommissioning and original nodes.
  for (int i = 1; i < pipeline.length; i++) {
    DatanodeDescriptor target = pipeline[i].getDatanodeDescriptor();
    if (rackB.contains(target)) {
      foundOneOnRackB = true;
    }
    assertFalse(decomNodes.contains(target));
    assertFalse(origNodes.contains(target));
  }
  
  assertTrue("Should have at least one target on rack B. Pipeline: " +
      Joiner.on(",").join(pipeline),
      foundOneOnRackB);
  
  // Mark the block as received on the target nodes in the pipeline
  fulfillPipeline(blockInfo, pipeline);

  // the block is still under-replicated. Add a new node. This should allow
  // the third off-rack replica.
  DatanodeDescriptor rackCNode =
    DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
  // A descriptor needs at least one storage before it can accept replicas.
  rackCNode.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
  addNodes(ImmutableList.of(rackCNode));
  try {
    DatanodeStorageInfo[] pipeline2 = scheduleSingleReplication(blockInfo);
    // Second pipeline: source plus the new rack-C node as the sole target.
    assertEquals(2, pipeline2.length);
    assertEquals(rackCNode, pipeline2[1].getDatanodeDescriptor());
  } finally {
    // Remove the temporary node so later tests see the original cluster.
    removeNode(rackCNode);
  }
}
 
Example 18
Source File: TestBlockManager.java    From big-c with Apache License 2.0 4 votes vote down vote up
/**
 * Shared test body: a block initially on nodes A1, A2 and B1 has all of
 * rack A decommissioned; verifies the replication pipeline's source held the
 * block, its targets avoid decommissioning and original nodes and include
 * rack B, and that a node added on a third rack then receives the remaining
 * off-rack replica.
 *
 * @param testIndex distinguishes the block created for this invocation
 * @throws Exception propagated from the scheduling/replication helpers
 */
private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
  // Block originally on A1, A2, B1
  List<DatanodeStorageInfo> origStorages = getStorages(0, 1, 3);
  List<DatanodeDescriptor> origNodes = getNodes(origStorages);
  BlockInfoContiguous blockInfo = addBlockOnNodes(testIndex, origNodes);
  
  // Decommission all of the nodes in rack A
  List<DatanodeDescriptor> decomNodes = startDecommission(0, 1, 2);
  
  DatanodeStorageInfo[] pipeline = scheduleSingleReplication(blockInfo);
  // pipeline[0] is the replication source and must already hold the block.
  assertTrue("Source of replication should be one of the nodes the block " +
      "was on. Was: " + pipeline[0],
      origStorages.contains(pipeline[0]));
  // Only up to two nodes can be picked per rack when there are two racks.
  assertEquals("Should have two targets", 2, pipeline.length);
  
  boolean foundOneOnRackB = false;
  // Targets (indices >= 1) must avoid decommissioning and original nodes.
  for (int i = 1; i < pipeline.length; i++) {
    DatanodeDescriptor target = pipeline[i].getDatanodeDescriptor();
    if (rackB.contains(target)) {
      foundOneOnRackB = true;
    }
    assertFalse(decomNodes.contains(target));
    assertFalse(origNodes.contains(target));
  }
  
  assertTrue("Should have at least one target on rack B. Pipeline: " +
      Joiner.on(",").join(pipeline),
      foundOneOnRackB);
  
  // Mark the block as received on the target nodes in the pipeline
  fulfillPipeline(blockInfo, pipeline);

  // the block is still under-replicated. Add a new node. This should allow
  // the third off-rack replica.
  DatanodeDescriptor rackCNode =
    DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
  // A descriptor needs at least one storage before it can accept replicas.
  rackCNode.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
  addNodes(ImmutableList.of(rackCNode));
  try {
    DatanodeStorageInfo[] pipeline2 = scheduleSingleReplication(blockInfo);
    // Second pipeline: source plus the new rack-C node as the sole target.
    assertEquals(2, pipeline2.length);
    assertEquals(rackCNode, pipeline2[1].getDatanodeDescriptor());
  } finally {
    // Remove the temporary node so later tests see the original cluster.
    removeNode(rackCNode);
  }
}