Java Code Examples for org.apache.hadoop.net.StaticMapping#addNodeToRack()

The following examples show how to use org.apache.hadoop.net.StaticMapping#addNodeToRack(). They are taken from open-source projects; the originating project and source file are noted above each example.
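Before the project-specific examples, here is a minimal, self-contained sketch of the basic contract (the host names and rack paths below are illustrative only, not taken from any of the projects): addNodeToRack() records a host-to-rack mapping in a static cache, and a StaticMapping instance configured as the cluster's DNSToSwitchMapping later resolves those hosts to the recorded racks, falling back to NetworkTopology.DEFAULT_RACK for unregistered hosts.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.StaticMapping;

public class StaticMappingSketch {
  public static void main(String[] args) {
    // Register host -> rack mappings in StaticMapping's static cache.
    StaticMapping.addNodeToRack("host1.example.com", "/rack1");
    StaticMapping.addNodeToRack("host2.example.com", "/rack2");

    // A StaticMapping instance resolves hosts from that cache; names that
    // were never registered resolve to NetworkTopology.DEFAULT_RACK.
    DNSToSwitchMapping mapping = new StaticMapping();
    List<String> racks = mapping.resolve(Arrays.asList(
        "host1.example.com", "host2.example.com", "unregistered.example.com"));

    System.out.println(racks); // [/rack1, /rack2, /default-rack]
  }
}

In the examples that follow, the same pattern appears inside test setup code: the tests register hosts with addNodeToRack() and point the cluster at StaticMapping via the topology configuration key, so rack-aware placement logic can be exercised without real DNS or script-based resolution.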
Example 1
Source File: TestSCMContainerPlacementPolicyMetrics.java    From hadoop-ozone with Apache License 2.0
@Before
public void setup() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
      "org.apache.hadoop.hdds.scm.container.placement.algorithms." +
          "SCMContainerPlacementRackAware");
  // TODO enable when RATIS-788 is fixed
  conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false);
  conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      StaticMapping.class, DNSToSwitchMapping.class);
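  // Register this test host under /rack1 in StaticMapping's static table so the
  // rack-aware placement policy configured above can resolve it to a known rack.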
  StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
      Collections.singleton(HddsUtils.getHostName(conf))).get(0),
      "/rack1");
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(4)
      .setTotalPipelineNumLimit(10)
      .build();
  cluster.waitForClusterToBeReady();
  metrics = getMetrics(SCMContainerPlacementMetrics.class.getSimpleName());
  ozClient = OzoneClientFactory.getRpcClient(conf);
  store = ozClient.getObjectStore();
}
 
Example 2
Source File: SimulatorEngine.java    From RDFS with Apache License 2.0
/**
 * Start simulated task trackers based on topology.
 * @param clusterStory The cluster topology.
 * @param now
 *    time stamp at which the simulator is started; {@link SimulatorTaskTracker}s
 *    are started shortly after this time stamp
 */
void startTaskTrackers(ClusterStory clusterStory, long now) {
  /** port assigned to TTs, incremented by 1 for each TT */
  int port = 10000;
  long ms = now + 100;

  for (MachineNode node : clusterStory.getMachines()) {
    String hostname = node.getName();
    RackNode rackNode = node.getRackNode();
    StaticMapping.addNodeToRack(hostname, rackNode.getName());
    String taskTrackerName = "tracker_" + hostname + ":localhost/127.0.0.1:"
        + port;
    port++;
    SimulatorTaskTracker tt = new SimulatorTaskTracker(jt, taskTrackerName,
        hostname, node.getMapSlots(), node.getReduceSlots());
    queue.addAll(tt.init(ms++));
  }
}
 
Example 3
Source File: MiniMRCluster.java    From hadoop-gpu with Apache License 2.0
/**
 * Start the tasktracker.
 */
public void startTaskTracker(String host, String rack, int idx, int numDir) 
throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf);
  
  Thread taskTrackerThread = new Thread(taskTracker);
  taskTrackerList.add(taskTracker);
  taskTrackerThreadList.add(taskTrackerThread);
  taskTrackerThread.start();
  ++numTaskTrackers;
}
 
Example 4
Source File: TestDefaultBlockPlacementPolicy.java    From hadoop with Apache License 2.0
/**
 * Verify rack-local node selection for a rack-local client when no
 * local node is available
 */
@Test
public void testLocalRackPlacement() throws Exception {
  String clientMachine = "client.foo.com";
  // Map client to RACK2
  String clientRack = "/RACK2";
  StaticMapping.addNodeToRack(clientMachine, clientRack);
  testPlacement(clientMachine, clientRack);
}
 
Example 5
Source File: TestDefaultBlockPlacementPolicy.java    From big-c with Apache License 2.0
/**
 * Verify rack-local node selection for a rack-local client when no
 * local node is available
 */
@Test
public void testLocalRackPlacement() throws Exception {
  String clientMachine = "client.foo.com";
  // Map client to RACK2
  String clientRack = "/RACK2";
  StaticMapping.addNodeToRack(clientMachine, clientRack);
  testPlacement(clientMachine, clientRack);
}
 
Example 6
Source File: MiniMRCluster.java    From RDFS with Apache License 2.0
/**
 * Start the tasktracker.
 */
public void startTaskTracker(String host, String rack, int idx, int numDir) 
throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf);
  
  addTaskTracker(taskTracker);
}
 
Example 7
Source File: TestStartupDefaultRack.java    From RDFS with Apache License 2.0
@Test
public void testStartup() throws Exception {
  conf = new Configuration();
  conf.setClass("dfs.block.replicator.classname",
      BlockPlacementPolicyConfigurable.class, BlockPlacementPolicy.class);
  File baseDir = MiniDFSCluster.getBaseDirectory(conf);
  baseDir.mkdirs();
  File hostsFile = new File(baseDir, "hosts");
  FileOutputStream out = new FileOutputStream(hostsFile);
  out.write("h1\n".getBytes());
  out.write("h2\n".getBytes());
  out.write("h3\n".getBytes());
  out.close();
  conf.set("dfs.hosts", hostsFile.getAbsolutePath());
  StaticMapping.addNodeToRack("h1", "/r1");
  StaticMapping.addNodeToRack("h2", "/r2");
  StaticMapping.addNodeToRack("h3", NetworkTopology.DEFAULT_RACK);
  cluster = new MiniDFSCluster(conf, 3, new String[] { "/r1", "/r2",
      NetworkTopology.DEFAULT_RACK }, new String[] { "h1", "h2", "h3" },
      true, false);
  DFSTestUtil util = new DFSTestUtil("/testStartup", 10, 10, 1024);
  util.createFiles(cluster.getFileSystem(), "/");
  util.checkFiles(cluster.getFileSystem(), "/");
  assertEquals(2,
      cluster.getNameNode().getDatanodeReport(DatanodeReportType.LIVE).length);
  cluster.shutdown();
}
 
Example 8
Source File: MiniCoronaCluster.java    From RDFS with Apache License 2.0
public void startTaskTracker(String host, String rack, int idx, int numDir)
    throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf);
  addTaskTracker(taskTracker);
}
 
Example 9
Source File: TestStorageContainerManager.java    From hadoop-ozone with Apache License 2.0
/**
 * Test that datanode heartbeats are processed correctly with a 4-layer network topology.
 */
@Test(timeout = 60000)
public void testScmProcessDatanodeHeartbeat() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  String scmId = UUID.randomUUID().toString();
  conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      StaticMapping.class, DNSToSwitchMapping.class);
  StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
      Collections.singleton(HddsUtils.getHostName(conf))).get(0),
      "/rack1");

  final int datanodeNum = 3;
  MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(datanodeNum)
      .setScmId(scmId)
      .build();
  cluster.waitForClusterToBeReady();
  StorageContainerManager scm = cluster.getStorageContainerManager();

  try {
    // first sleep 10s
    Thread.sleep(10000);
    // verify datanode heartbeats are well processed
    long heartbeatCheckerIntervalMs =
        MiniOzoneCluster.Builder.DEFAULT_HB_INTERVAL_MS;
    long start = Time.monotonicNow();
    Thread.sleep(heartbeatCheckerIntervalMs * 2);

    List<DatanodeDetails> allNodes = scm.getScmNodeManager().getAllNodes();
    Assert.assertEquals(datanodeNum, allNodes.size());
    for (DatanodeDetails node : allNodes) {
      DatanodeInfo datanodeInfo = (DatanodeInfo) scm.getScmNodeManager()
          .getNodeByUuid(node.getUuidString());
      Assert.assertTrue(datanodeInfo.getLastHeartbeatTime() > start);
      Assert.assertEquals(datanodeInfo.getUuidString(),
          datanodeInfo.getNetworkName());
      Assert.assertEquals("/rack1", datanodeInfo.getNetworkLocation());
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 10
Source File: TestFailureHandlingByClient.java    From hadoop-ozone with Apache License 2.0
/**
 * Create a MiniOzoneCluster for testing.
 * <p>
 * Ozone is made active by setting OZONE_ENABLED = true
 *
 * @throws IOException
 */
private void init() throws Exception {
  conf = new OzoneConfiguration();
  chunkSize = (int) OzoneConsts.MB;
  blockSize = 4 * chunkSize;
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.write.timeout", 30, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.watch.timeout", 30, TimeUnit.SECONDS);
  conf.setTimeDuration(
      OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
      1, TimeUnit.SECONDS);
  conf.setBoolean(
      OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
  conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
                  DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
                  DatanodeRatisServerConfig.
                          RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." +
                  "rpc.request.timeout",
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." +
                  "watch.request.timeout",
          3, TimeUnit.SECONDS);
  conf.setBoolean(
      OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_DELAY, false);
  conf.setQuietMode(false);
  conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      StaticMapping.class, DNSToSwitchMapping.class);
  StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
      Collections.singleton(HddsUtils.getHostName(conf))).get(0),
      "/rack1");
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(10).setTotalPipelineNumLimit(15).build();
  cluster.waitForClusterToBeReady();
  //the easiest way to create an open container is creating a key
  client = OzoneClientFactory.getRpcClient(conf);
  objectStore = client.getObjectStore();
  keyString = UUID.randomUUID().toString();
  volumeName = "datanodefailurehandlingtest";
  bucketName = volumeName;
  objectStore.createVolume(volumeName);
  objectStore.getVolume(volumeName).createBucket(bucketName);
}
 
Example 11
Source File: TestFailureHandlingByClientFlushDelay.java    From hadoop-ozone with Apache License 2.0
/**
 * Create a MiniOzoneCluster for testing.
 * <p>
 * Ozone is made active by setting OZONE_ENABLED = true
 *
 * @throws IOException
 */
private void init() throws Exception {
  conf = new OzoneConfiguration();
  chunkSize = 100;
  flushSize = 2 * chunkSize;
  maxFlushSize = 2 * flushSize;
  blockSize = 4 * chunkSize;
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.write.timeout", 30, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.watch.timeout", 30, TimeUnit.SECONDS);
  conf.setTimeDuration(
      OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
      1, TimeUnit.SECONDS);
  conf.setBoolean(
      OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
  conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
                  DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
                  DatanodeRatisServerConfig.
                          RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." +
                  "rpc.request.timeout",
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY + "." +
                  "watch.request.timeout",
          3, TimeUnit.SECONDS);
  conf.setQuietMode(false);
  conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      StaticMapping.class, DNSToSwitchMapping.class);
  StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
      Collections.singleton(HddsUtils.getHostName(conf))).get(0),
      "/rack1");
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(10)
      .setTotalPipelineNumLimit(15)
      .setChunkSize(chunkSize)
      .setBlockSize(blockSize)
      .setStreamBufferFlushSize(flushSize)
      .setStreamBufferMaxSize(maxFlushSize)
      .setStreamBufferSizeUnit(StorageUnit.BYTES).build();
  cluster.waitForClusterToBeReady();
  //the easiest way to create an open container is creating a key
  client = OzoneClientFactory.getRpcClient(conf);
  objectStore = client.getObjectStore();
  keyString = UUID.randomUUID().toString();
  volumeName = "datanodefailurehandlingtest";
  bucketName = volumeName;
  objectStore.createVolume(volumeName);
  objectStore.getVolume(volumeName).createBucket(bucketName);
}
 
Example 12
Source File: MiniAvatarCluster.java    From RDFS with Apache License 2.0
private void startDataNodes() throws IOException {
  if (racks != null && numDataNodes > racks.length ) {
    throw new IllegalArgumentException( "The length of racks [" + 
                                        racks.length +
                                        "] is less than the number " +
                                        "of datanodes [" +
                                        numDataNodes + "].");
  }
  if (hosts != null && numDataNodes > hosts.length ) {
    throw new IllegalArgumentException( "The length of hosts [" + 
                                        hosts.length +
                                        "] is less than the number " +
                                        "of datanodes [" +
                                        numDataNodes + "].");
  }

  //Generate some hostnames if required
  if (racks != null && hosts == null) {
    LOG.info("Generating host names for datanodes");
    hosts = new String[numDataNodes];
    for (int i = 0; i < numDataNodes; i++) {
      hosts[i] = "host" + i + ".foo.com";
    }
  }
  
  
  String[] dnArgs = { HdfsConstants.StartupOption.REGULAR.getName() };
  
  for (int i = 0; i < numDataNodes; i++) {
    Configuration dnConf = new Configuration(conf);

    File dir1 = new File(dataDir, "data"+(2*i+1));
    File dir2 = new File(dataDir, "data"+(2*i+2));
    dir1.mkdirs();
    dir2.mkdirs();
    if (!dir1.isDirectory() || !dir2.isDirectory()) { 
      throw new IOException("Mkdirs failed to create directory for DataNode "
                            + i + ": " + dir1 + " or " + dir2);
    }
    dnConf.set("dfs.data.dir", dir1.getPath() + "," + dir2.getPath()); 

    LOG.info("Starting DataNode " + i + " with dfs.data.dir: " 
                       + dnConf.get("dfs.data.dir"));
    
    if (hosts != null) {
      dnConf.set("slave.host.name", hosts[i]);
      LOG.info("Starting DataNode " + i + " with hostname set to: " 
                         + dnConf.get("slave.host.name"));
    }

    if (racks != null) {
      String name = hosts[i];
      LOG.info("Adding node with hostname : " + name + " to rack "+
                         racks[i]);
      StaticMapping.addNodeToRack(name,
                                  racks[i]);
    }
    Configuration newconf = new Configuration(dnConf); // save config
    if (hosts != null) {
      NetUtils.addStaticResolution(hosts[i], "localhost");
    }
    AvatarDataNode dn = AvatarDataNode.instantiateDataNode(dnArgs, dnConf);
    //since HDFS identifies datanodes by IP:port, we also need to map
    //IP:port to the rackId
    
    String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
    if (racks != null) {
      int port = dn.getSelfAddr().getPort();
      System.out.println("Adding node with IP:port : " + ipAddr + ":" + port+
                          " to rack " + racks[i]);
      StaticMapping.addNodeToRack(ipAddr + ":" + port,
                                racks[i]);
    }
    dn.runDatanodeDaemon();
    dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs));

  }

}