org.apache.hadoop.net.StaticMapping Java Examples

The following examples show how to use org.apache.hadoop.net.StaticMapping. StaticMapping is a DNSToSwitchMapping implementation that keeps its host-to-rack map in static, in-memory state and is populated explicitly through addNodeToRack, which makes it a convenient way for tests to control network topology without a rack-awareness script. Each example names the project and source file it was taken from.
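The class is usually wired in through two steps: point the node-to-switch mapping configuration key at StaticMapping, then register each host on a rack with the static addNodeToRack method. Below is a minimal sketch of that pattern, assuming a test that builds its own Configuration; the host and rack names are placeholders, and the key name depends on the Hadoop version ("topology.node.switch.mapping.impl" in the older examples below, NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, i.e. "net.topology.node.switch.mapping.impl", in the newer ones).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.StaticMapping;

public class StaticMappingSketch {
  public static Configuration buildRackAwareConf() {
    Configuration conf = new Configuration();
    // Resolve node-to-rack mappings from StaticMapping's in-memory map instead
    // of invoking an external topology script.
    conf.setClass("net.topology.node.switch.mapping.impl",
        StaticMapping.class, DNSToSwitchMapping.class);

    // The map is static, so clear entries left behind by earlier tests,
    // then register each host on the rack it should report.
    StaticMapping.resetMap();
    StaticMapping.addNodeToRack("host0.example.com", "/rack0");
    StaticMapping.addNodeToRack("host1.example.com", "/rack1");

    // Components built from this conf (a MiniDFSCluster, MiniMRCluster,
    // MiniOzoneCluster, ...) now place host0 on /rack0 and host1 on /rack1;
    // hosts that were never registered fall back to the default rack.
    return conf;
  }
}

The examples that follow apply this same pattern inside the MiniMRCluster, Corona, Mumak simulator, MiniDFSCluster, and Ozone test harnesses.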
Example #1
Source File: MiniMRCluster.java    From RDFS with Apache License 2.0
/**
 * Create the job tracker and run it.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);
    File f = new File("build/test/mapred/local").getAbsoluteFile();
    jc.set("mapred.local.dir",f.getAbsolutePath());
    jc.setClass("topology.node.switch.mapping.impl", 
        StaticMapping.class, DNSToSwitchMapping.class);
    String id = 
      new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    tracker = JobTracker.startTracker(jc, id);
    tracker.offerService();
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
 
Example #2
Source File: MiniMRCluster.java    From hadoop-gpu with Apache License 2.0
/**
 * Start the tasktracker.
 */
public void startTaskTracker(String host, String rack, int idx, int numDir) 
throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf);
  
  Thread taskTrackerThread = new Thread(taskTracker);
  taskTrackerList.add(taskTracker);
  taskTrackerThreadList.add(taskTrackerThread);
  taskTrackerThread.start();
  ++numTaskTrackers;
}
 
Example #3
Source File: MiniCoronaCluster.java    From RDFS with Apache License 2.0
static void configureJobConf(JobConf conf, String namenode,
    int clusterManagerPort, int proxyJobTrackerPort,
    UnixUserGroupInformation ugi) {
  FileSystem.setDefaultUri(conf, namenode);
  conf.set(CoronaConf.CM_ADDRESS,
             "localhost:" + clusterManagerPort);
  conf.set(CoronaConf.PROXY_JOB_TRACKER_ADDRESS,
    "localhost:" + proxyJobTrackerPort);
  conf.set("mapred.job.tracker", "corona");
  conf.set("mapred.job.tracker.http.address",
                      "127.0.0.1:0");
  conf.setClass("topology.node.switch.mapping.impl",
      StaticMapping.class, DNSToSwitchMapping.class);
  conf.set("mapred.job.tracker.class", CoronaJobTracker.class.getName());
  if (ugi != null) {
    conf.set("mapred.system.dir", "/mapred/system");
    UnixUserGroupInformation.saveToConf(conf,
        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
  }
  // for debugging have all task output sent to the test output
  JobClient.setTaskOutputFilter(conf, JobClient.TaskStatusFilter.ALL);
}
 
Example #4
Source File: SimulatorEngine.java    From RDFS with Apache License 2.0
/**
 * Start simulated task trackers based on topology.
 * @param clusterStory The cluster topology.
 * @param now
 *    time stamp when the simulator is started, {@link SimulatorTaskTracker}s
 *    are started shortly after this time stamp
 */
void startTaskTrackers(ClusterStory clusterStory, long now) {
  /** port assigned to TTs, incremented by 1 for each TT */
  int port = 10000;
  long ms = now + 100;

  for (MachineNode node : clusterStory.getMachines()) {
    String hostname = node.getName();
    RackNode rackNode = node.getRackNode();
    StaticMapping.addNodeToRack(hostname, rackNode.getName());
    String taskTrackerName = "tracker_" + hostname + ":localhost/127.0.0.1:"
        + port;
    port++;
    SimulatorTaskTracker tt = new SimulatorTaskTracker(jt, taskTrackerName,
        hostname, node.getMapSlots(), node.getReduceSlots());
    queue.addAll(tt.init(ms++));
  }
}
 
Example #5
Source File: TestDefaultBlockPlacementPolicy.java    From big-c with Apache License 2.0
@Before
public void setup() throws IOException {
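  // StaticMapping stores its host-to-rack map in static state, so clear any
  // entries registered by earlier tests before building the cluster.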
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };

  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
 
Example #6
Source File: TestDefaultBlockPlacementPolicy.java    From hadoop with Apache License 2.0
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };

  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
 
Example #7
Source File: TestSCMContainerPlacementPolicyMetrics.java    From hadoop-ozone with Apache License 2.0
@Before
public void setup() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
      "org.apache.hadoop.hdds.scm.container.placement.algorithms." +
          "SCMContainerPlacementRackAware");
  // TODO enable when RATIS-788 is fixed
  conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false);
  conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      StaticMapping.class, DNSToSwitchMapping.class);
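  // The MiniOzoneCluster datanodes all run on the local host, so register it
  // on /rack1 for the rack-aware placement policy to resolve.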
  StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
      Collections.singleton(HddsUtils.getHostName(conf))).get(0),
      "/rack1");
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(4)
      .setTotalPipelineNumLimit(10)
      .build();
  cluster.waitForClusterToBeReady();
  metrics = getMetrics(SCMContainerPlacementMetrics.class.getSimpleName());
  ozClient = OzoneClientFactory.getRpcClient(conf);
  store = ozClient.getObjectStore();
}
 
Example #8
Source File: TestDefaultBlockPlacementPolicy.java    From hadoop with Apache License 2.0
/**
 * Verify rack-local node selection for the rack-local client in case of no
 * local node
 */
@Test
public void testLocalRackPlacement() throws Exception {
  String clientMachine = "client.foo.com";
  // Map client to RACK2
  String clientRack = "/RACK2";
  StaticMapping.addNodeToRack(clientMachine, clientRack);
  testPlacement(clientMachine, clientRack);
}
 
Example #9
Source File: TestDefaultBlockPlacementPolicy.java    From big-c with Apache License 2.0
/**
 * Verify rack-local node selection for the rack-local client in case of no
 * local node
 */
@Test
public void testLocalRackPlacement() throws Exception {
  String clientMachine = "client.foo.com";
  // Map client to RACK2
  String clientRack = "/RACK2";
  StaticMapping.addNodeToRack(clientMachine, clientRack);
  testPlacement(clientMachine, clientRack);
}
 
Example #10
Source File: MiniMRCluster.java    From RDFS with Apache License 2.0
/**
 * Start the tasktracker.
 */
public void startTaskTracker(String host, String rack, int idx, int numDir) 
throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf);
  
  addTaskTracker(taskTracker);
}
 
Example #11
Source File: TestStartupDefaultRack.java    From RDFS with Apache License 2.0
@Test
public void testStartup() throws Exception {
  conf = new Configuration();
  conf.setClass("dfs.block.replicator.classname",
      BlockPlacementPolicyConfigurable.class, BlockPlacementPolicy.class);
  File baseDir = MiniDFSCluster.getBaseDirectory(conf);
  baseDir.mkdirs();
  File hostsFile = new File(baseDir, "hosts");
  FileOutputStream out = new FileOutputStream(hostsFile);
  out.write("h1\n".getBytes());
  out.write("h2\n".getBytes());
  out.write("h3\n".getBytes());
  out.close();
  conf.set("dfs.hosts", hostsFile.getAbsolutePath());
  StaticMapping.addNodeToRack("h1", "/r1");
  StaticMapping.addNodeToRack("h2", "/r2");
  StaticMapping.addNodeToRack("h3", NetworkTopology.DEFAULT_RACK);
  cluster = new MiniDFSCluster(conf, 3, new String[] { "/r1", "/r2",
      NetworkTopology.DEFAULT_RACK }, new String[] { "h1", "h2", "h3" },
      true, false);
  DFSTestUtil util = new DFSTestUtil("/testStartup", 10, 10, 1024);
  util.createFiles(cluster.getFileSystem(), "/");
  util.checkFiles(cluster.getFileSystem(), "/");
  assertEquals(2,
      cluster.getNameNode().getDatanodeReport(DatanodeReportType.LIVE).length);
  cluster.shutdown();
}
 
Example #12
Source File: MiniCoronaCluster.java    From RDFS with Apache License 2.0
public void startTaskTracker(String host, String rack, int idx, int numDir)
    throws IOException {
  if (rack != null) {
    StaticMapping.addNodeToRack(host, rack);
  }
  if (host != null) {
    NetUtils.addStaticResolution(host, "localhost");
  }
  TaskTrackerRunner taskTracker;
  taskTracker = new TaskTrackerRunner(idx, numDir, host, conf);
  addTaskTracker(taskTracker);
}
 
Example #13
Source File: MiniMRCluster.java    From hadoop-gpu with Apache License 2.0
/**
 * Create the job tracker and run it.
 */
public void run() {
  try {
    jc = (jc == null) ? createJobConf() : createJobConf(jc);
    File f = new File("build/test/mapred/local").getAbsoluteFile();
    jc.set("mapred.local.dir",f.getAbsolutePath());
    jc.setClass("topology.node.switch.mapping.impl", 
        StaticMapping.class, DNSToSwitchMapping.class);
    String id = 
      new SimpleDateFormat("yyyyMMddHHmmssSSS").format(new Date());
    tracker = JobTracker.startTracker(jc, id);
    tracker.offerService();
  } catch (Throwable e) {
    LOG.error("Job tracker crashed", e);
    isActive = false;
  }
}
 
Example #14
Source File: MiniDFSCluster.java    From big-c with Apache License 2.0
private void initMiniDFSCluster(
    Configuration conf,
    int numDataNodes, StorageType[][] storageTypes, boolean format, boolean manageNameDfsDirs,
    boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
    boolean manageDataDfsDirs, StartupOption startOpt,
    StartupOption dnStartOpt, String[] racks,
    String[] hosts,
    long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
    boolean waitSafeMode, boolean setupHostsFile,
    MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
    boolean checkDataNodeAddrConfig,
    boolean checkDataNodeHostConfig,
    Configuration[] dnConfOverlays,
    boolean skipFsyncForTesting)
throws IOException {
  boolean success = false;
  try {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
      instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;
  
    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(
        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 seconds
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                   StaticMapping.class, DNSToSwitchMapping.class);
  
    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
          "since no HTTP ports have been specified.");
      conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling log-roll triggering in the "
          + "Standby node since no IPC ports have been specified.");
      conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting);
  
    federation = nnTopology.isFederated();
    try {
      createNameNodesAndSetConf(
          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
          enableManagedDfsDirsRedundancy,
          format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
      LOG.error("IOE creating namenodes. Permissions dump:\n" +
          createPermissionsDiagnosisString(data_dir), ioe);
      throw ioe;
    }
    if (format) {
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        throw new IOException("Cannot remove data directory: " + data_dir +
            createPermissionsDiagnosisString(data_dir));
      }
    }
  
    if (startOpt == StartupOption.RECOVER) {
      return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
        dnStartOpt != null ? dnStartOpt : startOpt,
        racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
        checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    //make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    success = true;
  } finally {
    if (!success) {
      shutdown();
    }
  }
}
 
Example #15
Source File: MiniDFSCluster.java    From hadoop with Apache License 2.0
private void initMiniDFSCluster(
    Configuration conf,
    int numDataNodes, StorageType[][] storageTypes, boolean format, boolean manageNameDfsDirs,
    boolean manageNameDfsSharedDirs, boolean enableManagedDfsDirsRedundancy,
    boolean manageDataDfsDirs, StartupOption startOpt,
    StartupOption dnStartOpt, String[] racks,
    String[] hosts,
    long[][] storageCapacities, long[] simulatedCapacities, String clusterId,
    boolean waitSafeMode, boolean setupHostsFile,
    MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
    boolean checkDataNodeAddrConfig,
    boolean checkDataNodeHostConfig,
    Configuration[] dnConfOverlays,
    boolean skipFsyncForTesting)
throws IOException {
  boolean success = false;
  try {
    ExitUtil.disableSystemExit();

    // Re-enable symlinks for tests, see HADOOP-10020 and HADOOP-10052
    FileSystem.enableSymlinks();

    synchronized (MiniDFSCluster.class) {
      instanceId = instanceCount++;
    }

    this.conf = conf;
    base_dir = new File(determineDfsBaseDir());
    data_dir = new File(base_dir, "data");
    this.waitSafeMode = waitSafeMode;
    this.checkExitOnShutdown = checkExitOnShutdown;
  
    int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
    conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
    int safemodeExtension = conf.getInt(
        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
    conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 seconds
    conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                   StaticMapping.class, DNSToSwitchMapping.class);
  
    // In an HA cluster, in order for the StandbyNode to perform checkpoints,
    // it needs to know the HTTP port of the Active. So, if ephemeral ports
    // are chosen, disable checkpoints for the test.
    if (!nnTopology.allHttpPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling checkpointing in the Standby node " +
          "since no HTTP ports have been specified.");
      conf.setBoolean(DFS_HA_STANDBY_CHECKPOINTS_KEY, false);
    }
    if (!nnTopology.allIpcPortsSpecified() &&
        nnTopology.isHA()) {
      LOG.info("MiniDFSCluster disabling log-roll triggering in the "
          + "Standby node since no IPC ports have been specified.");
      conf.setInt(DFS_HA_LOGROLL_PERIOD_KEY, -1);
    }

    EditLogFileOutputStream.setShouldSkipFsyncForTesting(skipFsyncForTesting);
  
    federation = nnTopology.isFederated();
    try {
      createNameNodesAndSetConf(
          nnTopology, manageNameDfsDirs, manageNameDfsSharedDirs,
          enableManagedDfsDirsRedundancy,
          format, startOpt, clusterId, conf);
    } catch (IOException ioe) {
      LOG.error("IOE creating namenodes. Permissions dump:\n" +
          createPermissionsDiagnosisString(data_dir), ioe);
      throw ioe;
    }
    if (format) {
      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
        throw new IOException("Cannot remove data directory: " + data_dir +
            createPermissionsDiagnosisString(data_dir));
      }
    }
  
    if (startOpt == StartupOption.RECOVER) {
      return;
    }

    // Start the DataNodes
    startDataNodes(conf, numDataNodes, storageTypes, manageDataDfsDirs,
        dnStartOpt != null ? dnStartOpt : startOpt,
        racks, hosts, storageCapacities, simulatedCapacities, setupHostsFile,
        checkDataNodeAddrConfig, checkDataNodeHostConfig, dnConfOverlays);
    waitClusterUp();
    //make sure ProxyUsers uses the latest conf
    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
    success = true;
  } finally {
    if (!success) {
      shutdown();
    }
  }
}
 
Example #16
Source File: TestStorageContainerManager.java    From hadoop-ozone with Apache License 2.0
/**
 * Test datanode heartbeat well processed with a 4-layer network topology.
 */
@Test(timeout = 60000)
public void testScmProcessDatanodeHeartbeat() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  String scmId = UUID.randomUUID().toString();
  conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      StaticMapping.class, DNSToSwitchMapping.class);
  StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
      Collections.singleton(HddsUtils.getHostName(conf))).get(0),
      "/rack1");

  final int datanodeNum = 3;
  MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(datanodeNum)
      .setScmId(scmId)
      .build();
  cluster.waitForClusterToBeReady();
  StorageContainerManager scm = cluster.getStorageContainerManager();

  try {
    // first sleep 10s
    Thread.sleep(10000);
    // verify datanode heartbeats are well processed
    long heartbeatCheckerIntervalMs =
        MiniOzoneCluster.Builder.DEFAULT_HB_INTERVAL_MS;
    long start = Time.monotonicNow();
    Thread.sleep(heartbeatCheckerIntervalMs * 2);

    List<DatanodeDetails> allNodes = scm.getScmNodeManager().getAllNodes();
    Assert.assertEquals(datanodeNum, allNodes.size());
    for (DatanodeDetails node : allNodes) {
      DatanodeInfo datanodeInfo = (DatanodeInfo) scm.getScmNodeManager()
          .getNodeByUuid(node.getUuidString());
      Assert.assertTrue(datanodeInfo.getLastHeartbeatTime() > start);
      Assert.assertEquals(datanodeInfo.getUuidString(),
          datanodeInfo.getNetworkName());
      Assert.assertEquals("/rack1", datanodeInfo.getNetworkLocation());
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example #17
Source File: TestFailureHandlingByClientFlushDelay.java    From hadoop-ozone with Apache License 2.0
/**
 * Create a MiniDFSCluster for testing.
 * <p>
 * Ozone is made active by setting OZONE_ENABLED = true
 *
 * @throws IOException
 */
private void init() throws Exception {
  conf = new OzoneConfiguration();
  chunkSize = 100;
  flushSize = 2 * chunkSize;
  maxFlushSize = 2 * flushSize;
  blockSize = 4 * chunkSize;
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.write.timeout", 30, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.watch.timeout", 30, TimeUnit.SECONDS);
  conf.setTimeDuration(
      OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
      1, TimeUnit.SECONDS);
  conf.setBoolean(
      OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
  conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
                  DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
                  DatanodeRatisServerConfig.
                          RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                  "rpc.request.timeout",
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                  "watch.request.timeout",
          3, TimeUnit.SECONDS);
  conf.setQuietMode(false);
  conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      StaticMapping.class, DNSToSwitchMapping.class);
  StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
      Collections.singleton(HddsUtils.getHostName(conf))).get(0),
      "/rack1");
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(10)
      .setTotalPipelineNumLimit(15)
      .setChunkSize(chunkSize)
      .setBlockSize(blockSize)
      .setStreamBufferFlushSize(flushSize)
      .setStreamBufferMaxSize(maxFlushSize)
      .setStreamBufferSizeUnit(StorageUnit.BYTES).build();
  cluster.waitForClusterToBeReady();
  //the easiest way to create an open container is creating a key
  client = OzoneClientFactory.getRpcClient(conf);
  objectStore = client.getObjectStore();
  keyString = UUID.randomUUID().toString();
  volumeName = "datanodefailurehandlingtest";
  bucketName = volumeName;
  objectStore.createVolume(volumeName);
  objectStore.getVolume(volumeName).createBucket(bucketName);
}
 
Example #18
Source File: SimulatorEngine.java    From RDFS with Apache License 2.0
/**
 * Initiate components in the simulation.
 * @throws InterruptedException
 * @throws IOException if trace or topology files cannot be open
 */
@SuppressWarnings("deprecation")
void init() throws InterruptedException, IOException {
  long now = System.currentTimeMillis();

  JobConf jobConf = new JobConf(getConf());
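  // The simulator resolves rack locations for its simulated task trackers
  // through StaticMapping, which startTaskTrackers() populates from the
  // cluster topology.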
  jobConf.setClass("topology.node.switch.mapping.impl",
      StaticMapping.class, DNSToSwitchMapping.class);
  jobConf.set("fs.default.name", "file:///");
  jobConf.set("mapred.job.tracker", "localhost:8012");
  jobConf.setInt("mapred.jobtracker.job.history.block.size", 512);
  jobConf.setInt("mapred.jobtracker.job.history.buffer.size", 512);
  jobConf.setLong("mapred.tasktracker.expiry.interval", 5000);
  jobConf.setInt("mapred.reduce.copy.backoff", 4);
  jobConf.setLong("mapred.job.reuse.jvm.num.tasks", -1);
  jobConf.setUser("mumak");
  jobConf.set("mapred.system.dir", 
      jobConf.get("hadoop.log.dir", "/tmp/hadoop-"+jobConf.getUser()) + "/mapred/system");
  jobConf.set("mapred.jobtracker.taskScheduler", JobQueueTaskScheduler.class.getName());
  
  FileSystem lfs = FileSystem.getLocal(getConf());
  Path logPath =
    new Path(System.getProperty("hadoop.log.dir")).makeQualified(lfs);
  jobConf.set("mapred.system.dir", logPath.toString());
  jobConf.set("hadoop.job.history.location", (new Path(logPath, "history")
      .toString()));
  
  jt = SimulatorJobTracker.startTracker(jobConf, now, this);
  jt.offerService();
  
  // max Map/Reduce tasks per node
  int maxMaps = getConf().getInt("mapred.tasktracker.map.tasks.maximum",
      DEFAULT_MAP_SLOTS_PER_NODE);
  int maxReduces = getConf().getInt(
      "mapred.tasktracker.reduce.tasks.maximum",
      DEFAULT_REDUCE_SLOTS_PER_NODE);

  MachineNode defaultNode = new MachineNode.Builder("default", 2)
      .setMapSlots(maxMaps).setReduceSlots(maxReduces).build();
  ZombieCluster cluster = new ZombieCluster(new Path(topologyFile), 
      defaultNode, jobConf);
  long firstJobStartTime = now + 60000;
  JobStoryProducer jobStoryProducer = new SimulatorJobStoryProducer(
      new Path(traceFile), cluster, firstJobStartTime, jobConf);
  
  jc = new SimulatorJobClient(jt, jobStoryProducer);
  queue.addAll(jc.init(firstJobStartTime));

  // create TTs based on topology.json     
  startTaskTrackers(cluster, now);
  
  terminateTime = getConf().getLong("mumak.terminate.time", Long.MAX_VALUE);
  if (terminateTime <= 0) {
    throw new IllegalArgumentException("Terminate time must be positive: "
        + terminateTime);
  }
}
 
Example #19
Source File: TestFailureHandlingByClient.java    From hadoop-ozone with Apache License 2.0
/**
 * Create a MiniDFSCluster for testing.
 * <p>
 * Ozone is made active by setting OZONE_ENABLED = true
 *
 * @throws IOException
 */
private void init() throws Exception {
  conf = new OzoneConfiguration();
  chunkSize = (int) OzoneConsts.MB;
  blockSize = 4 * chunkSize;
  conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 100, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.write.timeout", 30, TimeUnit.SECONDS);
  conf.setTimeDuration(RatisHelper.HDDS_DATANODE_RATIS_PREFIX_KEY
      + ".client.request.watch.timeout", 30, TimeUnit.SECONDS);
  conf.setTimeDuration(
      OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
      1, TimeUnit.SECONDS);
  conf.setBoolean(
      OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
  conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
                  DatanodeRatisServerConfig.RATIS_SERVER_REQUEST_TIMEOUT_KEY,
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_SERVER_PREFIX_KEY + "." +
                  DatanodeRatisServerConfig.
                          RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                  "rpc.request.timeout",
          3, TimeUnit.SECONDS);
  conf.setTimeDuration(
          RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
                  "watch.request.timeout",
          3, TimeUnit.SECONDS);
  conf.setBoolean(
      OzoneConfigKeys.OZONE_CLIENT_STREAM_BUFFER_FLUSH_DELAY, false);
  conf.setQuietMode(false);
  conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      StaticMapping.class, DNSToSwitchMapping.class);
  StaticMapping.addNodeToRack(NetUtils.normalizeHostNames(
      Collections.singleton(HddsUtils.getHostName(conf))).get(0),
      "/rack1");
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(10).setTotalPipelineNumLimit(15).build();
  cluster.waitForClusterToBeReady();
  //the easiest way to create an open container is creating a key
  client = OzoneClientFactory.getRpcClient(conf);
  objectStore = client.getObjectStore();
  keyString = UUID.randomUUID().toString();
  volumeName = "datanodefailurehandlingtest";
  bucketName = volumeName;
  objectStore.createVolume(volumeName);
  objectStore.getVolume(volumeName).createBucket(bucketName);
}
 
Example #20
Source File: MiniAvatarCluster.java    From RDFS with Apache License 2.0
private void startDataNodes() throws IOException {
  if (racks != null && numDataNodes > racks.length ) {
    throw new IllegalArgumentException( "The length of racks [" + 
                                        racks.length +
                                        "] is less than the number " +
                                        "of datanodes [" +
                                        numDataNodes + "].");
  }
  if (hosts != null && numDataNodes > hosts.length ) {
    throw new IllegalArgumentException( "The length of hosts [" + 
                                        hosts.length +
                                        "] is less than the number " +
                                        "of datanodes [" +
                                        numDataNodes + "].");
  }

  //Generate some hostnames if required
  if (racks != null && hosts == null) {
    LOG.info("Generating host names for datanodes");
    hosts = new String[numDataNodes];
    for (int i = 0; i < numDataNodes; i++) {
      hosts[i] = "host" + i + ".foo.com";
    }
  }
  
  
  String[] dnArgs = { HdfsConstants.StartupOption.REGULAR.getName() };
  
  for (int i = 0; i < numDataNodes; i++) {
    Configuration dnConf = new Configuration(conf);

    File dir1 = new File(dataDir, "data"+(2*i+1));
    File dir2 = new File(dataDir, "data"+(2*i+2));
    dir1.mkdirs();
    dir2.mkdirs();
    if (!dir1.isDirectory() || !dir2.isDirectory()) { 
      throw new IOException("Mkdirs failed to create directory for DataNode "
                            + i + ": " + dir1 + " or " + dir2);
    }
    dnConf.set("dfs.data.dir", dir1.getPath() + "," + dir2.getPath()); 

    LOG.info("Starting DataNode " + i + " with dfs.data.dir: " 
                       + dnConf.get("dfs.data.dir"));
    
    if (hosts != null) {
      dnConf.set("slave.host.name", hosts[i]);
      LOG.info("Starting DataNode " + i + " with hostname set to: " 
                         + dnConf.get("slave.host.name"));
    }

    if (racks != null) {
      String name = hosts[i];
      LOG.info("Adding node with hostname : " + name + " to rack "+
                         racks[i]);
      StaticMapping.addNodeToRack(name,
                                  racks[i]);
    }
    Configuration newconf = new Configuration(dnConf); // save config
    if (hosts != null) {
      NetUtils.addStaticResolution(hosts[i], "localhost");
    }
    AvatarDataNode dn = AvatarDataNode.instantiateDataNode(dnArgs, dnConf);
    //since the HDFS does things based on IP:port, we need to add the mapping
    //for IP:port to rackId
    
    String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
    if (racks != null) {
      int port = dn.getSelfAddr().getPort();
      System.out.println("Adding node with IP:port : " + ipAddr + ":" + port+
                          " to rack " + racks[i]);
      StaticMapping.addNodeToRack(ipAddr + ":" + port,
                                racks[i]);
    }
    dn.runDatanodeDaemon();
    dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs));

  }

}