Java Code Examples for org.apache.hadoop.mapred.MiniMRCluster#getJobTrackerPort()

The following examples show how to use org.apache.hadoop.mapred.MiniMRCluster#getJobTrackerPort(). All of the examples below are drawn from test code in the RDFS project and follow the same pattern: start a MiniDFSCluster, start a MiniMRCluster against it, and read the job tracker's port back to build the address that goes into the job configuration.
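Before the individual examples, here is a minimal, self-contained sketch of that pattern. It is an illustration only: the class name MiniMRClusterPortExample is invented for the sketch, and the constructor signatures assume the pre-0.21 org.apache.hadoop.mapred mini-cluster API that the RDFS tests below use.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.MiniMRCluster;

public class MiniMRClusterPortExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // Bring up an in-process DFS cluster with 3 datanodes
    // (same constructor the RDFS tests below use).
    MiniDFSCluster dfs = new MiniDFSCluster(conf, 3, true, null);
    dfs.waitActive();
    FileSystem fileSys = dfs.getFileSystem();
    String namenode = fileSys.getUri().toString();

    // Bring up an in-process map-reduce cluster with 4 task trackers
    // backed by that DFS.
    MiniMRCluster mr = new MiniMRCluster(4, namenode, 3);
    try {
      // The JobTracker binds to a free port chosen at startup;
      // getJobTrackerPort() reads that port back so clients can connect.
      String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
      FileSystem.setDefaultUri(conf, namenode);
      conf.set("mapred.job.tracker", jobTrackerName);
      System.out.println("JobTracker at " + jobTrackerName);
    } finally {
      mr.shutdown();
      dfs.shutdown();
    }
  }
}

Because the mini cluster's JobTracker binds to a port chosen at startup rather than a fixed one, every example below reads it back with getJobTrackerPort() when building the mapred.job.tracker address.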
Example 1
Source File: TestRaidHar.java    From RDFS with Apache License 2.0
/**
 * create mapreduce and dfs clusters
 */
private void createClusters(boolean local) throws Exception {

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  // scan all policies once every 5 seconds
  conf.setLong("raid.policy.rescan.interval", 5000);

  // the RaidNode does the raiding inline (instead of submitting to map/reduce)
  if (local) {
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  } else {
    conf.set("raid.classname", "org.apache.hadoop.raid.DistRaidNode");
  }
  // use local block fixer
  conf.set("raid.blockfix.classname",
           "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");

  conf.set("raid.server.address", "localhost:0");

  // create a dfs and map-reduce cluster
  final int taskTrackers = 4;

  dfs = new MiniDFSCluster(conf, 3, true, null);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().toString();
  mr = new MiniMRCluster(taskTrackers, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);

  Utils.loadTestCodecs(conf);
}
 
Example 2
Source File: TestSimulationBlockFixer.java    From RDFS with Apache License 2.0
private void mySetup(int stripeLength, int timeBeforeHar, 
    String xorCode, String rsCode, String code) throws Exception {

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();

  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  // scan all policies once every 5 seconds
  conf.setLong("raid.policy.rescan.interval", 5000);

  // do not use map-reduce cluster for Raiding
  conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  conf.set("raid.server.address", "localhost:0");

  Utils.loadTestCodecs(conf, stripeLength, stripeLength, 1, 3, "/destraid",
      "/destraidrs", true, xorCode, rsCode,
      false);

  conf.setBoolean("dfs.permissions", false);

  dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  dfsCluster.waitActive();
  fileSys = dfsCluster.getFileSystem();
  namenode = fileSys.getUri().toString();

  FileSystem.setDefaultUri(conf, namenode);
  mr = new MiniMRCluster(4, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();

  conf.set("mapred.job.tracker", jobTrackerName);
  
  ConfigBuilder cb = new ConfigBuilder(CONFIG_FILE);
  cb.addPolicy("RaidTest1", "/user/dhruba/raidtest", 1, 1, code);
  cb.persist();
}
 
Example 3
Source File: TestRaidShell.java    From RDFS with Apache License 2.0
/**
 * Test distRaid command
 * @throws Exception
 */
public void testDistRaid() throws Exception {
  LOG.info("TestDist started.");
  // create a dfs and map-reduce cluster
  mySetup(3, -1);
  MiniMRCluster mr = new MiniMRCluster(4, namenode, 3);
  String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  conf.set("mapred.job.tracker", jobTrackerName);

  try {
    // Create files to be raided
    TestRaidNode.createTestFiles(fileSys, RAID_SRC_PATH,
        "/raid" + RAID_SRC_PATH, 1, 3, (short)3);
    String subDir = RAID_SRC_PATH + "/subdir";
    TestRaidNode.createTestFiles(
        fileSys, subDir, "/raid" + subDir, 1, 3, (short)3);
    
    // Create RaidShell and raid the files.
    RaidShell shell = new RaidShell(conf);
    String[] args = new String[3];
    args[0] = "-distRaid";
    args[1] = RAID_POLICY_NAME;
    args[2] = RAID_SRC_PATH;
    assertEquals(0, ToolRunner.run(shell, args));

    // Check files are raided
    checkIfFileRaided(new Path(RAID_SRC_PATH, "file0"));
    checkIfFileRaided(new Path(subDir, "file0"));
  } finally {
    mr.shutdown();
    myTearDown();
  }
}
 
Example 4
Source File: TestTempDirectoryCleanUp.java    From RDFS with Apache License 2.0
/**
 * create mapreduce and dfs clusters
 */
private void createClusters(boolean local) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    String base = new File(".").getAbsolutePath();
    System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
  }

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);
  Utils.loadTestCodecs(conf, 3, 10, 1, 5, "/raid", "/raidrs", false, false);

  conf.setLong("raid.policy.rescan.interval", 5 * 1000L);

  // the RaidNode does the raiding inline (instead of submitting to map/reduce)
  if (local) {
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  } else {
    conf.set("raid.classname", "org.apache.hadoop.raid.DistRaidNode");
  }

  // use local block fixer
  conf.set("raid.blockfix.classname", 
           "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");
  conf.set("dfs.block.replicator.classname",
      "org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid");

  conf.set("raid.server.address", "localhost:0");

  // create a dfs and map-reduce cluster
  final int taskTrackers = 4;

  // Because BlockPlacementPolicyRaid only allows one replica in each rack,
  // spread the 6 nodes across 6 racks so that the chooseTarget function can
  // pick more than one node.
  String[] racks = {"/rack1", "/rack2", "/rack3", "/rack4", "/rack5", "/rack6"};
  dfs = new MiniDFSCluster(conf, 6, true, racks);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().toString();
  mr = new MiniMRCluster(taskTrackers, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);
}
 
Example 5
Source File: TestFileCorruptions.java    From RDFS with Apache License 2.0
private void mySetup(int stripeLength, int timeBeforeHar) throws Exception {

    new File(TEST_DIR).mkdirs(); // Make sure data directory exists
    conf = new Configuration();

    conf.set("raid.config.file", CONFIG_FILE);
    conf.setBoolean("raid.config.reload", true);
    conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

    // scan all policies once every 5 seconds
    conf.setLong("raid.policy.rescan.interval", 5000);

    // do not use map-reduce cluster for Raiding
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
    conf.set("raid.server.address", "localhost:0");

    conf.setBoolean("dfs.permissions", false);
    Utils.loadTestCodecs(conf, 5, 1, 3, "/destraid", "/destraidrs");

    dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfsCluster.waitActive();
    fileSys = dfsCluster.getFileSystem();
    namenode = fileSys.getUri().toString();

    FileSystem.setDefaultUri(conf, namenode);
    mr = new MiniMRCluster(4, namenode, 3);
    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();

    conf.set("mapred.job.tracker", jobTrackerName);
    
    FileWriter fileWriter = new FileWriter(CONFIG_FILE);
    fileWriter.write("<?xml version=\"1.0\"?>\n");
    String str = "<configuration> " +
                     "<policy name = \"RaidTest1\"> " +
                        "<srcPath prefix=\"/user/dhruba/raidtest\"/> " +
                        "<codecId>xor</codecId> " +
                        "<destPath> /destraid</destPath> " +
                        "<property> " +
                          "<name>targetReplication</name> " +
                          "<value>1</value> " + 
                          "<description>after RAIDing, decrease the replication factor of a file to this value." +
                          "</description> " + 
                        "</property> " +
                        "<property> " +
                          "<name>metaReplication</name> " +
                          "<value>1</value> " + 
                          "<description> replication factor of parity file" +
                          "</description> " + 
                        "</property> " +
                        "<property> " +
                          "<name>modTimePeriod</name> " +
                          "<value>2000</value> " + 
                          "<description> time (milliseconds) after a file is modified to make it " +
                                         "a candidate for RAIDing " +
                          "</description> " + 
                        "</property> ";
    if (timeBeforeHar >= 0) {
      str +=
                        "<property> " +
                          "<name>time_before_har</name> " +
                          "<value>" + timeBeforeHar + "</value> " +
                          "<description> amount of time waited before har'ing parity files" +
                          "</description> " + 
                        "</property> ";
    }

    str +=
                     "</policy>" +
                 "</configuration>";
    fileWriter.write(str);
    fileWriter.close();
  }
 
Example 6
Source File: TestDirectoryBlockFixer.java    From RDFS with Apache License 2.0
private void mySetup(int stripeLength) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    String base = new File(".").getAbsolutePath();
    System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
  }

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();

  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  // scan all policies once every 5 seconds
  conf.setLong("raid.policy.rescan.interval", 5000);

  // do not use map-reduce cluster for Raiding
  conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  conf.set("raid.server.address", "localhost:0");
  conf.set("mapred.raid.http.address", "localhost:0");

  Utils.loadTestCodecs(conf, stripeLength, stripeLength, 1, 3, "/destraid",
      "/destraidrs", false, true);

  conf.setBoolean("dfs.permissions", false);

  dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  dfsCluster.waitActive();
  fileSys = dfsCluster.getFileSystem();
  namenode = fileSys.getUri().toString();

  FileSystem.setDefaultUri(conf, namenode);
  mr = new MiniMRCluster(4, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();

  conf.set("mapred.job.tracker", jobTrackerName);
  ConfigBuilder cb = new ConfigBuilder(CONFIG_FILE);
  cb.addPolicy("RaidTest1", "/user/dhruba/raidtest",
      1, 1);
  cb.addPolicy("RaidTest2", "/user/dhruba/raidtestrs",
      1, 1, "rs");
  cb.persist();
}
 
Example 7
Source File: TestBlockFixer.java    From RDFS with Apache License 2.0
private void mySetup(int stripeLength, int timeBeforeHar) throws Exception {

    new File(TEST_DIR).mkdirs(); // Make sure data directory exists
    conf = new Configuration();

    conf.set("raid.config.file", CONFIG_FILE);
    conf.setBoolean("raid.config.reload", true);
    conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

    // scan all policies once every 5 seconds
    conf.setLong("raid.policy.rescan.interval", 5000);

    // do not use map-reduce cluster for Raiding
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
    conf.set("raid.server.address", "localhost:0");
    conf.set("mapred.raid.http.address", "localhost:0");

    Utils.loadTestCodecs(conf, stripeLength, 1, 3, "/destraid", "/destraidrs");

    conf.setBoolean("dfs.permissions", false);

    dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfsCluster.waitActive();
    fileSys = dfsCluster.getFileSystem();
    namenode = fileSys.getUri().toString();

    FileSystem.setDefaultUri(conf, namenode);
    mr = new MiniMRCluster(4, namenode, 3);
    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();

    conf.set("mapred.job.tracker", jobTrackerName);
    
    FileWriter fileWriter = new FileWriter(CONFIG_FILE);
    fileWriter.write("<?xml version=\"1.0\"?>\n");
    String str = "<configuration> " +
                     "<policy name = \"RaidTest1\"> " +
                        "<srcPath prefix=\"/user/dhruba/raidtest\"/> " +
                        "<codecId>xor</codecId> " +
                        "<destPath> /destraid</destPath> " +
                        "<property> " +
                          "<name>targetReplication</name> " +
                          "<value>1</value> " + 
                          "<description>after RAIDing, decrease the replication factor of a file to this value." +
                          "</description> " + 
                        "</property> " +
                        "<property> " +
                          "<name>metaReplication</name> " +
                          "<value>1</value> " + 
                          "<description> replication factor of parity file" +
                          "</description> " + 
                        "</property> " +
                        "<property> " +
                          "<name>modTimePeriod</name> " +
                          "<value>2000</value> " + 
                          "<description> time (milliseconds) after a file is modified to make it " +
                                         "a candidate for RAIDing " +
                          "</description> " + 
                        "</property> ";
    if (timeBeforeHar >= 0) {
      str +=
                        "<property> " +
                          "<name>time_before_har</name> " +
                          "<value>" + timeBeforeHar + "</value> " +
                          "<description> amount of time waited before har'ing parity files" +
                          "</description> " + 
                        "</property> ";
    }

    str +=
                     "</policy>" +
                 "</configuration>";
    fileWriter.write(str);
    fileWriter.close();
  }
 
Example 8
Source File: TestRaidPurge.java    From RDFS with Apache License 2.0
/**
 * create mapreduce and dfs clusters
 */
public void createClusters(boolean local, int numNodes, 
    String[] racks, String[] hosts) throws Exception {

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  // scan all policies once every 5 seconds
  conf.setLong("raid.policy.rescan.interval", 5000);
  // the RaidNode does the raiding inline (instead of submitting to map/reduce)
  if (local) {
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  } else {
    conf.set("raid.classname", "org.apache.hadoop.raid.DistRaidNode");
  }
  // use local block fixer
  conf.set("raid.blockfix.classname",
           "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");
  conf.set("dfs.block.replicator.classname",
           "org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid");

  conf.set("raid.server.address", "localhost:0");
  conf.setLong("dfs.blockreport.intervalMsec", 1000L);
  
  // create a dfs and map-reduce cluster
  final int taskTrackers = numNodes;

  dfs = new MiniDFSCluster(conf, numNodes, true, racks, hosts);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().toString();
  mr = new MiniMRCluster(taskTrackers, namenode, numNodes);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);
  // Don't allow empty files to be raided
  conf.setLong(RaidNode.MINIMUM_RAIDABLE_FILESIZE_KEY, 1L);
}
 
Example 9
Source File: TestMissingParity.java    From RDFS with Apache License 2.0
private void createClusters(boolean local) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    String base = new File(".").getAbsolutePath();
    System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
  }

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  Utils.loadTestCodecs(conf);

  // scan all policies once every 100 seconds
  conf.setLong("raid.policy.rescan.interval", 100 * 1000L);

  // the RaidNode does the raiding inline (instead of submitting to map/reduce)
  if (local) {
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  } else {
    conf.set("raid.classname", "org.apache.hadoop.raid.DistRaidNode");
  }

  // use local block fixer
  conf.set("raid.blockfix.classname", 
           "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");

  conf.set("raid.server.address", "localhost:0");

  // create a dfs and map-reduce cluster
  final int taskTrackers = 4;

  dfs = new MiniDFSCluster(conf, 6, true, null);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();

  Path raidRoot = new Path(Codec.getCodec("xor").parityDirectory);
  root = raidRoot.getParent();
  String file1 = "/p1/f1.txt";
  String file2 = "/p1/f2.txt";
  String file3 = "/p2/f3.txt";
  String file4 = "/p2/f4.txt";
  Path fPath1 = new Path(root + file1);
  Path fPath2 = new Path(root + file2);
  Path fPath3 = new Path(root + file3);
  Path fPath4 = new Path(root + file4);
  Path rPath3 = new Path(raidRoot + file3);
  allExpectedMissingFiles = new HashSet<String>();
  allExpectedMissingFiles.add(fPath2.toUri().getPath());
  allExpectedMissingFiles.add(fPath3.toUri().getPath());
  allExpectedMissingFiles.add(fPath4.toUri().getPath());
  fileSys.create(fPath1, (short)3);
  fileSys.create(fPath2, (short)2);
  fileSys.create(fPath3, (short)2);
  fileSys.create(fPath4, (short)2);
  fileSys.create(rPath3, (short)2);
  namenode = fileSys.getUri().toString();
  mr = new MiniMRCluster(taskTrackers, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);
}