Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#getNameNodePort()

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#getNameNodePort(). You can go to the original project or source file by following the links above each example.
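Before the project-specific examples, a minimal sketch of the common pattern may help: the mini cluster binds its NameNode RPC server to a free port chosen at startup, and getNameNodePort() returns that port so a test can construct addresses such as "127.0.0.1:<port>" or an hftp URI. The sketch below is illustrative rather than taken from any of the projects listed; it assumes the hadoop-hdfs test artifact (which provides MiniDFSCluster) is on the classpath, and it uses the Builder API seen in the hadoop and big-c examples (older codebases such as RDFS use the MiniDFSCluster(conf, numNodes, format, racks) constructor instead).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsPortSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Start a single-datanode mini cluster; the NameNode picks a free RPC port.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    try {
      cluster.waitActive();
      // getNameNodePort() reports the port the NameNode actually bound to.
      int nnPort = cluster.getNameNodePort();
      System.out.println("NameNode RPC address: 127.0.0.1:" + nnPort);
    } finally {
      cluster.shutdown();
    }
  }
}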
Example 1
Source File: TestRaidHar.java    From RDFS with Apache License 2.0
/**
 * create mapreduce and dfs clusters
 */
private void createClusters(boolean local) throws Exception {

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  // scan all policies once every 5 seconds
  conf.setLong("raid.policy.rescan.interval", 5000);

  // the RaidNode does the raiding inline (instead of submitting to map/reduce)
  if (local) {
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  } else {
    conf.set("raid.classname", "org.apache.hadoop.raid.DistRaidNode");
  }
  // use local block fixer
  conf.set("raid.blockfix.classname",
           "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");

  conf.set("raid.server.address", "localhost:0");

  // create a dfs and map-reduce cluster
  final int taskTrackers = 4;

  dfs = new MiniDFSCluster(conf, 3, true, null);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().toString();
  mr = new MiniMRCluster(taskTrackers, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);

  Utils.loadTestCodecs(conf);
}
 
Example 2
Source File: TestSimulationBlockFixer.java    From RDFS with Apache License 2.0
private void mySetup(int stripeLength, int timeBeforeHar, 
    String xorCode, String rsCode, String code) throws Exception {

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();

  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  // scan all policies once every 5 seconds
  conf.setLong("raid.policy.rescan.interval", 5000);

  // do not use map-reduce cluster for Raiding
  conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  conf.set("raid.server.address", "localhost:0");

  Utils.loadTestCodecs(conf, stripeLength, stripeLength, 1, 3, "/destraid",
      "/destraidrs", true, xorCode, rsCode,
      false);

  conf.setBoolean("dfs.permissions", false);

  dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  dfsCluster.waitActive();
  fileSys = dfsCluster.getFileSystem();
  namenode = fileSys.getUri().toString();

  FileSystem.setDefaultUri(conf, namenode);
  mr = new MiniMRCluster(4, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);
  
  ConfigBuilder cb = new ConfigBuilder(CONFIG_FILE);
  cb.addPolicy("RaidTest1", "/user/dhruba/raidtest", 1, 1, code);
  cb.persist();
}
 
Example 3
Source File: TestDirectoryRaidDfs.java    From RDFS with Apache License 2.0
private void mySetup(
    String erasureCode, int rsParityLength) throws Exception {
  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();

  conf.setInt("raid.encoder.bufsize", 128);
  conf.setInt("raid.decoder.bufsize", 128);

  Utils.loadTestCodecs(conf, stripeLength, stripeLength, 1, rsParityLength, "/destraid",
      "/destraidrs", false, true);
  codec = Codec.getCodec(erasureCode);

  // scan all policies once every 5 seconds
  conf.setLong("raid.policy.rescan.interval", 5000);

  // Reduce run time for the test.
  conf.setInt("dfs.client.max.block.acquire.failures", 1);
  conf.setInt("dfs.client.baseTimeWindow.waitOn.BlockMissingException", 10);

  // do not use map-reduce cluster for Raiding
  conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");

  conf.set("raid.server.address", "localhost:0");
  // Avoid the datanode putting blocks under a subdir directory; the corruptBlock
  // function can only corrupt blocks under the current directory
  conf.setInt("dfs.datanode.numblocks", 1000);

  dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().toString();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  // Don't allow empty files to be raided
  conf.setLong(RaidNode.MINIMUM_RAIDABLE_FILESIZE_KEY, 1L);
}
 
Example 4
Source File: TestReadConstruction.java    From RDFS with Apache License 2.0
protected void mySetup() throws Exception {
  conf = new Configuration();
  if (System.getProperty("hadoop.log.dir") == null) {
    String base = new File(".").getAbsolutePath();
    System.setProperty("hadoop.log.dir", new Path(base).toString() 
        + "/logs");
  }
  

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  // the RaidNode does the raiding inline
  conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  // use local block fixer
  conf.set("raid.blockfix.classname", 
      "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");

  conf.set("raid.server.address", "localhost:0");
  Utils.loadTestCodecs(conf, 10, 1, 4, "/destraid", "/destraidrs");
  conf.setInt("fs.trash.interval", 1440);

  dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().toString();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
}
 
Example 5
Source File: TestTraceAdmin.java    From hadoop with Apache License 2.0
private String getHostPortForNN(MiniDFSCluster cluster) {
  return "127.0.0.1:" + cluster.getNameNodePort();
}
 
Example 6
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
/**
 * Test case where the name node is reformatted while the secondary namenode
 * is running. The secondary should shut itself down if it talks to a NN
 * with the wrong namespace.
 */
@Test
public void testReformatNNBetweenCheckpoints() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  
  Configuration conf = new HdfsConfiguration();
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      1);

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    int origPort = cluster.getNameNodePort();
    int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
    Configuration snnConf = new Configuration(conf);
    File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
      "namesecondary");
    snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      checkpointDir.getAbsolutePath());
    secondary = startSecondaryNameNode(snnConf);

    // secondary checkpoints once
    secondary.doCheckpoint();

    // reformat the primary NN
    cluster.shutdown();
    cluster = null;

    // Brief sleep to make sure that the 2NN's IPC connection to the NN
    // is dropped.
    try {
      Thread.sleep(100);
    } catch (InterruptedException ie) {
    }
    
    // Start a new NN with the same host/port.
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .nameNodePort(origPort)
        .nameNodeHttpPort(origHttpPort)
        .format(true).build();

    try {
      secondary.doCheckpoint();
      fail("Should have failed checkpoint against a different namespace");
    } catch (IOException ioe) {
      LOG.info("Got expected failure", ioe);
      assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }  
}
 
Example 7
Source File: TestTraceAdmin.java    From big-c with Apache License 2.0
private String getHostPortForNN(MiniDFSCluster cluster) {
  return "127.0.0.1:" + cluster.getNameNodePort();
}
 
Example 8
Source File: TestCheckpoint.java    From big-c with Apache License 2.0
/**
 * Test case where the name node is reformatted while the secondary namenode
 * is running. The secondary should shut itself down if it talks to a NN
 * with the wrong namespace.
 */
@Test
public void testReformatNNBetweenCheckpoints() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  
  Configuration conf = new HdfsConfiguration();
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      1);

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    int origPort = cluster.getNameNodePort();
    int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
    Configuration snnConf = new Configuration(conf);
    File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
      "namesecondary");
    snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      checkpointDir.getAbsolutePath());
    secondary = startSecondaryNameNode(snnConf);

    // secondary checkpoints once
    secondary.doCheckpoint();

    // reformat the primary NN
    cluster.shutdown();
    cluster = null;

    // Brief sleep to make sure that the 2NN's IPC connection to the NN
    // is dropped.
    try {
      Thread.sleep(100);
    } catch (InterruptedException ie) {
    }
    
    // Start a new NN with the same host/port.
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .nameNodePort(origPort)
        .nameNodeHttpPort(origHttpPort)
        .format(true).build();

    try {
      secondary.doCheckpoint();
      fail("Should have failed checkpoint against a different namespace");
    } catch (IOException ioe) {
      LOG.info("Got expected failure", ioe);
      assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }  
}
 
Example 9
Source File: TestTempDirectoryCleanUp.java    From RDFS with Apache License 2.0
/**
 * create mapreduce and dfs clusters
 */
private void createClusters(boolean local) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    String base = new File(".").getAbsolutePath();
    System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
  }

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);
  Utils.loadTestCodecs(conf, 3, 10, 1, 5, "/raid", "/raidrs", false, false);

  conf.setLong("raid.policy.rescan.interval", 5 * 1000L);

  // the RaidNode does the raiding inline (instead of submitting to map/reduce)
  if (local) {
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  } else {
    conf.set("raid.classname", "org.apache.hadoop.raid.DistRaidNode");
  }

  // use local block fixer
  conf.set("raid.blockfix.classname", 
           "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");
  conf.set("dfs.block.replicator.classname",
      "org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid");

  conf.set("raid.server.address", "localhost:0");

  // create a dfs and map-reduce cluster
  final int taskTrackers = 4;

  // Because BlockPlacementPolicyRaid only allows one replica in each rack,
  // spread 6 nodes across 6 racks so that the chooseTarget function can pick
  // more than one node.
  String[] racks = {"/rack1", "/rack2", "/rack3", "/rack4", "/rack5", "/rack6"};
  dfs = new MiniDFSCluster(conf, 6, true, racks);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().toString();
  mr = new MiniMRCluster(taskTrackers, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);
}
 
Example 10
Source File: TestFileCorruptions.java    From RDFS with Apache License 2.0
private void mySetup(int stripeLength, int timeBeforeHar) throws Exception {

    new File(TEST_DIR).mkdirs(); // Make sure data directory exists
    conf = new Configuration();

    conf.set("raid.config.file", CONFIG_FILE);
    conf.setBoolean("raid.config.reload", true);
    conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

    // scan all policies once every 5 seconds
    conf.setLong("raid.policy.rescan.interval", 5000);

    // do not use map-reduce cluster for Raiding
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
    conf.set("raid.server.address", "localhost:0");

    conf.setBoolean("dfs.permissions", false);
    Utils.loadTestCodecs(conf, 5, 1, 3, "/destraid", "/destraidrs");

    dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfsCluster.waitActive();
    fileSys = dfsCluster.getFileSystem();
    namenode = fileSys.getUri().toString();

    FileSystem.setDefaultUri(conf, namenode);
    mr = new MiniMRCluster(4, namenode, 3);
    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();

    FileSystem.setDefaultUri(conf, namenode);
    conf.set("mapred.job.tracker", jobTrackerName);
    
    FileWriter fileWriter = new FileWriter(CONFIG_FILE);
    fileWriter.write("<?xml version=\"1.0\"?>\n");
    String str = "<configuration> " +
                     "<policy name = \"RaidTest1\"> " +
                        "<srcPath prefix=\"/user/dhruba/raidtest\"/> " +
                        "<codecId>xor</codecId> " +
                        "<destPath> /destraid</destPath> " +
                        "<property> " +
                          "<name>targetReplication</name> " +
                          "<value>1</value> " + 
                          "<description>after RAIDing, decrease the replication factor of a file to this value." +
                          "</description> " + 
                        "</property> " +
                        "<property> " +
                          "<name>metaReplication</name> " +
                          "<value>1</value> " + 
                          "<description> replication factor of parity file" +
                          "</description> " + 
                        "</property> " +
                        "<property> " +
                          "<name>modTimePeriod</name> " +
                          "<value>2000</value> " + 
                          "<description> time (milliseconds) after a file is modified to make it " +
                                         "a candidate for RAIDing " +
                          "</description> " + 
                        "</property> ";
    if (timeBeforeHar >= 0) {
      str +=
                        "<property> " +
                          "<name>time_before_har</name> " +
                          "<value>" + timeBeforeHar + "</value> " +
                          "<description> amount of time waited before har'ing parity files" +
                          "</description> " + 
                        "</property> ";
    }

    str +=
                     "</policy>" +
                 "</configuration>";
    fileWriter.write(str);
    fileWriter.close();
  }
 
Example 11
Source File: TestDirectoryBlockFixer.java    From RDFS with Apache License 2.0
private void mySetup(int stripeLength) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    String base = new File(".").getAbsolutePath();
    System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
  }

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();

  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  // scan all policies once every 5 seconds
  conf.setLong("raid.policy.rescan.interval", 5000);

  // do not use map-reduce cluster for Raiding
  conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  conf.set("raid.server.address", "localhost:0");
  conf.set("mapred.raid.http.address", "localhost:0");

  Utils.loadTestCodecs(conf, stripeLength, stripeLength, 1, 3, "/destraid",
      "/destraidrs", false, true);

  conf.setBoolean("dfs.permissions", false);

  dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  dfsCluster.waitActive();
  fileSys = dfsCluster.getFileSystem();
  namenode = fileSys.getUri().toString();

  FileSystem.setDefaultUri(conf, namenode);
  mr = new MiniMRCluster(4, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);
  ConfigBuilder cb = new ConfigBuilder(CONFIG_FILE);
  cb.addPolicy("RaidTest1", "/user/dhruba/raidtest",
      1, 1);
  cb.addPolicy("RaidTest2", "/user/dhruba/raidtestrs",
      1, 1, "rs");
  cb.persist();
}
 
Example 12
Source File: TestBlockFixer.java    From RDFS with Apache License 2.0
private void mySetup(int stripeLength, int timeBeforeHar) throws Exception {

    new File(TEST_DIR).mkdirs(); // Make sure data directory exists
    conf = new Configuration();

    conf.set("raid.config.file", CONFIG_FILE);
    conf.setBoolean("raid.config.reload", true);
    conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

    // scan all policies once every 5 seconds
    conf.setLong("raid.policy.rescan.interval", 5000);

    // do not use map-reduce cluster for Raiding
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
    conf.set("raid.server.address", "localhost:0");
    conf.set("mapred.raid.http.address", "localhost:0");

    Utils.loadTestCodecs(conf, stripeLength, 1, 3, "/destraid", "/destraidrs");

    conf.setBoolean("dfs.permissions", false);

    dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfsCluster.waitActive();
    fileSys = dfsCluster.getFileSystem();
    namenode = fileSys.getUri().toString();

    FileSystem.setDefaultUri(conf, namenode);
    mr = new MiniMRCluster(4, namenode, 3);
    jobTrackerName = "localhost:" + mr.getJobTrackerPort();
    hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();

    FileSystem.setDefaultUri(conf, namenode);
    conf.set("mapred.job.tracker", jobTrackerName);
    
    FileWriter fileWriter = new FileWriter(CONFIG_FILE);
    fileWriter.write("<?xml version=\"1.0\"?>\n");
    String str = "<configuration> " +
                     "<policy name = \"RaidTest1\"> " +
                        "<srcPath prefix=\"/user/dhruba/raidtest\"/> " +
                        "<codecId>xor</codecId> " +
                        "<destPath> /destraid</destPath> " +
                        "<property> " +
                          "<name>targetReplication</name> " +
                          "<value>1</value> " + 
                          "<description>after RAIDing, decrease the replication factor of a file to this value." +
                          "</description> " + 
                        "</property> " +
                        "<property> " +
                          "<name>metaReplication</name> " +
                          "<value>1</value> " + 
                          "<description> replication factor of parity file" +
                          "</description> " + 
                        "</property> " +
                        "<property> " +
                          "<name>modTimePeriod</name> " +
                          "<value>2000</value> " + 
                          "<description> time (milliseconds) after a file is modified to make it " +
                                         "a candidate for RAIDing " +
                          "</description> " + 
                        "</property> ";
    if (timeBeforeHar >= 0) {
      str +=
                        "<property> " +
                          "<name>time_before_har</name> " +
                          "<value>" + timeBeforeHar + "</value> " +
                          "<description> amount of time waited before har'ing parity files" +
                          "</description> " + 
                        "</property> ";
    }

    str +=
                     "</policy>" +
                 "</configuration>";
    fileWriter.write(str);
    fileWriter.close();
  }
 
Example 13
Source File: TestRaidShell.java    From RDFS with Apache License 2.0
private void mySetup(int stripeLength, int timeBeforeHar) throws Exception {

    new File(TEST_DIR).mkdirs(); // Make sure data directory exists
    conf = new Configuration();

    conf.set("raid.config.file", CONFIG_FILE);
    conf.setBoolean("raid.config.reload", true);
    conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

    // scan all policies once every 5 seconds
    conf.setLong("raid.policy.rescan.interval", 5000);

    // do not use map-reduce cluster for Raiding
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
    conf.set("raid.server.address", "localhost:0");

    conf.setBoolean("dfs.permissions", false);

    dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
    dfsCluster.waitActive();
    fileSys = dfsCluster.getFileSystem();
    namenode = fileSys.getUri().toString();

    FileSystem.setDefaultUri(conf, namenode);
    hftp = "hftp://localhost.localdomain:" + dfsCluster.getNameNodePort();

    FileSystem.setDefaultUri(conf, namenode);

    FileWriter fileWriter = new FileWriter(CONFIG_FILE);
    fileWriter.write("<?xml version=\"1.0\"?>\n");
    String str = "<configuration> " +
                   "<policy name = \"" + RAID_POLICY_NAME + "\"> " +
                        "<srcPath prefix=\"" + RAID_SRC_PATH + "\"/> " +
                        "<codecId>xor</codecId> " +
                        "<destPath> /raid</destPath> " +
                        "<property> " +
                          "<name>targetReplication</name> " +
                          "<value>1</value> " +
                          "<description>after RAIDing, decrease the replication factor of a file to this value." +
                          "</description> " +
                        "</property> " +
                        "<property> " +
                          "<name>metaReplication</name> " +
                          "<value>1</value> " +
                          "<description> replication factor of parity file" +
                          "</description> " +
                        "</property> " +
                        "<property> " +
                          "<name>modTimePeriod</name> " +
                          "<value>0</value> " +
                          "<description> time (milliseconds) after a file is modified to make it " +
                                         "a candidate for RAIDing " +
                          "</description> " +
                        "</property> ";
    if (timeBeforeHar >= 0) {
      str +=
                        "<property> " +
                          "<name>time_before_har</name> " +
                          "<value>" + timeBeforeHar + "</value> " +
                          "<description> amount of time waited before har'ing parity files" +
                          "</description> " +
                        "</property> ";
    }

    str +=
                   "</policy>" +
                 "</configuration>";
    fileWriter.write(str);
    fileWriter.close();

    Utils.loadTestCodecs(conf, stripeLength, 1, 3, "/raid", "/raidrs");
  }
 
Example 14
Source File: TestParityMovement.java    From RDFS with Apache License 2.0
protected void mySetup(String erasureCode, int rsParityLength) 
    throws Exception {
  conf = new Configuration();
  if (System.getProperty("hadoop.log.dir") == null) {
    String base = new File(".").getAbsolutePath();
    System.setProperty("hadoop.log.dir", new Path(base).toString() 
                        + "/logs");
  }
  
  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);
  
  // the RaidNode does the raiding inline
  conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  // use local block fixer
  conf.set("raid.blockfix.classname", 
      "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");
  
  conf.set("raid.server.address", "localhost:0");
  conf.setInt("fs.trash.interval", 1440);
  Utils.loadTestCodecs(conf, 5, 1, 3, "/destraid", "/destraidrs");
  
  dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().toString();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();
  
  FileSystem.setDefaultUri(conf, namenode);
  
  FileWriter fileWriter = new FileWriter(CONFIG_FILE);
  fileWriter.write("<?xml version=\"1.0\"?>\n");
  String str = "<configuration> " +
      "<policy name = \"RaidTest1\"> " +
        "<srcPath prefix=\"/user/dhruba/raidtest\"/> " +
        "<codecId>xor</codecId> " +
        "<destPath> /destraid</destPath> " +
        "<property> " +
          "<name>targetReplication</name> " +
          "<value>1</value> " + 
          "<description>after RAIDing, " +
          "decrease the replication factor of a file to this value." +
          "</description> " + 
        "</property> " +
        "<property> " +
          "<name>metaReplication</name> " +
          "<value>1</value> " + 
          "<description> replication factor of parity file" +
          "</description> " + 
        "</property> " +
        "<property> " +
          "<name>modTimePeriod</name> " +
          "<value>2000</value> " + 
          "<description> time (milliseconds) " +
            "after a file is modified to make it " +
            "a candidate for RAIDing " +
          "</description> " + 
        "</property> " +
      "</policy>" +
      "</configuration>";
  fileWriter.write(str);
  fileWriter.close();
}
 
Example 15
Source File: TestRaidPurge.java    From RDFS with Apache License 2.0
/**
 * create mapreduce and dfs clusters
 */
public void createClusters(boolean local, int numNodes, 
    String[] racks, String[] hosts) throws Exception {

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  // scan all policies once every 5 seconds
  conf.setLong("raid.policy.rescan.interval", 5000);
  // the RaidNode does the raiding inline (instead of submitting to map/reduce)
  if (local) {
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  } else {
    conf.set("raid.classname", "org.apache.hadoop.raid.DistRaidNode");
  }
  // use local block fixer
  conf.set("raid.blockfix.classname",
           "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");
  conf.set("dfs.block.replicator.classname",
           "org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyRaid");

  conf.set("raid.server.address", "localhost:0");
  conf.setLong("dfs.blockreport.intervalMsec", 1000L);
  
  // create a dfs and map-reduce cluster
  final int taskTrackers = numNodes;

  dfs = new MiniDFSCluster(conf, numNodes, true, racks, hosts);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().toString();
  mr = new MiniMRCluster(taskTrackers, namenode, numNodes);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);
  // Don't allow empty files to be raided
  conf.setLong(RaidNode.MINIMUM_RAIDABLE_FILESIZE_KEY, 1L);
}
 
Example 16
Source File: TestMissingParity.java    From RDFS with Apache License 2.0
private void createClusters(boolean local) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    String base = new File(".").getAbsolutePath();
    System.setProperty("hadoop.log.dir", new Path(base).toString() + "/logs");
  }

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  Utils.loadTestCodecs(conf);

  // scan all policies once every 100 seconds
  conf.setLong("raid.policy.rescan.interval", 100 * 1000L);

  // the RaidNode does the raiding inline (instead of submitting to map/reduce)
  if (local) {
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  } else {
    conf.set("raid.classname", "org.apache.hadoop.raid.DistRaidNode");
  }

  // use local block fixer
  conf.set("raid.blockfix.classname", 
           "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");

  conf.set("raid.server.address", "localhost:0");

  // create a dfs and map-reduce cluster
  final int taskTrackers = 4;
  final int jobTrackerPort = 60050;

  dfs = new MiniDFSCluster(conf, 6, true, null);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();

  Path raidRoot = new Path(Codec.getCodec("xor").parityDirectory);
  root = raidRoot.getParent();
  String file1 = "/p1/f1.txt";
  String file2 = "/p1/f2.txt";
  String file3 = "/p2/f3.txt";
  String file4 = "/p2/f4.txt";
  Path fPath1 = new Path(root + file1);
  Path fPath2 = new Path(root + file2);
  Path fPath3 = new Path(root + file3);
  Path fPath4 = new Path(root + file4);
  Path rPath3 = new Path(raidRoot + file3);
  allExpectedMissingFiles = new HashSet<String>();
  allExpectedMissingFiles.add(fPath2.toUri().getPath());
  allExpectedMissingFiles.add(fPath3.toUri().getPath());
  allExpectedMissingFiles.add(fPath4.toUri().getPath());
  fileSys.create(fPath1, (short)3);
  fileSys.create(fPath2, (short)2);
  fileSys.create(fPath3, (short)2);
  fileSys.create(fPath4, (short)2);
  fileSys.create(rPath3, (short)2);
  namenode = fileSys.getUri().toString();
  mr = new MiniMRCluster(taskTrackers, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);
}