Java Code Examples for org.apache.hadoop.fs.FileSystem.setDefaultUri()

The following are Java code examples showing how to use the setDefaultUri() method of the org.apache.hadoop.fs.FileSystem class.
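For orientation before the examples, here is a minimal sketch of the method's round trip (this sketch is not taken from the Hadoop sources, and the hdfs://namenode.example.com:8020 address is a placeholder): setDefaultUri() writes the default-filesystem URI (the fs.defaultFS key) into a Configuration, and getDefaultUri() reads it back.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SetDefaultUriSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Point the default filesystem at a (placeholder) NameNode address;
    // this stores the URI under the fs.defaultFS configuration key.
    FileSystem.setDefaultUri(conf, "hdfs://namenode.example.com:8020");

    // Read the value back from the same Configuration.
    URI defaultUri = FileSystem.getDefaultUri(conf);
    System.out.println("default filesystem: " + defaultUri);

    // A later FileSystem.get(conf) would resolve scheme-less paths against
    // this URI (and, for hdfs://, would try to contact that NameNode).
  }
}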
Example 1
Project: hadoop   File: TestBlockReplacement.java
@Test
public void testThrottler() throws IOException {
  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  long bandwidthPerSec = 1024*1024L;
  final long TOTAL_BYTES = 6*bandwidthPerSec;
  long bytesToSend = TOTAL_BYTES;
  long start = Time.monotonicNow();
  DataTransferThrottler throttler = new DataTransferThrottler(bandwidthPerSec);
  long bytesSent = 1024*512L; // 0.5MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  bytesSent = 1024*768L; // 0.75MB
  throttler.throttle(bytesSent);
  bytesToSend -= bytesSent;
  try {
    Thread.sleep(1000);
  } catch (InterruptedException ignored) {}
  throttler.throttle(bytesToSend);
  long end = Time.monotonicNow();
  // Average throughput over the whole transfer must not exceed the cap.
  assertTrue(TOTAL_BYTES * 1000 / (end - start) <= bandwidthPerSec);
}
 
Example 2
Project: hadoop   File: TestValidateConfigurationSettings.java
/**
 * Tests that setting the RPC port to the same value as the web port
 * throws an exception when trying to re-use the same port.
 */
@Test(expected = BindException.class, timeout = 300000)
public void testThatMatchingRPCandHttpPortsThrowException() 
    throws IOException {

  NameNode nameNode = null;
  try {
    Configuration conf = new HdfsConfiguration();
    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        nameDir.getAbsolutePath());

    Random rand = new Random();
    final int port = 30000 + rand.nextInt(30000);

    // set both of these to the same port. It should fail.
    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port);
    DFSTestUtil.formatNameNode(conf);
    nameNode = new NameNode(conf);
  } finally {
    if (nameNode != null) {
      nameNode.stop();
    }
  }
}
 
Example 3
Project: hadoop   File: TestDistributedFileSystem.java
@Test
public void testFileSystemCloseAll() throws Exception {
  Configuration conf = getTestConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
  URI address = FileSystem.getDefaultUri(conf);

  try {
    FileSystem.closeAll();

    conf = getTestConfiguration();
    FileSystem.setDefaultUri(conf, address);
    FileSystem.get(conf);
    FileSystem.get(conf);
    FileSystem.closeAll();
  }
  finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 4
Project: hadoop-oss   File: GenericOptionsParser.java
/**
 * Modifies the configuration according to user-specified generic options.
 * @param conf Configuration to be modified
 * @param line User-specified generic options
 */
private void processGeneralOptions(Configuration conf,
    CommandLine line) throws IOException {
  if (line.hasOption("fs")) {
    FileSystem.setDefaultUri(conf, line.getOptionValue("fs"));
  }

  if (line.hasOption("jt")) {
    String optionValue = line.getOptionValue("jt");
    if (optionValue.equalsIgnoreCase("local")) {
      conf.set("mapreduce.framework.name", optionValue);
    }

    conf.set("yarn.resourcemanager.address", optionValue, 
        "from -jt command line option");
  }
  if (line.hasOption("conf")) {
    String[] values = line.getOptionValues("conf");
    for(String value : values) {
      conf.addResource(new Path(value));
    }
  }

  if (line.hasOption('D')) {
    String[] property = line.getOptionValues('D');
    for(String prop : property) {
      String[] keyval = prop.split("=", 2);
      if (keyval.length == 2) {
        conf.set(keyval[0], keyval[1], "from command line");
      }
    }
  }

  if (line.hasOption("libjars")) {
    conf.set("tmpjars", 
             validateFiles(line.getOptionValue("libjars"), conf),
             "from -libjars command line option");
    //setting libjars in client classpath
    URL[] libjars = getLibJars(conf);
    if(libjars!=null && libjars.length>0) {
      conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader()));
      Thread.currentThread().setContextClassLoader(
          new URLClassLoader(libjars, 
              Thread.currentThread().getContextClassLoader()));
    }
  }
  if (line.hasOption("files")) {
    conf.set("tmpfiles", 
             validateFiles(line.getOptionValue("files"), conf),
             "from -files command line option");
  }
  if (line.hasOption("archives")) {
    conf.set("tmparchives", 
              validateFiles(line.getOptionValue("archives"), conf),
              "from -archives command line option");
  }
  conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);
  
  // tokensFile
  if(line.hasOption("tokenCacheFile")) {
    String fileName = line.getOptionValue("tokenCacheFile");
    // check if the local file exists
    FileSystem localFs = FileSystem.getLocal(conf);
    Path p = localFs.makeQualified(new Path(fileName));
    if (!localFs.exists(p)) {
        throw new FileNotFoundException("File "+fileName+" does not exist.");
    }
    if(LOG.isDebugEnabled()) {
      LOG.debug("setting conf tokensFile: " + fileName);
    }
    UserGroupInformation.getCurrentUser().addCredentials(
        Credentials.readTokenStorageFile(p, conf));
    conf.set("mapreduce.job.credentials.binary", p.toString(),
             "from -tokenCacheFile command line option");

  }
}
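Example 4 above is where the -fs generic option is translated into a setDefaultUri() call. As a hedged usage sketch (DefaultFsTool is a hypothetical class, not part of Hadoop), a Tool launched through ToolRunner receives a Configuration to which GenericOptionsParser has already applied -fs before run() is invoked:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class DefaultFsTool extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // By this point GenericOptionsParser has processed generic options such
    // as -fs, i.e. it has called FileSystem.setDefaultUri(conf, <uri>).
    System.out.println("fs.defaultFS = " + FileSystem.getDefaultUri(getConf()));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // Invoked e.g. as: hadoop jar app.jar DefaultFsTool -fs hdfs://nn.example.com:8020
    System.exit(ToolRunner.run(new Configuration(), new DefaultFsTool(), args));
  }
}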
 
Example 5
Project: hadoop   File: TestHDFSServerPorts.java
/**
 * Verify namenode port usage.
 */
public void runTestNameNodePorts(boolean withService) throws Exception {
  NameNode nn = null;
  try {
    nn = startNameNode(withService);

    // start another namenode on the same port
    Configuration conf2 = new HdfsConfiguration(config);
    conf2.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        fileAsURI(new File(hdfsDir, "name2")).toString());
    DFSTestUtil.formatNameNode(conf2);
    boolean started = canStartNameNode(conf2);
    assertFalse(started); // should fail

    // start on a different main port
    FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
    started = canStartNameNode(conf2);
    assertFalse(started); // should fail again

    // reset conf2 since NameNode modifies it
    FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
    // different http port
    conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
    started = canStartNameNode(conf2);

    if (withService) {
      assertFalse("Should've failed on service port", started);

      // reset conf2 since NameNode modifies it
      FileSystem.setDefaultUri(conf2, "hdfs://" + THIS_HOST);
      conf2.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, THIS_HOST);
      // Set service address.
      conf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, THIS_HOST);
      started = canStartNameNode(conf2);        
    }
    assertTrue(started);
  } finally {
    stopNameNode(nn);
  }
}
 
Example 6
Project: hadoop   File: TestMiniMRClasspath.java
static void configureWordCount(FileSystem fs, JobConf conf, String input,
    int numMaps, int numReduces, Path inDir, Path outDir) throws IOException {
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  DataOutputStream file = fs.create(new Path(inDir, "part-0"));
  file.writeBytes(input);
  file.close();
  FileSystem.setDefaultUri(conf, fs.getUri());
  conf.set(JTConfig.FRAMEWORK_NAME, JTConfig.YARN_FRAMEWORK_NAME);
  conf.setJobName("wordcount");
  conf.setInputFormat(TextInputFormat.class);
  
  // the keys are words (strings)
  conf.setOutputKeyClass(Text.class);
  // the values are counts (ints)
  conf.setOutputValueClass(IntWritable.class);
  
  conf.set("mapred.mapper.class", "testjar.ClassWordCount$MapClass");        
  conf.set("mapred.combine.class", "testjar.ClassWordCount$Reduce");
  conf.set("mapred.reducer.class", "testjar.ClassWordCount$Reduce");
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
  // set the test jar file
  conf.setJarByClass(TestMiniMRClasspath.class);
}
 
Example 7
Project: hadoop   File: MiniMRCluster.java
public MiniMRCluster(int jobTrackerPort, int taskTrackerPort,
    int numTaskTrackers, String namenode, int numDir, String[] racks,
    String[] hosts, UserGroupInformation ugi, JobConf conf,
    int numTrackerToExclude, Clock clock) throws IOException {
  if (conf == null) conf = new JobConf();
  FileSystem.setDefaultUri(conf, namenode);
  String identifier = this.getClass().getSimpleName() + "_"
      + Integer.toString(new Random().nextInt(Integer.MAX_VALUE));
  mrClientCluster = MiniMRClientClusterFactory.create(this.getClass(),
      identifier, numTaskTrackers, conf);
}
 
Example 8
Project: hadoop   File: TestAllowFormat.java
@BeforeClass
public static void setUp() throws Exception {
  config = new Configuration();
  if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
    throw new IOException("Could not delete hdfs directory '" + DFS_BASE_DIR +
                          "'");
  }
  
  // Test has multiple name directories.
  // Format should not really prompt us if one of the directories exists
  // but is empty. So in case the test hangs on an input, it means something
  // could be wrong in the format prompting code. (HDFS-1636)
  LOG.info("hdfsdir is " + DFS_BASE_DIR.getAbsolutePath());
  File nameDir1 = new File(DFS_BASE_DIR, "name1");
  File nameDir2 = new File(DFS_BASE_DIR, "name2");

  // To test multiple directory handling, we pre-create one of the name directories.
  nameDir1.mkdirs();

  // Set multiple name directories.
  config.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir1.getPath() + "," + nameDir2.getPath());
  config.set(DFS_DATANODE_DATA_DIR_KEY, new File(DFS_BASE_DIR, "data").getPath());

  config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(DFS_BASE_DIR, "secondary").getPath());

  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
 
Example 9
Project: hadoop   File: TestEncryptedShuffle.java
private void startCluster(Configuration  conf) throws Exception {
  if (System.getProperty("hadoop.log.dir") == null) {
    System.setProperty("hadoop.log.dir", "target/test-dir");
  }
  conf.set("dfs.block.access.token.enable", "false");
  conf.set("dfs.permissions", "true");
  conf.set("hadoop.security.authentication", "simple");
  String cp = conf.get(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      StringUtils.join(",",
          YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH))
      + File.pathSeparator + classpathDir;
  conf.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH, cp);
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSystem = dfsCluster.getFileSystem();
  fileSystem.mkdirs(new Path("/tmp"));
  fileSystem.mkdirs(new Path("/user"));
  fileSystem.mkdirs(new Path("/hadoop/mapred/system"));
  fileSystem.setPermission(
    new Path("/tmp"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/user"), FsPermission.valueOf("-rwxrwxrwx"));
  fileSystem.setPermission(
    new Path("/hadoop/mapred/system"), FsPermission.valueOf("-rwx------"));
  FileSystem.setDefaultUri(conf, fileSystem.getUri());
  mrCluster = MiniMRClientClusterFactory.create(this.getClass(), 1, conf);

  // so the minicluster conf is available to the containers.
  Writer writer = new FileWriter(classpathDir + "/core-site.xml");
  mrCluster.getConfig().writeXml(writer);
  writer.close();
}
 
Example 10
Project: hadoop   File: TestStorageRestore.java
@Before
public void setUpNameDirs() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  
  hdfsDir.mkdirs();
  path1 = new File(hdfsDir, "name1");
  path2 = new File(hdfsDir, "name2");
  path3 = new File(hdfsDir, "name3");
  
  path1.mkdir(); path2.mkdir(); path3.mkdir();
  if(!path2.exists() ||  !path3.exists() || !path1.exists()) {
    throw new IOException("Couldn't create dfs.name dirs in " + hdfsDir.getAbsolutePath());
  }
  
  String dfs_name_dir = path1.getPath() + "," + path2.getPath();
  System.out.println("configuring hdfsdir is " + hdfsDir.getAbsolutePath() +
      "; dfs_name_dir = " + dfs_name_dir + "; dfs_name_edits_dir(only) = " + path3.getPath());
  
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dfs_name_dir);
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, dfs_name_dir + "," + path3.getPath());

  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());
 
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
  
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  
  // set the restore feature on
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
}
 
Example 11
Project: hadoop   File: TestMRCJCSocketFactory.java
private Configuration getCustomSocketConfigs(final int nameNodePort) {
  // Get another reference via network using a specific socket factory
  Configuration cconf = new Configuration();
  FileSystem.setDefaultUri(cconf, String.format("hdfs://localhost:%s/",
      nameNodePort + 10));
  cconf.set("hadoop.rpc.socket.factory.class.default",
      "org.apache.hadoop.ipc.DummySocketFactory");
  cconf.set("hadoop.rpc.socket.factory.class.ClientProtocol",
      "org.apache.hadoop.ipc.DummySocketFactory");
  cconf.set("hadoop.rpc.socket.factory.class.JobSubmissionProtocol",
      "org.apache.hadoop.ipc.DummySocketFactory");
  return cconf;
}
 
Example 12
Project: hadoop   File: TestFileArgs.java
public TestFileArgs() throws IOException
{
  // Set up mini cluster
  conf = new Configuration();
  dfs = new MiniDFSCluster.Builder(conf).build();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().getAuthority();
  mr  = new MiniMRCluster(1, namenode, 1);

  map = LS_PATH;
  FileSystem.setDefaultUri(conf, "hdfs://" + namenode);
  setTestDir(new File("/tmp/TestFileArgs"));
}
 
Example 13
Project: hadoop   File: TestRefreshCallQueue.java
@Before
public void setUp() throws Exception {
  // We want to count additional events, so we reset here
  mockQueueConstructions = 0;
  mockQueuePuts = 0;
  int portRetries = 5;
  int nnPort;

  for (; portRetries > 0; --portRetries) {
    // Pick a random port in the range [30000,60000).
    nnPort = 30000 + rand.nextInt(30000);  
    config = new Configuration();
    callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
    config.setClass(callQueueConfigKey,
        MockCallQueue.class, BlockingQueue.class);
    config.set("hadoop.security.authorization", "true");

    FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
    fs = FileSystem.get(config);
    
    try {
      cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
      cluster.waitActive();
      break;
    } catch (BindException be) {
      // Retry with a different port number.
    }
  }
  
  if (portRetries == 0) {
    // Bail if we get very unlucky with our choice of ports.
    fail("Failed to pick an ephemeral port for the NameNode RPC server.");
  }
}
 
Example 14
Project: hadoop   File: TestDelegationToken.java
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  config.set("hadoop.security.auth_to_local",
      "RULE:[2:[email protected]$0]([email protected]*FOO.COM)s/@.*//" + "DEFAULT");
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
  cluster.waitActive();
  dtSecretManager = NameNodeAdapter.getDtSecretManager(
      cluster.getNamesystem());
}
 
Example 15
Project: hadoop   File: TestValidateConfigurationSettings.java
/**
 * Tests that setting the RPC port to a value different from the web port
 * does NOT throw an exception.
 */
@Test(timeout = 300000)
public void testThatDifferentRPCandHttpPortsAreOK() 
    throws IOException {

  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());

  Random rand = new Random();

  // A few retries in case the ports we choose are in use.
  for (int i = 0; i < 5; ++i) {
    final int port1 = 30000 + rand.nextInt(10000);
    final int port2 = port1 + 1 + rand.nextInt(10000);

    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port2);
    DFSTestUtil.formatNameNode(conf);
    NameNode nameNode = null;

    try {
      nameNode = new NameNode(conf); // should be OK!
      break;
    } catch(BindException be) {
      continue;     // Port in use? Try another.
    } finally {
      if (nameNode != null) {
        nameNode.stop();
      }
    }
  }
}
 
Example 16
Project: hadoop   File: TestNNThroughputBenchmark.java
/**
 * This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
 */
@Test
public void testNNThroughput() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());
  FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  DFSTestUtil.formatNameNode(conf);
  String[] args = new String[] {"-op", "all"};
  NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
}
 
Example 17
Project: hadoop   File: TestEditLogRace.java
private Configuration getConf() {
  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, NAME_DIR);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NAME_DIR);
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); 
  return conf;
}
 
Example 18
Project: hadoop   File: TestSpecialCharactersInOutputPath.java
public static boolean launchJob(URI fileSys,
                                JobConf conf,
                                int numMaps,
                                int numReduces) throws IOException {
  
  final Path inDir = new Path("/testing/input");
  final Path outDir = new Path("/testing/output");
  FileSystem fs = FileSystem.get(fileSys, conf);
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    LOG.warn("Can't create " + inDir);
    return false;
  }
  // generate an input file
  DataOutputStream file = fs.create(new Path(inDir, "part-0"));
  file.writeBytes("foo foo2 foo3");
  file.close();

  // use WordCount example
  FileSystem.setDefaultUri(conf, fileSys);
  conf.setJobName("foo");

  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputFormat(SpecialTextOutputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);
  conf.setMapperClass(IdentityMapper.class);        
  conf.setReducerClass(IdentityReducer.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
    
  // run job and wait for completion
  RunningJob runningJob = JobClient.runJob(conf);
    
  try {
    assertTrue(runningJob.isComplete());
    assertTrue(runningJob.isSuccessful());
    assertTrue("Output folder not found!", fs.exists(new Path("/testing/output/" + OUTPUT_FILENAME)));
  } catch (NullPointerException npe) {
    // This NPE should no longer happen.
    fail("An NPE should not have happened.");
  }
        
  // return job result
  LOG.info("job is complete: " + runningJob.isSuccessful());
  return (runningJob.isSuccessful());
}
 
Example 19
Project: hadoop   File: TestBlockStoragePolicy.java
@Test
public void testChooseTargetWithTopology() throws Exception {
  BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
  BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
      new StorageType[]{StorageType.DISK, StorageType.SSD,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] types = {StorageType.DISK, StorageType.SSD,
      StorageType.ARCHIVE};

  final DatanodeStorageInfo[] storages = DFSTestUtil
      .createDatanodeStorageInfos(3, racks, hosts, types);
  final DatanodeDescriptor[] dataNodes = DFSTestUtil
      .toDatanodeDescriptor(storages);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy1);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
  targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy2);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
}
 
Example 20
Project: hadoop   File: TestBlockStoragePolicy.java
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};

  final DatanodeStorageInfo[] diskStorages
      = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes
      = DFSTestUtil.toDatanodeDescriptor(diskStorages);
  for(int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
            StorageType.SSD));
  }

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}