Java Code Examples for org.apache.hadoop.fs.FileSystem#setDefaultUri()

The following examples show how to use org.apache.hadoop.fs.FileSystem#setDefaultUri(). Each example lists the source file and the open-source project it was taken from.
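Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of what setDefaultUri() does: it writes the default filesystem URI (fs.defaultFS, or fs.default.name in older Hadoop versions) into a Configuration so that later FileSystem.get(conf) calls resolve against that URI. The host and port namenode.example.com:8020 are placeholders, not a real cluster.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SetDefaultUriSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();

    // setDefaultUri() has two overloads: one takes a String, the other a URI.
    FileSystem.setDefaultUri(conf, "hdfs://namenode.example.com:8020");
    // FileSystem.setDefaultUri(conf, URI.create("hdfs://namenode.example.com:8020"));

    // getDefaultUri() reads the configured default back out of the Configuration.
    URI defaultUri = FileSystem.getDefaultUri(conf);
    System.out.println("Default filesystem URI: " + defaultUri);

    // FileSystem.get(conf) now resolves against the configured default URI.
    // This call only succeeds if a NameNode is actually reachable there.
    FileSystem fs = FileSystem.get(conf);
    System.out.println("Working directory: " + fs.getWorkingDirectory());
  }
}

Most of the examples below follow the same pattern: set the default URI on a Configuration (in tests, often to an ephemeral port such as hdfs://localhost:0) before formatting or starting a NameNode or MiniDFSCluster.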
Example 1
Source File: TestHDFSServerPorts.java    From RDFS with Apache License 2.0
/**
 * Start the name-node.
 */
public NameNode startNameNode() throws IOException {
  String dataDir = System.getProperty("test.build.data");
  hdfsDir = new File(dataDir, "dfs");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  config = new Configuration();
  config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
  config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
  NameNode.format(config);

  String[] args = new String[] {};
  // NameNode will modify config with the ports it bound to
  return NameNode.createNameNode(args, config);
}
 
Example 2
Source File: TestDelegationTokenForProxyUser.java    From big-c with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setStrings(DefaultImpersonationProvider.getTestProvider().
          getProxySuperuserGroupConfKey(REAL_USER),
      "group1");
  config.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  configureSuperUserIPAddresses(config, REAL_USER);
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).build();
  cluster.waitActive();
  ProxyUsers.refreshSuperUserGroupsConfiguration(config);
  ugi = UserGroupInformation.createRemoteUser(REAL_USER);
  proxyUgi = UserGroupInformation.createProxyUserForTesting(PROXY_USER, ugi,
      GROUP_NAMES);
}
 
Example 3
Source File: TestAllowFormat.java    From RDFS with Apache License 2.0
protected void setUp() throws Exception {
  config = new Configuration();
  String baseDir = System.getProperty("test.build.data", "/tmp");

  hdfsDir = new File(baseDir, "dfs");
  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
  config.set("dfs.name.dir", new File(hdfsDir, "name").getPath());
  config.set("dfs.data.dir", new File(hdfsDir, "data").getPath());

  config.set("fs.checkpoint.dir",new File(hdfsDir, "secondary").getPath());

  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
 
Example 4
Source File: DownloadHdfsFileBuilder.java    From kite with Apache License 2.0
public DownloadHdfsFile(CommandBuilder builder, Config config, Command parent, Command child, MorphlineContext context) 
    throws IOException {
  
  super(builder, config, parent, child, context);
  List<String> uris = getConfigs().getStringList(config, "inputFiles", Collections.<String>emptyList()); 
  File dstRootDir = new File(getConfigs().getString(config, "outputDir", "."));
  Configuration conf = new Configuration();
  String defaultFileSystemUri = getConfigs().getString(config, "fs", null);
  if (defaultFileSystemUri != null) {
    FileSystem.setDefaultUri(conf, defaultFileSystemUri); // see Hadoop's GenericOptionsParser
  }
  for (String value : getConfigs().getStringList(config, "conf", Collections.<String>emptyList())) {
    conf.addResource(new Path(value)); // see Hadoop's GenericOptionsParser
  }
  validateArguments();
  download(uris, conf, dstRootDir);
}
 
Example 5
Source File: TestRaidHar.java    From RDFS with Apache License 2.0
/**
 * create mapreduce and dfs clusters
 */
private void createClusters(boolean local) throws Exception {

  new File(TEST_DIR).mkdirs(); // Make sure data directory exists
  conf = new Configuration();
  conf.set("raid.config.file", CONFIG_FILE);
  conf.setBoolean("raid.config.reload", true);
  conf.setLong("raid.config.reload.interval", RELOAD_INTERVAL);

  // scan all policies once every 5 seconds
  conf.setLong("raid.policy.rescan.interval", 5000);

  // the RaidNode does the raiding inline (instead of submitting to map/reduce)
  if (local) {
    conf.set("raid.classname", "org.apache.hadoop.raid.LocalRaidNode");
  } else {
    conf.set("raid.classname", "org.apache.hadoop.raid.DistRaidNode");
  }
  // use local block fixer
  conf.set("raid.blockfix.classname",
           "org.apache.hadoop.raid.LocalBlockIntegrityMonitor");

  conf.set("raid.server.address", "localhost:0");

  // create a dfs and map-reduce cluster
  final int taskTrackers = 4;

  dfs = new MiniDFSCluster(conf, 3, true, null);
  dfs.waitActive();
  fileSys = dfs.getFileSystem();
  namenode = fileSys.getUri().toString();
  mr = new MiniMRCluster(taskTrackers, namenode, 3);
  jobTrackerName = "localhost:" + mr.getJobTrackerPort();
  hftp = "hftp://localhost.localdomain:" + dfs.getNameNodePort();

  FileSystem.setDefaultUri(conf, namenode);
  conf.set("mapred.job.tracker", jobTrackerName);

  Utils.loadTestCodecs(conf);
}
 
Example 6
Source File: TestAllowFormat.java    From hadoop with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  config = new Configuration();
  if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
    throw new IOException("Could not delete hdfs directory '" + DFS_BASE_DIR +
                          "'");
  }
  
  // Test has multiple name directories.
  // Format should not prompt us if one of the directories exists but is
  // empty. So if the test hangs waiting for input, something is likely
  // wrong in the format prompting code. (HDFS-1636)
  LOG.info("hdfsdir is " + DFS_BASE_DIR.getAbsolutePath());
  File nameDir1 = new File(DFS_BASE_DIR, "name1");
  File nameDir2 = new File(DFS_BASE_DIR, "name2");

  // To test multiple directory handling, we pre-create one of the name directories.
  nameDir1.mkdirs();

  // Set multiple name directories.
  config.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir1.getPath() + "," + nameDir2.getPath());
  config.set(DFS_DATANODE_DATA_DIR_KEY, new File(DFS_BASE_DIR, "data").getPath());

  config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(DFS_BASE_DIR, "secondary").getPath());

  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
 
Example 7
Source File: TestValidateConfigurationSettings.java    From hadoop with Apache License 2.0
/**
 * Tests that setting the RPC port to a value different from the HTTP port
 * does NOT throw an exception.
 */
@Test(timeout = 300000)
public void testThatDifferentRPCandHttpPortsAreOK() 
    throws IOException {

  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());

  Random rand = new Random();

  // A few retries in case the ports we choose are in use.
  for (int i = 0; i < 5; ++i) {
    final int port1 = 30000 + rand.nextInt(10000);
    final int port2 = port1 + 1 + rand.nextInt(10000);

    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port2);
    DFSTestUtil.formatNameNode(conf);
    NameNode nameNode = null;

    try {
      nameNode = new NameNode(conf); // should be OK!
      break;
    } catch(BindException be) {
      continue;     // Port in use? Try another.
    } finally {
      if (nameNode != null) {
        nameNode.stop();
      }
    }
  }
}
 
Example 8
Source File: TestDefaultNameNodePort.java    From hadoop with Apache License 2.0
@Test
public void testGetAddressFromConf() throws Exception {
  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://foo/");
  assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
  FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
  assertEquals(NameNode.getAddress(conf).getPort(), 555);
  FileSystem.setDefaultUri(conf, "foo");
  assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
}
 
Example 9
Source File: TestDefaultNameNodePort.java    From big-c with Apache License 2.0
@Test
public void testGetAddressFromConf() throws Exception {
  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://foo/");
  assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
  FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
  assertEquals(NameNode.getAddress(conf).getPort(), 555);
  FileSystem.setDefaultUri(conf, "foo");
  assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
}
 
Example 10
Source File: TestDelegationToken.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  config.set("hadoop.security.auth_to_local",
      "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
  cluster.waitActive();
  dtSecretManager = NameNodeAdapter.getDtSecretManager(
      cluster.getNamesystem());
}
 
Example 11
Source File: TestNNThroughputBenchmark.java    From RDFS with Apache License 2.0
/**
 * This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
 */
public void testNNThroughput() throws Exception {
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
  conf.set("dfs.http.address", "0.0.0.0:0");
  NameNode.format(conf);
  String[] args = new String[] {"-op", "all"};
  NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
}
 
Example 12
Source File: TestNNThroughputBenchmark.java    From big-c with Apache License 2.0
/**
 * This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
 */
@Test
public void testNNThroughput() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());
  FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  DFSTestUtil.formatNameNode(conf);
  String[] args = new String[] {"-op", "all"};
  NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
}
 
Example 13
Source File: TestRefreshCallQueue.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  // We want to count additional events, so we reset here
  mockQueueConstructions = 0;
  mockQueuePuts = 0;
  int portRetries = 5;
  int nnPort;

  for (; portRetries > 0; --portRetries) {
    // Pick a random port in the range [30000,60000).
    nnPort = 30000 + rand.nextInt(30000);  
    config = new Configuration();
    callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
    config.setClass(callQueueConfigKey,
        MockCallQueue.class, BlockingQueue.class);
    config.set("hadoop.security.authorization", "true");

    FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
    fs = FileSystem.get(config);
    
    try {
      cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
      cluster.waitActive();
      break;
    } catch (BindException be) {
      // Retry with a different port number.
    }
  }
  
  if (portRetries == 0) {
    // Bail if we get very unlucky with our choice of ports.
    fail("Failed to pick an ephemeral port for the NameNode RPC server.");
  }
}
 
Example 14
Source File: NameNode.java    From hadoop-gpu with Apache License 2.0
/**
 * Initialize name-node.
 * 
 * @param conf the configuration
 */
private void initialize(Configuration conf) throws IOException {
  InetSocketAddress socAddr = NameNode.getAddress(conf);
  int handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
  
  // set service-level authorization security policy
  if (serviceAuthEnabled = 
        conf.getBoolean(
          ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = 
      (PolicyProvider)(ReflectionUtils.newInstance(
          conf.getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
              HDFSPolicyProvider.class, PolicyProvider.class), 
          conf));
    SecurityUtil.setPolicy(new ConfiguredPolicy(conf, policyProvider));
  }

  // create rpc server 
  this.server = RPC.getServer(this, socAddr.getHostName(), socAddr.getPort(),
                              handlerCount, false, conf);

  // The rpc-server port can be ephemeral... ensure we have the correct info
  this.serverAddress = this.server.getListenerAddress(); 
  FileSystem.setDefaultUri(conf, getUri(serverAddress));
  LOG.info("Namenode up at: " + this.serverAddress);

  myMetrics = new NameNodeMetrics(conf, this);

  this.namesystem = new FSNamesystem(this, conf);
  startHttpServer(conf);
  this.server.start();  //start RPC server   
  startTrashEmptier(conf);
}
 
Example 15
Source File: TestSaveNamespace.java    From big-c with Apache License 2.0
private Configuration getConf() throws IOException {
  String baseDir = MiniDFSCluster.getBaseDirectory();
  String nameDirs = fileAsURI(new File(baseDir, "name1")) + "," + 
                    fileAsURI(new File(baseDir, "name2"));

  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDirs);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDirs);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); 
  return conf;
}
 
Example 16
Source File: TestSpecialCharactersInOutputPath.java    From big-c with Apache License 2.0
public static boolean launchJob(URI fileSys,
                                JobConf conf,
                                int numMaps,
                                int numReduces) throws IOException {
  
  final Path inDir = new Path("/testing/input");
  final Path outDir = new Path("/testing/output");
  FileSystem fs = FileSystem.get(fileSys, conf);
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    LOG.warn("Can't create " + inDir);
    return false;
  }
  // generate an input file
  DataOutputStream file = fs.create(new Path(inDir, "part-0"));
  file.writeBytes("foo foo2 foo3");
  file.close();

  // use WordCount example
  FileSystem.setDefaultUri(conf, fileSys);
  conf.setJobName("foo");

  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputFormat(SpecialTextOutputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);
  conf.setMapperClass(IdentityMapper.class);        
  conf.setReducerClass(IdentityReducer.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
    
  // run job and wait for completion
  RunningJob runningJob = JobClient.runJob(conf);
    
  try {
    assertTrue(runningJob.isComplete());
    assertTrue(runningJob.isSuccessful());
    assertTrue("Output folder not found!", fs.exists(new Path("/testing/output/" + OUTPUT_FILENAME)));
  } catch (NullPointerException npe) {
    // This NPE should not happen anymore
    fail("A NPE should not have happened.");
  }
        
  // return job result
  LOG.info("job is complete: " + runningJob.isSuccessful());
  return (runningJob.isSuccessful());
}
 
Example 17
Source File: TestKillCompletedJob.java    From RDFS with Apache License 2.0
static Boolean launchWordCount(String fileSys,
                              String jobTracker,
                              JobConf conf,
                              String input,
                              int numMaps,
                              int numReduces) throws IOException {
  final Path inDir = new Path("/testing/wc/input");
  final Path outDir = new Path("/testing/wc/output");
  FileSystem fs = FileSystem.get(URI.create(fileSys), conf);
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    DataOutputStream file = fs.create(new Path(inDir, "part-0"));
    file.writeBytes(input);
    file.close();
  }

  FileSystem.setDefaultUri(conf, fileSys);
  conf.set("mapred.job.tracker", jobTracker);
  conf.setJobName("wordcount");
  conf.setInputFormat(TextInputFormat.class);
  
  // the keys are words (strings)
  conf.setOutputKeyClass(Text.class);
  // the values are counts (ints)
  conf.setOutputValueClass(IntWritable.class);
  
  conf.setMapperClass(WordCount.MapClass.class);
  conf.setCombinerClass(WordCount.Reduce.class);
  conf.setReducerClass(WordCount.Reduce.class);
  
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);

  RunningJob rj = JobClient.runJob(conf);
  JobID jobId = rj.getID();
  
  // Kill the job after it is successful
  if (rj.isSuccessful())
  {
    System.out.println("Job Id:" + jobId + 
      " completed successfully. Killing it now");
    rj.killJob();
  }
  
     
  return rj.isSuccessful();
    
}
 
Example 18
Source File: TestSpecialCharactersInOutputPath.java    From hadoop with Apache License 2.0
public static boolean launchJob(URI fileSys,
                                JobConf conf,
                                int numMaps,
                                int numReduces) throws IOException {
  
  final Path inDir = new Path("/testing/input");
  final Path outDir = new Path("/testing/output");
  FileSystem fs = FileSystem.get(fileSys, conf);
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    LOG.warn("Can't create " + inDir);
    return false;
  }
  // generate an input file
  DataOutputStream file = fs.create(new Path(inDir, "part-0"));
  file.writeBytes("foo foo2 foo3");
  file.close();

  // use WordCount example
  FileSystem.setDefaultUri(conf, fileSys);
  conf.setJobName("foo");

  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputFormat(SpecialTextOutputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);
  conf.setMapperClass(IdentityMapper.class);        
  conf.setReducerClass(IdentityReducer.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
    
  // run job and wait for completion
  RunningJob runningJob = JobClient.runJob(conf);
    
  try {
    assertTrue(runningJob.isComplete());
    assertTrue(runningJob.isSuccessful());
    assertTrue("Output folder not found!", fs.exists(new Path("/testing/output/" + OUTPUT_FILENAME)));
  } catch (NullPointerException npe) {
    // This NPE should not happen anymore
    fail("A NPE should not have happened.");
  }
        
  // return job result
  LOG.info("job is complete: " + runningJob.isSuccessful());
  return (runningJob.isSuccessful());
}
 
Example 19
Source File: TestBlockStoragePolicy.java    From hadoop with Apache License 2.0
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};

  final DatanodeStorageInfo[] diskStorages
      = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes
      = DFSTestUtil.toDatanodeDescriptor(diskStorages);
  for(int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
            StorageType.SSD));
  }

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
 
Example 20
Source File: GenericOptionsParser.java    From hadoop-gpu with Apache License 2.0
/**
 * Modify the configuration according to user-specified generic options
 * @param conf Configuration to be modified
 * @param line User-specified generic options
 */
private void processGeneralOptions(Configuration conf,
    CommandLine line) {
  if (line.hasOption("fs")) {
    FileSystem.setDefaultUri(conf, line.getOptionValue("fs"));
  }

  if (line.hasOption("jt")) {
    conf.set("mapred.job.tracker", line.getOptionValue("jt"));
  }
  if (line.hasOption("conf")) {
    String[] values = line.getOptionValues("conf");
    for(String value : values) {
      conf.addResource(new Path(value));
    }
  }
  try {
    if (line.hasOption("libjars")) {
      conf.set("tmpjars", 
               validateFiles(line.getOptionValue("libjars"), conf));
      //setting libjars in client classpath
      URL[] libjars = getLibJars(conf);
      if(libjars!=null && libjars.length>0) {
        conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader()));
        Thread.currentThread().setContextClassLoader(
            new URLClassLoader(libjars, 
                Thread.currentThread().getContextClassLoader()));
      }
    }
    if (line.hasOption("files")) {
      conf.set("tmpfiles", 
               validateFiles(line.getOptionValue("files"), conf));
    }
    if (line.hasOption("archives")) {
      conf.set("tmparchives", 
                validateFiles(line.getOptionValue("archives"), conf));
    }
  } catch (IOException ioe) {
    System.err.println(StringUtils.stringifyException(ioe));
  }
  if (line.hasOption('D')) {
    String[] property = line.getOptionValues('D');
    for(String prop : property) {
      String[] keyval = prop.split("=", 2);
      if (keyval.length == 2) {
        conf.set(keyval[0], keyval[1]);
      }
    }
  }
  conf.setBoolean("mapred.used.genericoptionsparser", true);
}