Java Code Examples for org.apache.hadoop.conf.Configuration.set()

The following are Java code examples showing how to use the set() method of the org.apache.hadoop.conf.Configuration class, each taken from an open source project.
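Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic set()/get() round trip; the property name my.example.key is hypothetical.

import org.apache.hadoop.conf.Configuration;

public class ConfigurationSetSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // set() stores a String value under the given property name, overriding
    // any value loaded from default resources such as core-site.xml.
    conf.set("my.example.key", "my-value");

    // get() returns the stored value, or the supplied default when the key is unset.
    System.out.println(conf.get("my.example.key"));          // my-value
    System.out.println(conf.get("missing.key", "fallback")); // fallback
  }
}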
Example 1
Project: multiple-dimension-spread   File: TestMDSSerde.java
@Test
public void T_initialize_3() throws SerDeException{
  MDSSerde serde = new MDSSerde();
  Configuration conf = new Configuration();
  conf.set( ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR , "num" );

  Properties table = new Properties();
  Properties part = new Properties();
  table.setProperty( serdeConstants.LIST_COLUMNS , "str,num,arry,nest" );
  table.setProperty( serdeConstants.LIST_COLUMN_TYPES , "string,int,array<string>,struct<a:string,b:int>" );

  serde.initialize( conf , table , part );
  StructObjectInspector inspector = (StructObjectInspector)( serde.getObjectInspector() );
  List<? extends StructField> fieldList = inspector.getAllStructFieldRefs();
  assertEquals( fieldList.get(0).getFieldName() , "num" );

  assertEquals( ( fieldList.get(0).getFieldObjectInspector() instanceof PrimitiveObjectInspector ) , true );
}
 
Example 2
Project: hadoop   File: TestMiniDFSCluster.java
/** MiniDFSCluster should not clobber dfs.datanode.hostname if requested */
@Test(timeout=100000)
public void testClusterSetDatanodeHostname() throws Throwable {
  assumeTrue(System.getProperty("os.name").startsWith("Linux"));
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
  File testDataCluster5 = new File(testDataPath, CLUSTER_5);
  String c5Path = testDataCluster5.getAbsolutePath();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
  MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
    .numDataNodes(1)
    .checkDataNodeHostConfig(true)
    .build();
  try {
    assertEquals("DataNode hostname config not respected", "MYHOST",
        cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
  } finally {
    MiniDFSCluster.shutdownCluster(cluster5);
  }
}
 
Example 3
Project: hadoop-oss   File: TestFileSystemCanonicalization.java
@Test
public void testAuthorityFromDefaultFS() throws Exception {
  Configuration config = new Configuration();
  String defaultFsKey = CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;
  
  FileSystem fs = getVerifiedFS("myfs://host", "myfs://host.a.b:123", config);
  verifyPaths(fs, new String[]{ "myfs://" }, -1, false);

  config.set(defaultFsKey, "myfs://host");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, true);

  config.set(defaultFsKey, "myfs2://host");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, false);

  config.set(defaultFsKey, "myfs://host:123");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, true);

  config.set(defaultFsKey, "myfs://host:456");
  verifyPaths(fs, new String[]{ "myfs://" }, -1, false);
}
 
Example 4
Project: hadoop   File: DFSAdmin.java
public int refreshCallQueue() throws IOException {
  // Get the current configuration
  Configuration conf = getConf();
  
  // for security authorization
  // server principal for this call   
  // should be NN's one.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));

  DistributedFileSystem dfs = getDFS();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri);

  if (isHaEnabled) {
    // Run refreshCallQueue for all NNs if HA is enabled
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<RefreshCallQueueProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
            RefreshCallQueueProtocol.class);
    for (ProxyAndInfo<RefreshCallQueueProtocol> proxy : proxies) {
      proxy.getProxy().refreshCallQueue();
      System.out.println("Refresh call queue successful for "
          + proxy.getAddress());
    }
  } else {
    // Create the client
    RefreshCallQueueProtocol refreshProtocol =
        NameNodeProxies.createProxy(conf, FileSystem.getDefaultUri(conf),
            RefreshCallQueueProtocol.class).getProxy();

    // Refresh the call queue
    refreshProtocol.refreshCallQueue();
    System.out.println("Refresh call queue successful");
  }

  return 0;
}
 
Example 5
Project: hadoop   File: TestFsPermission.java
public void testDeprecatedUmask() {
  Configuration conf = new Configuration();
  conf.set(FsPermission.DEPRECATED_UMASK_LABEL, "302"); // 302 = 0456
  FsPermission umask = FsPermission.getUMask(conf);

  assertEquals(0456, umask.toShort());
}
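The // 302 = 0456 comment above is the key point: the deprecated dfs.umask property is read as a plain decimal integer, and decimal 302 is the same bit pattern as the octal permission mask 0456. A quick, hedged arithmetic check (standalone, not part of the Hadoop test):

int decimalUmask = 302;                                   // 4*64 + 5*8 + 6
System.out.println(Integer.toOctalString(decimalUmask));  // prints 456
System.out.println(0456 == decimalUmask);                 // prints true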
 
Example 6
Project: hadoop   File: BaseTestHttpFSWith.java
protected FileSystem getHttpFSFileSystem() throws Exception {
  Configuration conf = new Configuration();
  conf.set("fs.webhdfs.impl", getFileSystemClass().getName());
  URI uri = new URI(getScheme() + "://" +
                    TestJettyHelper.getJettyURL().toURI().getAuthority());
  return FileSystem.get(uri, conf);
}
 
Example 7
Project: hadoop   File: TestMRCJCFileOutputCommitter.java
@SuppressWarnings("unchecked")
public void testCommitter() throws Exception {
  Job job = Job.getInstance();
  FileOutputFormat.setOutputPath(job, outDir);
  Configuration conf = job.getConfiguration();
  conf.set(MRJobConfig.TASK_ATTEMPT_ID, attempt);
  JobContext jContext = new JobContextImpl(conf, taskID.getJobID());
  TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, taskID);
  FileOutputCommitter committer = new FileOutputCommitter(outDir, tContext);

  // setup
  committer.setupJob(jContext);
  committer.setupTask(tContext);

  // write output
  TextOutputFormat theOutputFormat = new TextOutputFormat();
  RecordWriter theRecordWriter = theOutputFormat.getRecordWriter(tContext);
  writeOutput(theRecordWriter, tContext);

  // do commit
  committer.commitTask(tContext);
  committer.commitJob(jContext);

  // validate output
  File expectedFile = new File(new Path(outDir, partFile).toString());
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append('\t').append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append('\t').append(val2).append("\n");
  String output = UtilsForTests.slurp(expectedFile);
  assertEquals(output, expectedOutput.toString());
  FileUtil.fullyDelete(new File(outDir.toString()));
}
 
Example 8
Project: QDrill   File: FileSystemPlugin.java
public FileSystemPlugin(FileSystemConfig config, DrillbitContext context, String name) throws ExecutionSetupException{
  try {
    this.config = config;
    this.context = context;

    fsConf = new Configuration();
    fsConf.set(FileSystem.FS_DEFAULT_NAME_KEY, config.connection);
    fsConf.set("fs.classpath.impl", ClassPathFileSystem.class.getName());
    fsConf.set("fs.drill-local.impl", LocalSyncableFileSystem.class.getName());

    formatPluginsByName = FormatCreator.getFormatPlugins(context, fsConf, config);
    List<FormatMatcher> matchers = Lists.newArrayList();
    formatPluginsByConfig = Maps.newHashMap();
    for (FormatPlugin p : formatPluginsByName.values()) {
      matchers.add(p.getMatcher());
      formatPluginsByConfig.put(p.getConfig(), p);
    }

    final boolean noWorkspace = config.workspaces == null || config.workspaces.isEmpty();
    List<WorkspaceSchemaFactory> factories = Lists.newArrayList();
    if (!noWorkspace) {
      for (Map.Entry<String, WorkspaceConfig> space : config.workspaces.entrySet()) {
        factories.add(new WorkspaceSchemaFactory(context.getConfig(), this, space.getKey(), name, space.getValue(), matchers));
      }
    }

    // if the "default" workspace is not given add one.
    if (noWorkspace || !config.workspaces.containsKey(DEFAULT_WS_NAME)) {
      factories.add(new WorkspaceSchemaFactory(context.getConfig(), this, DEFAULT_WS_NAME, name, WorkspaceConfig.DEFAULT, matchers));
    }

    this.schemaFactory = new FileSystemSchemaFactory(name, factories);
  } catch (IOException e) {
    throw new ExecutionSetupException("Failure setting up file system plugin.", e);
  }
}
 
Example 9
Project: hadoop   File: TestWasbUriAndConfiguration.java
@Test
public void testValidKeyProvider() throws Exception {
  Configuration conf = new Configuration();
  String account = "testacct";
  String key = "testkey";

  conf.set(SimpleKeyProvider.KEY_ACCOUNT_KEY_PREFIX + account, key);
  conf.setClass("fs.azure.account.keyprovider." + account,
      SimpleKeyProvider.class, KeyProvider.class);
  String result = AzureNativeFileSystemStore.getAccountKeyFromConfiguration(
      account, conf);
  assertEquals(key, result);
}
 
Example 10
Project: hadoop   File: TestNameEditsConfigs.java
/**
 * Test dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir
 * should tolerate white space between values.
 */
@Test
public void testCheckPointDirsAreTrimmed() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File checkpointNameDir1 = new File(base_dir, "chkptName1");
  File checkpointEditsDir1 = new File(base_dir, "chkptEdits1");
  File checkpointNameDir2 = new File(base_dir, "chkptName2");
  File checkpointEditsDir2 = new File(base_dir, "chkptEdits2");
  File nameDir = new File(base_dir, "name1");
  String whiteSpace = "  \n   \n  ";
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getPath());
  conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, whiteSpace
      + checkpointNameDir1.getPath() + whiteSpace, whiteSpace
      + checkpointNameDir2.getPath() + whiteSpace);
  conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
      whiteSpace + checkpointEditsDir1.getPath() + whiteSpace, whiteSpace
          + checkpointEditsDir2.getPath() + whiteSpace);
  cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
      .numDataNodes(3).build();
  try {
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",
        checkpointNameDir1.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",
        checkpointNameDir2.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
        + " must be trimmed ", checkpointEditsDir1.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
        + " must be trimmed ", checkpointEditsDir2.exists());
  } finally {
    secondary.shutdown();
    cluster.shutdown();
  }
}
 
Example 11
Project: hadoop-oss   File: TestAuthenticationSessionCookie.java
public void startServer(boolean isTestSessionCookie) throws Exception {
  Configuration conf = new Configuration();
  if (isTestSessionCookie) {
    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
          DummyFilterInitializer.class.getName());
  } else {
    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
          Dummy2FilterInitializer.class.getName());
  }

  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);

  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  Configuration sslConf = KeyStoreTestUtil.getSslConfig();

  server = new HttpServer2.Builder()
          .setName("test")
          .addEndpoint(new URI("http://localhost"))
          .addEndpoint(new URI("https://localhost"))
          .setConf(conf)
          .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
          .keyStore(sslConf.get("ssl.server.keystore.location"),
                  sslConf.get("ssl.server.keystore.password"),
                  sslConf.get("ssl.server.keystore.type", "jks"))
          .trustStore(sslConf.get("ssl.server.truststore.location"),
                  sslConf.get("ssl.server.truststore.password"),
                  sslConf.get("ssl.server.truststore.type", "jks")).build();
  server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
  server.start();
}
 
Example 12
Project: aliyun-maxcompute-data-collectors   File: SqoopTool.java
/**
 * Add the specified plugin class name to the configuration string
 * listing plugin classes.
 */
private static void addPlugin(Configuration conf, String pluginName) {
  String existingPlugins = conf.get(TOOL_PLUGINS_KEY);
  String newPlugins = null;
  if (null == existingPlugins || existingPlugins.length() == 0) {
    newPlugins = pluginName;
  } else {
    newPlugins = existingPlugins + "," + pluginName;
  }

  conf.set(TOOL_PLUGINS_KEY, newPlugins);
}
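For comparison only, here is a hedged sketch (not code from the Sqoop project) of the same append-to-a-comma-separated-list pattern written with Configuration's multi-value helpers; it assumes TOOL_PLUGINS_KEY is an ordinary String property name.

private static void addPluginAlternative(Configuration conf, String pluginName) {
  // getTrimmedStrings() returns an empty array when the property is unset.
  String[] existing = conf.getTrimmedStrings(TOOL_PLUGINS_KEY);
  String[] updated = java.util.Arrays.copyOf(existing, existing.length + 1);
  updated[existing.length] = pluginName;
  // setStrings() joins the values with commas before storing them.
  conf.setStrings(TOOL_PLUGINS_KEY, updated);
}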
 
Example 13
Project: hadoop   File: TestWritable.java
/** Test that comparator is defined and configured. */
public static void testGetComparator() throws Exception {
  Configuration conf = new Configuration();

  // Without conf.
  WritableComparator frobComparator = WritableComparator.get(Frob.class);
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));

  // With conf.
  conf.set(TEST_CONFIG_PARAM, TEST_CONFIG_VALUE);
  frobComparator = WritableComparator.get(Frob.class, conf);
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertEquals(conf.get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);

  // Without conf. should reuse configuration.
  frobComparator = WritableComparator.get(Frob.class);
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertEquals(conf.get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);

  // New conf. should use new configuration.
  frobComparator = WritableComparator.get(Frob.class, new Configuration());
  assert(frobComparator instanceof FrobComparator);
  assertNotNull(frobComparator.getConf());
  assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));
}
 
Example 14
Project: ditb   File: TestMultiTableInputFormat.java
/**
 * Tests a MR scan using specific start and stop rows.
 *
 * @throws IOException
 * @throws ClassNotFoundException
 * @throws InterruptedException
 */
private void testScan(String start, String stop, String last)
    throws IOException, InterruptedException, ClassNotFoundException {
  String jobName =
      "Scan" + (start != null ? start.toUpperCase() : "Empty") + "To" +
          (stop != null ? stop.toUpperCase() : "Empty");
  LOG.info("Before map/reduce startup - job " + jobName);
  Configuration c = new Configuration(TEST_UTIL.getConfiguration());
  
  c.set(KEY_STARTROW, start != null ? start : "");
  c.set(KEY_LASTROW, last != null ? last : "");
  
  List<Scan> scans = new ArrayList<Scan>();
  
  for(int i=0; i<3; i++){
    Scan scan = new Scan();
    
    scan.addFamily(INPUT_FAMILY);
    scan.setAttribute(Scan.SCAN_ATTRIBUTES_TABLE_NAME, Bytes.toBytes(TABLE_NAME + i));
    
    if (start != null) {
      scan.setStartRow(Bytes.toBytes(start));
    }
    if (stop != null) {
      scan.setStopRow(Bytes.toBytes(stop));
    }
    
    scans.add(scan);
    
    LOG.info("scan before: " + scan);
  }
  
  Job job = new Job(c, jobName);

  TableMapReduceUtil.initTableMapperJob(scans, ScanMapper.class,
      ImmutableBytesWritable.class, ImmutableBytesWritable.class, job);
  job.setReducerClass(ScanReducer.class);
  job.setNumReduceTasks(1); // one to get final "first" and "last" key
  FileOutputFormat.setOutputPath(job,
    new Path(TEST_UTIL.getDataTestDirOnTestFS(), job.getJobName()));
  LOG.info("Started " + job.getJobName());
  job.waitForCompletion(true);
  assertTrue(job.isSuccessful());
  LOG.info("After map/reduce completion - job " + jobName);
}
 
Example 15
Project: hadoop   File: TestCopyCommitter.java
@Test
public void testPreserveStatus() {
  TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
  JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
      taskAttemptContext.getTaskAttemptID().getJobID());
  Configuration conf = jobContext.getConfiguration();


  String sourceBase;
  String targetBase;
  FileSystem fs = null;
  try {
    OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
    fs = FileSystem.get(conf);
    FsPermission sourcePerm = new FsPermission((short) 511);
    FsPermission initialPerm = new FsPermission((short) 448);
    sourceBase = TestDistCpUtils.createTestSetup(fs, sourcePerm);
    targetBase = TestDistCpUtils.createTestSetup(fs, initialPerm);

    DistCpOptions options = new DistCpOptions(Arrays.asList(new Path(sourceBase)),
        new Path("/out"));
    options.preserve(FileAttribute.PERMISSION);
    options.appendToConf(conf);
    options.setTargetPathExists(false);
    
    CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
    Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
    listing.buildListing(listingFile, options);

    conf.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH, targetBase);

    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permission don't match");
    }

    //Test for idempotent commit
    committer.commitJob(jobContext);
    if (!checkDirectoryPermissions(fs, targetBase, sourcePerm)) {
      Assert.fail("Permission don't match");
    }

  } catch (IOException e) {
    LOG.error("Exception encountered while testing for preserve status", e);
    Assert.fail("Preserve status failure");
  } finally {
    TestDistCpUtils.delete(fs, "/tmp1");
    conf.unset(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
  }

}
 
Example 16
Project: hadoop   File: BenchmarkThroughput.java
@Override
public int run(String[] args) throws IOException {
  // silence the minidfs cluster
  Log hadoopLog = LogFactory.getLog("org");
  if (hadoopLog instanceof Log4JLogger) {
    ((Log4JLogger) hadoopLog).getLogger().setLevel(Level.WARN);
  }
  int reps = 1;
  if (args.length == 1) {
    try {
      reps = Integer.parseInt(args[0]);
    } catch (NumberFormatException e) {
      printUsage();
      return -1;
    }
  } else if (args.length > 1) {
    printUsage();
    return -1;
  }
  Configuration conf = getConf();
  // the size of the file to write
  long SIZE = conf.getLong("dfsthroughput.file.size",
      10L * 1024 * 1024 * 1024);
  BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024);

  String localDir = conf.get("mapred.temp.dir");
  if (localDir == null) {
    localDir = conf.get("hadoop.tmp.dir");
    conf.set("mapred.temp.dir", localDir);
  }
  dir = new LocalDirAllocator("mapred.temp.dir");

  System.setProperty("test.build.data", localDir);
  System.out.println("Local = " + localDir);
  ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf);
  FileSystem rawLocal = checkedLocal.getRawFileSystem();
  for(int i=0; i < reps; ++i) {
    writeAndReadLocalFile("local", conf, SIZE);
    writeAndReadFile(rawLocal, "raw", conf, SIZE);
    writeAndReadFile(checkedLocal, "checked", conf, SIZE);
  }
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf)
                                .racks(new String[]{"/foo"}).build();
    cluster.waitActive();
    FileSystem dfs = cluster.getFileSystem();
    for(int i=0; i < reps; ++i) {
      writeAndReadFile(dfs, "dfs", conf, SIZE);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
      // clean up minidfs junk
      rawLocal.delete(new Path(localDir, "dfs"), true);
    }
  }
  return 0;
}
 
Example 17
Project: hadoop   File: LazyPersistTestCase.java
/**
 * If ramDiskStorageLimit is >=0, then RAM_DISK capacity is artificially
 * capped. If ramDiskStorageLimit < 0 then it is ignored.
 */
protected final void startUpCluster(boolean hasTransientStorage,
                                    final int ramDiskReplicaCapacity,
                                    final boolean useSCR,
                                    final boolean useLegacyBlockReaderLocal)
    throws IOException {

  Configuration conf = new Configuration();
  conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFS_NAMENODE_LAZY_PERSIST_FILE_SCRUB_INTERVAL_SEC,
              LAZY_WRITE_FILE_SCRUBBER_INTERVAL_SEC);
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL_SEC);
  conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
              HEARTBEAT_RECHECK_INTERVAL_MSEC);
  conf.setInt(DFS_DATANODE_LAZY_WRITER_INTERVAL_SEC,
              LAZY_WRITER_INTERVAL_SEC);
  conf.setInt(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
              EVICTION_LOW_WATERMARK * BLOCK_SIZE);

  if (useSCR) {
    conf.setBoolean(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
    // Do not share a client context across tests.
    conf.set(DFS_CLIENT_CONTEXT, UUID.randomUUID().toString());
    if (useLegacyBlockReaderLocal) {
      conf.setBoolean(DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL, true);
      conf.set(DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY,
          UserGroupInformation.getCurrentUser().getShortUserName());
    } else {
      sockDir = new TemporarySocketDirectory();
      conf.set(DFS_DOMAIN_SOCKET_PATH_KEY, new File(sockDir.getDir(),
          this.getClass().getSimpleName() + "._PORT.sock").getAbsolutePath());
    }
  }

  long[] capacities = null;
  if (hasTransientStorage && ramDiskReplicaCapacity >= 0) {
    // Convert replica count to byte count, add some delta for .meta and
    // VERSION files.
    long ramDiskStorageLimit = ((long) ramDiskReplicaCapacity * BLOCK_SIZE) +
        (BLOCK_SIZE - 1);
    capacities = new long[] { ramDiskStorageLimit, -1 };
  }

  cluster = new MiniDFSCluster
      .Builder(conf)
      .numDataNodes(REPL_FACTOR)
      .storageCapacities(capacities)
      .storageTypes(hasTransientStorage ?
          new StorageType[]{ RAM_DISK, DEFAULT } : null)
      .build();
  fs = cluster.getFileSystem();
  client = fs.getClient();
  try {
    jmx = initJMX();
  } catch (Exception e) {
    fail("Failed initialize JMX for testing: " + e);
  }
  LOG.info("Cluster startup complete");
}
 
Example 18
Project: aliyun-maxcompute-data-collectors   File: TestSplittableBufferedWriter.java
private Configuration getConf() {
  Configuration conf = new Configuration();
  conf.set("fs.default.name", "file:///");
  return conf;
}
 
Example 19
Project: hadoop   File: DefaultStringifier.java
/**
 * Stores the item in the configuration with the given keyName.
 * 
 * @param <K>  the class of the item
 * @param conf the configuration to store
 * @param item the object to be stored
 * @param keyName the name of the key to use
 * @throws IOException : forwards Exceptions from the underlying 
 * {@link Serialization} classes. 
 */
public static <K> void store(Configuration conf, K item, String keyName)
throws IOException {

  DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf,
      GenericsUtil.getClass(item));
  conf.set(keyName, stringifier.toString(item));
  stringifier.close();
}
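A hedged usage sketch for the store()/load() pair (not part of the Hadoop source above): it assumes the stored type has a registered Serialization, which holds for Writable types such as Text, and the key name my.stored.item is hypothetical.

public static void roundTripExample() throws IOException {
  Configuration conf = new Configuration();
  Text original = new Text("hello");
  // store() serializes the item and puts the encoded form into the configuration.
  DefaultStringifier.store(conf, original, "my.stored.item");
  // load() reads the value back and deserializes it into the requested class.
  Text restored = DefaultStringifier.load(conf, "my.stored.item", Text.class);
  System.out.println(restored); // hello
}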
 
Example 20
Project: hadoop-oss   File: FileSystem.java
/** Set the default filesystem URI in a configuration.
 * @param conf the configuration to alter
 * @param uri the new default filesystem uri
 */
public static void setDefaultUri(Configuration conf, URI uri) {
  conf.set(FS_DEFAULT_NAME_KEY, uri.toString());
}
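To close the listing, a hedged usage sketch for setDefaultUri() (not from the Hadoop source above); the NameNode address is a placeholder.

Configuration conf = new Configuration();
// Equivalent to conf.set(FS_DEFAULT_NAME_KEY, "hdfs://namenode.example.com:8020").
FileSystem.setDefaultUri(conf, URI.create("hdfs://namenode.example.com:8020"));
URI defaultUri = FileSystem.getDefaultUri(conf); // hdfs://namenode.example.com:8020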