Java Code Examples for org.apache.hadoop.fs.FileSystem.makeQualified()

The following are Java code examples showing how to use the makeQualified() method of the org.apache.hadoop.fs.FileSystem class.
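
Before the project examples, here is a minimal standalone sketch of what makeQualified() does: it resolves a path against the filesystem's URI and working directory, so a relative path comes back with an explicit scheme, authority, and absolute path. The MakeQualifiedSketch class name and the sample output are illustrative assumptions, not taken from any of the projects below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // The local filesystem keeps the sketch self-contained; with
    // FileSystem.get(conf) against an HDFS default filesystem the
    // result would be qualified with hdfs://<host>:<port> instead.
    FileSystem fs = FileSystem.getLocal(conf);

    // A relative path carries no scheme or authority of its own.
    Path relative = new Path("data/input.txt");

    // makeQualified() fills in the scheme and authority from the
    // filesystem URI and resolves the path against the working
    // directory; no I/O happens and the file need not exist.
    Path qualified = fs.makeQualified(relative);

    // Prints e.g. file:/<current working directory>/data/input.txt
    System.out.println(qualified);
  }
}

Because qualification is a purely client-side path operation, the examples below can safely call makeQualified() on paths that do not exist yet.
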
Example 1
Project: hadoop-oss   File: TestMapFile.java
@Test
@SuppressWarnings("deprecation")
public void testMidKey() throws Exception {
  // Write a mapfile of simple data: keys are
  Path dirName = new Path(TEST_DIR, "testMidKey.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);

  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(),
      IntWritable.class, IntWritable.class);
    writer.append(new IntWritable(1), new IntWritable(1));
    writer.close();
    // Now read the midKey back from the created mapfile.
    reader = new MapFile.Reader(qualifiedDirName, conf);
    assertEquals(new IntWritable(1), reader.midKey());
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
 
Example 2
Project: hadoop-oss   File: TestMapFile.java
@Test
@SuppressWarnings("deprecation")
public void testMidKeyEmpty() throws Exception {
  // Write a mapfile of simple data: keys are
  Path dirName = new Path(TEST_DIR, "testMidKeyEmpty.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);

  MapFile.Writer writer = new MapFile.Writer(conf, fs,
      qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
  writer.close();
  // Now read the midKey back from the created mapfile.
  MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
  try {
    assertEquals(null, reader.midKey()); 
  } finally {
    reader.close();
  }
}
 
Example 3
Project: circus-train   File: HdfsSnapshotLocationManager.java
private Path createSnapshot() throws IOException {
  LOG.debug("Source table {}.{} has its data located at {}", sourceTable.getDbName(), sourceTable.getTableName(),
      sourceDataPath);

  FileSystem fileSystem = fileSystemFactory.get(sourceDataPath, sourceHiveConf);
  Path snapshotMetaDataPath = new Path(sourceDataPath, HdfsConstants.DOT_SNAPSHOT_DIR);
  Path resolvedLocation = sourceDataPath;
  if (fileSystem.exists(snapshotMetaDataPath)) {
    if (snapshotsDisabled) {
      LOG.info("Path {} can be snapshot, but feature has been disabled.", sourceDataPath);
    } else {
      LOG.debug("Creating source data snapshot: {}, {}", sourceDataPath, eventId);
      // fileSystem.createSnapshot does not return a fully qualified URI.
      resolvedLocation = fileSystem.makeQualified(fileSystem.createSnapshot(sourceDataPath, eventId));
      snapshotPath = resolvedLocation;
    }
  } else {
    LOG.debug("Snapshots not enabled on source location: {}", sourceDataPath);
  }
  return resolvedLocation;
}
 
Example 4
Project: hadoop   File: FSDownload.java
/**
 * Returns a boolean to denote whether a cache file is visible to all (public)
 * or not
 *
 * @return true if the current path is visible to all, false
 * otherwise
 */
@Private
public static boolean isPublic(FileSystem fs, Path current, FileStatus sStat,
    LoadingCache<Path,Future<FileStatus>> statCache) throws IOException {
  current = fs.makeQualified(current);
  //the leaf level file should be readable by others
  if (!checkPublicPermsForAll(fs, sStat, FsAction.READ_EXECUTE, FsAction.READ)) {
    return false;
  }

  if (Shell.WINDOWS && fs instanceof LocalFileSystem) {
    // Relax the requirement for public cache on LFS on Windows since default
    // permissions are "700" all the way up to the drive letter. In this
    // model, the only requirement for a user is to give EVERYONE group
    // permission on the file and the file will be considered public.
    // This code path is only hit when fs.default.name is file:/// (mainly
    // in tests).
    return true;
  }
  return ancestorsHaveExecutePermissions(fs, current.getParent(), statCache);
}
 
Example 5
Project: hadoop   File: TestBinaryTokenFile.java
@BeforeClass
public static void setUp() throws Exception {
  final Configuration conf = new Configuration();
  
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
  conf.set(YarnConfiguration.RM_PRINCIPAL, "jt_id/" + SecurityUtil.HOSTNAME_PATTERN + "@APACHE.ORG");
  
  final MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
  builder.checkExitOnShutdown(true);
  builder.numDataNodes(numSlaves);
  builder.format(true);
  builder.racks(null);
  dfsCluster = builder.build();
  
  mrCluster = new MiniMRYarnCluster(TestBinaryTokenFile.class.getName(), noOfNMs);
  mrCluster.init(conf);
  mrCluster.start();

  NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads(); 
  
  FileSystem fs = dfsCluster.getFileSystem(); 
  p1 = new Path("file1");
  p1 = fs.makeQualified(p1);
}
 
Example 6
Project: hadoop   File: FileOutputFormat.java
public void checkOutputSpecs(FileSystem ignored, JobConf job) 
  throws FileAlreadyExistsException, 
         InvalidJobConfException, IOException {
  // Ensure that the output directory is set and not already there
  Path outDir = getOutputPath(job);
  if (outDir == null && job.getNumReduceTasks() != 0) {
    throw new InvalidJobConfException("Output directory not set in JobConf.");
  }
  if (outDir != null) {
    FileSystem fs = outDir.getFileSystem(job);
    // normalize the output directory
    outDir = fs.makeQualified(outDir);
    setOutputPath(job, outDir);
    
    // get delegation token for the outDir's file system
    TokenCache.obtainTokensForNamenodes(job.getCredentials(), 
                                        new Path[] {outDir}, job);
    
    // check its existence
    if (fs.exists(outDir)) {
      throw new FileAlreadyExistsException("Output directory " + outDir + 
                                           " already exists");
    }
  }
}
 
Example 7
Project: hadoop   File: FileOutputCommitter.java
/**
 * Create a file output committer
 * @param outputPath the job's output path, or null if you want the output
 * committer to act as a noop.
 * @param context the task's context
 * @throws IOException
 */
@Private
public FileOutputCommitter(Path outputPath, 
                           JobContext context) throws IOException {
  Configuration conf = context.getConfiguration();
  algorithmVersion =
      conf.getInt(FILEOUTPUTCOMMITTER_ALGORITHM_VERSION,
                  FILEOUTPUTCOMMITTER_ALGORITHM_VERSION_DEFAULT);
  LOG.info("File Output Committer Algorithm version is " + algorithmVersion);
  if (algorithmVersion != 1 && algorithmVersion != 2) {
    throw new IOException("Only 1 or 2 algorithm version is supported");
  }
  if (outputPath != null) {
    FileSystem fs = outputPath.getFileSystem(context.getConfiguration());
    this.outputPath = fs.makeQualified(outputPath);
  }
}
 
Example 8
Project: hadoop-oss   File: GenericOptionsParser.java
/**
 * Modify configuration according to user-specified generic options
 * @param conf Configuration to be modified
 * @param line User-specified generic options
 */
private void processGeneralOptions(Configuration conf,
    CommandLine line) throws IOException {
  if (line.hasOption("fs")) {
    FileSystem.setDefaultUri(conf, line.getOptionValue("fs"));
  }

  if (line.hasOption("jt")) {
    String optionValue = line.getOptionValue("jt");
    if (optionValue.equalsIgnoreCase("local")) {
      conf.set("mapreduce.framework.name", optionValue);
    }

    conf.set("yarn.resourcemanager.address", optionValue, 
        "from -jt command line option");
  }
  if (line.hasOption("conf")) {
    String[] values = line.getOptionValues("conf");
    for(String value : values) {
      conf.addResource(new Path(value));
    }
  }

  if (line.hasOption('D')) {
    String[] property = line.getOptionValues('D');
    for(String prop : property) {
      String[] keyval = prop.split("=", 2);
      if (keyval.length == 2) {
        conf.set(keyval[0], keyval[1], "from command line");
      }
    }
  }

  if (line.hasOption("libjars")) {
    conf.set("tmpjars", 
             validateFiles(line.getOptionValue("libjars"), conf),
             "from -libjars command line option");
    //setting libjars in client classpath
    URL[] libjars = getLibJars(conf);
    if(libjars!=null && libjars.length>0) {
      conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader()));
      Thread.currentThread().setContextClassLoader(
          new URLClassLoader(libjars, 
              Thread.currentThread().getContextClassLoader()));
    }
  }
  if (line.hasOption("files")) {
    conf.set("tmpfiles", 
             validateFiles(line.getOptionValue("files"), conf),
             "from -files command line option");
  }
  if (line.hasOption("archives")) {
    conf.set("tmparchives", 
              validateFiles(line.getOptionValue("archives"), conf),
              "from -archives command line option");
  }
  conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);
  
  // tokensFile
  if(line.hasOption("tokenCacheFile")) {
    String fileName = line.getOptionValue("tokenCacheFile");
    // check if the local file exists
    FileSystem localFs = FileSystem.getLocal(conf);
    Path p = localFs.makeQualified(new Path(fileName));
    if (!localFs.exists(p)) {
        throw new FileNotFoundException("File "+fileName+" does not exist.");
    }
    if(LOG.isDebugEnabled()) {
      LOG.debug("setting conf tokensFile: " + fileName);
    }
    UserGroupInformation.getCurrentUser().addCredentials(
        Credentials.readTokenStorageFile(p, conf));
    conf.set("mapreduce.job.credentials.binary", p.toString(),
             "from -tokenCacheFile command line option");

  }
}
 
Example 9
Project: ditb   File: TestImportTSVWithTTLs.java
protected static Tool doMROnTableTest(HBaseTestingUtility util, String family, String data,
    String[] args, int valueMultiplier) throws Exception {
  TableName table = TableName.valueOf(args[args.length - 1]);
  Configuration conf = new Configuration(util.getConfiguration());

  // populate input file
  FileSystem fs = FileSystem.get(conf);
  Path inputPath = fs.makeQualified(new Path(util
      .getDataTestDirOnTestFS(table.getNameAsString()), "input.dat"));
  FSDataOutputStream op = fs.create(inputPath, true);
  op.write(Bytes.toBytes(data));
  op.close();
  LOG.debug(String.format("Wrote test data to file: %s", inputPath));

  if (conf.getBoolean(FORCE_COMBINER_CONF, true)) {
    LOG.debug("Forcing combiner.");
    conf.setInt("mapreduce.map.combine.minspills", 1);
  }

  // run the import
  List<String> argv = new ArrayList<String>(Arrays.asList(args));
  argv.add(inputPath.toString());
  Tool tool = new ImportTsv();
  LOG.debug("Running ImportTsv with arguments: " + argv);
  try {
    // Job will fail if observer rejects entries without TTL
    assertEquals(0, ToolRunner.run(conf, tool, argv.toArray(args)));
  } finally {
    // Clean up
    if (conf.getBoolean(DELETE_AFTER_LOAD_CONF, true)) {
      LOG.debug("Deleting test subdirectory");
      util.cleanupDataTestDirOnTestFS(table.getNameAsString());
    }
  }

  return tool;
}
 
Example 10
Project: ditb   File: TestHBaseTestingUtility.java
@Test public void testMiniDFSCluster() throws Exception {
  HBaseTestingUtility hbt = new HBaseTestingUtility();
  MiniDFSCluster cluster = hbt.startMiniDFSCluster(null);
  FileSystem dfs = cluster.getFileSystem();
  Path dir = new Path("dir");
  Path qualifiedDir = dfs.makeQualified(dir);
  LOG.info("dir=" + dir + ", qualifiedDir=" + qualifiedDir);
  assertFalse(dfs.exists(qualifiedDir));
  assertTrue(dfs.mkdirs(qualifiedDir));
  assertTrue(dfs.delete(qualifiedDir, true));
  hbt.shutdownMiniCluster();
}
 
Example 11
Project: oryx2   File: BatchUpdateFunction.java
/**
 * @return the paths of the given {@link FileStatus}es joined into one comma-separated String
 * @see FileInputFormat#addInputPath(org.apache.hadoop.mapreduce.Job, Path)
 */
private static String joinFSPaths(FileSystem fs, FileStatus[] statuses) {
  StringBuilder joined = new StringBuilder();
  for (FileStatus status : statuses) {
    if (joined.length() > 0) {
      joined.append(',');
    }
    Path path = fs.makeQualified(status.getPath());
    joined.append(StringUtils.escapeString(path.toString()));
  }
  return joined.toString();
}
 
Example 12
Project: hadoop   File: TestMapFile.java
/**
 * Test getClosest feature.
 * 
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testGetClosest() throws Exception {
  // Write a mapfile of simple data: keys are
  Path dirName = new Path(TEST_DIR, "testGetClosest.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);
  // Make an index entry for every third insertion.
  MapFile.Writer.setIndexInterval(conf, 3);
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(),
      Text.class, Text.class);
    // Assert that the index interval is 3
    assertEquals(3, writer.getIndexInterval());
    // Add entries up to 100 in intervals of ten.
    final int FIRST_KEY = 10;
    for (int i = FIRST_KEY; i < 100; i += 10) {
      String iStr = Integer.toString(i);
      Text t = new Text("00".substring(iStr.length()) + iStr);
      writer.append(t, t);
    }
    writer.close();
    // Now do getClosest on created mapfile.
    reader = new MapFile.Reader(qualifiedDirName, conf);
    Text key = new Text("55");
    Text value = new Text();
    Text closest = (Text) reader.getClosest(key, value);
    // Assert that closest after 55 is 60
    assertEquals(new Text("60"), closest);
    // Get closest that falls before the passed key: 50
    closest = (Text) reader.getClosest(key, value, true);
    assertEquals(new Text("50"), closest);
    // Test get closest when we pass explicit key
    final Text TWENTY = new Text("20");
    closest = (Text) reader.getClosest(TWENTY, value);
    assertEquals(TWENTY, closest);
    closest = (Text) reader.getClosest(TWENTY, value, true);
    assertEquals(TWENTY, closest);
    // Test what happens at boundaries. Assert that searching for a key
    // less than the first key in the mapfile returns the first key.
    key = new Text("00");
    closest = (Text) reader.getClosest(key, value);
    assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));

    // If we're looking for the first key before, and we pass in a key before
    // the first key in the file, we should get null
    closest = (Text) reader.getClosest(key, value, true);
    assertNull(closest);

    // Assert that null is returned if key is > last entry in mapfile.
    key = new Text("99");
    closest = (Text) reader.getClosest(key, value);
    assertNull(closest);

    // If we were looking for the key before, we should get the last key
    closest = (Text) reader.getClosest(key, value, true);
    assertEquals(new Text("90"), closest);
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
 
Example 13
Project: hadoop-oss   File: TestGenericOptionsParser.java
/**
 * testing the -tokenCacheFile option
 * @throws IOException
 */
public void testTokenCacheOption() throws IOException {
  FileSystem localFs = FileSystem.getLocal(conf);
  
  File tmpFile = new File(testDir, "tokenCacheFile");
  if(tmpFile.exists()) {
    tmpFile.delete();
  }
  String[] args = new String[2];
  // pass a files option 
  args[0] = "-tokenCacheFile";
  args[1] = tmpFile.toURI().toString();
  
  // test non existing file
  Throwable th = null;
  try {
    new GenericOptionsParser(conf, args);
  } catch (Exception e) {
    th = e;
  }
  assertNotNull(th);
  assertTrue("FileNotFoundException is not thrown",
      th instanceof FileNotFoundException);
  
  // create file
  Path tmpPath = localFs.makeQualified(new Path(tmpFile.toString()));
  Token<?> token = new Token<AbstractDelegationTokenIdentifier>(
      "identifier".getBytes(), "password".getBytes(),
      new Text("token-kind"), new Text("token-service"));
  Credentials creds = new Credentials();
  creds.addToken(new Text("token-alias"), token);
  creds.writeTokenStorageFile(tmpPath, conf);

  new GenericOptionsParser(conf, args);
  String fileName = conf.get("mapreduce.job.credentials.binary");
  assertNotNull("files is null", fileName);
  assertEquals("files option does not match", tmpPath.toString(), fileName);
  
  Credentials ugiCreds =
      UserGroupInformation.getCurrentUser().getCredentials();
  assertEquals(1, ugiCreds.numberOfTokens());
  Token<?> ugiToken = ugiCreds.getToken(new Text("token-alias"));
  assertNotNull(ugiToken);
  assertEquals(token, ugiToken);
  
  localFs.delete(new Path(testDir.getAbsolutePath()), true);
}
 
Example 14
Project: hadoop   File: TestNativeAzureFileSystemOperationsMocked.java
@Override
public Path getTestRootPath(FileSystem fSys, String pathString) {
  return fSys.makeQualified(new Path(TEST_ROOT_DIR, pathString));
}
 
Example 15
Project: hadoop   File: BaileyBorweinPlouffe.java
/** Run a map/reduce job to compute Pi. */
private static void compute(int startDigit, int nDigits, int nMaps,
    String workingDir, Configuration conf, PrintStream out
    ) throws IOException {
  final String name = startDigit + "_" + nDigits;

  // set up working directory
  out.println("Working Directory = " + workingDir);
  out.println();
  final FileSystem fs = FileSystem.get(conf);
  final Path dir = fs.makeQualified(new Path(workingDir));
  if (fs.exists(dir)) {
    throw new IOException("Working directory " + dir
        + " already exists.  Please remove it first.");
  } else if (!fs.mkdirs(dir)) {
    throw new IOException("Cannot create working directory " + dir);
  }

  out.println("Start Digit      = " + startDigit);
  out.println("Number of Digits = " + nDigits);
  out.println("Number of Maps   = " + nMaps);

  // setup a job
  final Job job = createJob(name, conf);
  final Path hexfile = new Path(dir, "pi_" + name + ".hex");
  FileOutputFormat.setOutputPath(job, new Path(dir, "out"));

  // setup custom properties
  job.getConfiguration().set(WORKING_DIR_PROPERTY, dir.toString());
  job.getConfiguration().set(HEX_FILE_PROPERTY, hexfile.toString());

  job.getConfiguration().setInt(DIGIT_START_PROPERTY, startDigit);
  job.getConfiguration().setInt(DIGIT_SIZE_PROPERTY, nDigits);
  job.getConfiguration().setInt(DIGIT_PARTS_PROPERTY, nMaps);

  // start a map/reduce job
  out.println("\nStarting Job ...");
  final long startTime = System.currentTimeMillis();
  try {
    if (!job.waitForCompletion(true)) {
      out.println("Job failed.");
      System.exit(1);
    }
  } catch (Exception e) {
    throw new RuntimeException(e);
  } finally {
    final double duration = (System.currentTimeMillis() - startTime)/1000.0;
    out.println("Duration is " + duration + " seconds.");
  }
  out.println("Output file: " + hexfile);
}
 
Example 16
Project: ditb   File: HFilePerformanceEvaluation.java
/**
 * Add any supported codec or cipher to test the HFile read/write performance. 
 * Specify "none" to disable codec or cipher or both.  
 * @throws Exception
 */
private void runBenchmarks() throws Exception {
  final Configuration conf = new Configuration();
  final FileSystem fs = FileSystem.get(conf);
  final Path mf = fs.makeQualified(new Path("performanceevaluation.mapfile"));
  
  // codec=none cipher=none
  runWriteBenchmark(conf, fs, mf, "none", "none");
  runReadBenchmark(conf, fs, mf, "none", "none");
  
  // codec=gz cipher=none
  runWriteBenchmark(conf, fs, mf, "gz", "none");
  runReadBenchmark(conf, fs, mf, "gz", "none");

  // Add configuration for AES cipher
  final Configuration aesconf = new Configuration();
  aesconf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
  aesconf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
  aesconf.setInt("hfile.format.version", 3);
  final FileSystem aesfs = FileSystem.get(aesconf);
  final Path aesmf = aesfs.makeQualified(new Path("performanceevaluation.aes.mapfile"));

  // codec=none cipher=aes
  runWriteBenchmark(aesconf, aesfs, aesmf, "none", "aes");
  runReadBenchmark(aesconf, aesfs, aesmf, "none", "aes");

  // codec=gz cipher=aes
  runWriteBenchmark(aesconf, aesfs, aesmf, "gz", "aes");
  runReadBenchmark(aesconf, aesfs, aesmf, "gz", "aes");

  // cleanup test files
  if (fs.exists(mf)) {
    fs.delete(mf, true);
  }
  if (aesfs.exists(aesmf)) {
    aesfs.delete(aesmf, true);
  }
  
  // Print Result Summary
  LOG.info("\n***************\n" + "Result Summary" + "\n***************\n");
  LOG.info(testSummary.toString());

}
 
Example 17
Project: hadoop   File: TestLocalContainerLauncher.java
private static void delete(File dir) throws IOException {
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.getLocal(conf);
  Path p = fs.makeQualified(new Path(dir.getAbsolutePath()));
  fs.delete(p, true);
}