Java Code Examples for org.apache.hadoop.fs.FileSystem.getLocal()

The following are Java code examples showing how to use the getLocal() method of the org.apache.hadoop.fs.FileSystem class.
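For orientation, here is a minimal, self-contained sketch of typical getLocal() usage. It is not taken from any of the projects below; the demo class name and the /tmp path are illustrative placeholders.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class GetLocalDemo {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // getLocal() returns a LocalFileSystem backed by the local disk,
    // regardless of the fs.defaultFS configured for the cluster.
    LocalFileSystem localFs = FileSystem.getLocal(conf);

    // Illustrative path only; any writable local location works.
    Path file = new Path("/tmp/getlocal-demo.txt");
    try (FSDataOutputStream out = localFs.create(file, true)) {
      out.writeUTF("hello from the local file system");
    }
    try (FSDataInputStream in = localFs.open(file)) {
      System.out.println(in.readUTF());
    }
    localFs.delete(file, false);
  }
}
 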
Example 1
Project: hadoop   File: TestBloomMapFile.java
/**
 * test {@code BloomMapFile.delete()} method
 */
public void testDeleteFile() {
  BloomMapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    writer = new BloomMapFile.Writer(conf, TEST_FILE,
        MapFile.Writer.keyClass(IntWritable.class),
        MapFile.Writer.valueClass(Text.class));
    assertNotNull("testDeleteFile error !!!", writer);
    writer.close();
    BloomMapFile.delete(fs, TEST_FILE.toString());
  } catch (Exception ex) {
    fail("unexpect ex in testDeleteFile !!!");
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
 
Example 2
Project: hadoop   File: TestMapFile.java
/**
 * test {@code MapFile.rename()} method
 */
@Test
public void testRename() {
  final String NEW_FILE_NAME = "test-new.mapfile";
  final String OLD_FILE_NAME = "test-old.mapfile";
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class);
    writer.close();
    MapFile.rename(fs, new Path(TEST_DIR, OLD_FILE_NAME).toString(), 
        new Path(TEST_DIR, NEW_FILE_NAME).toString());
    MapFile.delete(fs, new Path(TEST_DIR, NEW_FILE_NAME).toString());
  } catch (IOException ex) {
    fail("testRename error " + ex);
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
 
Example 3
Project: hadoop-oss   File: TestMapFile.java
@Test
@SuppressWarnings("deprecation")
public void testMidKeyEmpty() throws Exception {
  // Write a mapfile of simple data: keys are
  Path dirName = new Path(TEST_DIR, "testMidKeyEmpty.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);

  MapFile.Writer writer = new MapFile.Writer(conf, fs,
      qualifiedDirName.toString(), IntWritable.class, IntWritable.class);
  writer.close();
  // Now do getClosest on created mapfile.
  MapFile.Reader reader = new MapFile.Reader(qualifiedDirName, conf);
  try {
    assertEquals(null, reader.midKey()); 
  } finally {
    reader.close();
  }
}
 
Example 4
Project: hadoop   File: TestChRootedFileSystem.java
@Before
public void setUp() throws Exception {
  // create the test root on local_fs
  Configuration conf = new Configuration();
  fSysTarget = FileSystem.getLocal(conf);
  fileSystemTestHelper = new FileSystemTestHelper();
  chrootedTo = fileSystemTestHelper.getAbsoluteTestRootPath(fSysTarget);
  // In case previous test was killed before cleanup
  fSysTarget.delete(chrootedTo, true);
  
  fSysTarget.mkdirs(chrootedTo);


  // ChRoot to the root of the testDirectory
  fSys = new ChRootedFileSystem(chrootedTo.toUri(), conf);
}
 
Example 5
Project: hadoop   File: TeraSort.java
public void setConf(Configuration conf) {
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    this.conf = conf;
    Path partFile = new Path(TeraInputFormat.PARTITION_FILENAME);
    splitPoints = readPartitions(fs, partFile, conf);
    trie = buildTrie(splitPoints, 0, splitPoints.length, new Text(), 2);
  } catch (IOException ie) {
    throw new IllegalArgumentException("can't read partitions file", ie);
  }
}
 
Example 6
Project: dremio-oss   File: CoreStoreProviderImpl.java
private void createMetaDataFile(StoreBuilderConfig builderConfig) throws IOException {
  final KVStoreInfo kvStoreInfo = DataStoreUtils.toInfo(builderConfig);
  final Path metadataFile = new Path(metaDataFilesDir.getAbsolutePath(), format("%s%s", builderConfig.getName(), METADATA_FILE_SUFFIX));
  final FileSystem fs = FileSystem.getLocal(new Configuration());
  try (FSDataOutputStream metaDataOut = fs.create(metadataFile, true)) {
    ProtostuffUtil.toJSON(metaDataOut, kvStoreInfo, KVStoreInfo.getSchema(), false);
  }
}
 
Example 7
Project: hadoop   File: TestLineRecordReaderJobs.java
/**
 * Test the default behavior when the textinputformat.record.delimiter
 * configuration property is not specified
 * 
 * @throws IOException
 * @throws InterruptedException
 * @throws ClassNotFoundException
 */
@Test
public void testDefaultRecordDelimiters() throws IOException,
    InterruptedException, ClassNotFoundException {
  Configuration conf = new Configuration();
  FileSystem localFs = FileSystem.getLocal(conf);
  // cleanup
  localFs.delete(workDir, true);
  // creating input test file
  createInputFile(conf);
  createAndRunJob(conf);
  String expected = "0\tabc\n4\tdef\t\n9\tghi\n13\tjkl\n";
  assertEquals(expected, readOutputFile(conf));
}
 
Example 8
Project: hadoop   File: TestHistograms.java
/**
 * @throws IOException
 * 
 *           There should be files in the directory named by
 *           ${test.build.data}/rumen/histogram-test .
 * 
 *           There will be pairs of files, inputXxx.json and goldXxx.json .
 * 
 *           We read the input file as a HistogramRawTestData in json. Then we
 *           create a Histogram using the data field, and then a
 *           LoggedDiscreteCDF using the percentiles and scale field. Finally,
 *           we read the corresponding goldXxx.json as a LoggedDiscreteCDF and
 *           deepCompare them.
 */
@Test
public void testHistograms() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem lfs = FileSystem.getLocal(conf);
  final Path rootInputDir = new Path(
      System.getProperty("test.tools.input.dir", "")).makeQualified(lfs);
  final Path rootInputFile = new Path(rootInputDir, "rumen/histogram-tests");


  FileStatus[] tests = lfs.listStatus(rootInputFile);

  for (int i = 0; i < tests.length; ++i) {
    Path filePath = tests[i].getPath();
    String fileName = filePath.getName();
    if (fileName.startsWith("input")) {
      String testName = fileName.substring("input".length());
      Path goldFilePath = new Path(rootInputFile, "gold"+testName);
      assertTrue("Gold file dies not exist", lfs.exists(goldFilePath));
      LoggedDiscreteCDF newResult = histogramFileToCDF(filePath, lfs);
      System.out.println("Testing a Histogram for " + fileName);
      FSDataInputStream goldStream = lfs.open(goldFilePath);
      JsonObjectMapperParser<LoggedDiscreteCDF> parser = new JsonObjectMapperParser<LoggedDiscreteCDF>(
          goldStream, LoggedDiscreteCDF.class); 
      try {
        LoggedDiscreteCDF dcdf = parser.getNext();
        dcdf.deepCompare(newResult, new TreePath(null, "<root>"));
      } catch (DeepInequalityException e) {
        fail(e.path.toString());
      }
      finally {
          parser.close();
      }
    }
  }
}
 
Example 9
Project: hadoop   File: TestSeekBug.java
/**
 * Tests if the seek bug exists in FSDataInputStream in LocalFS.
 */
@Test
public void testSeekBugLocalFS() throws IOException {
  Configuration conf = new HdfsConfiguration();
  FileSystem fileSys = FileSystem.getLocal(conf);
  try {
    Path file1 = new Path("build/test/data", "seektest.dat");
    writeFile(fileSys, file1);
    seekReadFile(fileSys, file1);
    cleanupFile(fileSys, file1);
  } finally {
    fileSys.close();
  }
}
 
Example 10
Project: hadoop   File: TotalOrderPartitioner.java
/**
 * Read in the partition file and build indexing data structures.
 * If the keytype is {@link org.apache.hadoop.io.BinaryComparable} and
 * <tt>total.order.partitioner.natural.order</tt> is not false, a trie
 * of the first <tt>total.order.partitioner.max.trie.depth</tt>(2) + 1 bytes
 * will be built. Otherwise, keys will be located using a binary search of
 * the partition keyset using the {@link org.apache.hadoop.io.RawComparator}
 * defined for this job. The input file must be sorted with the same
 * comparator and contain {@link Job#getNumReduceTasks()} - 1 keys.
 */
@SuppressWarnings("unchecked") // keytype from conf not static
public void setConf(Configuration conf) {
  try {
    this.conf = conf;
    String parts = getPartitionFile(conf);
    final Path partFile = new Path(parts);
    final FileSystem fs = (DEFAULT_PATH.equals(parts))
      ? FileSystem.getLocal(conf)     // assume in DistributedCache
      : partFile.getFileSystem(conf);

    Job job = Job.getInstance(conf);
    Class<K> keyClass = (Class<K>)job.getMapOutputKeyClass();
    K[] splitPoints = readPartitions(fs, partFile, keyClass, conf);
    if (splitPoints.length != job.getNumReduceTasks() - 1) {
      throw new IOException("Wrong number of partitions in keyset");
    }
    RawComparator<K> comparator =
      (RawComparator<K>) job.getSortComparator();
    for (int i = 0; i < splitPoints.length - 1; ++i) {
      if (comparator.compare(splitPoints[i], splitPoints[i+1]) >= 0) {
        throw new IOException("Split points are out of order");
      }
    }
    boolean natOrder =
      conf.getBoolean(NATURAL_ORDER, true);
    if (natOrder && BinaryComparable.class.isAssignableFrom(keyClass)) {
      partitions = buildTrie((BinaryComparable[])splitPoints, 0,
          splitPoints.length, new byte[0],
          // Now that blocks of identical splitless trie nodes are 
          // represented reentrantly, and we develop a leaf for any trie
          // node with only one split point, the only reason for a depth
          // limit is to refute stack overflow or bloat in the pathological
          // case where the split points are long and mostly look like bytes 
          // iii...iixii...iii   .  Therefore, we make the default depth
          // limit large but not huge.
          conf.getInt(MAX_TRIE_DEPTH, 200));
    } else {
      partitions = new BinarySearchNode(splitPoints, comparator);
    }
  } catch (IOException e) {
    throw new IllegalArgumentException("Can't read partitions file", e);
  }
}
 
Example 11
Project: hadoop   File: TestCompressionEmulationUtils.java
/**
 * Test compressible {@link GridmixRecord}.
 */
@Test
public void testCompressibleGridmixRecord() throws IOException {
  JobConf conf = new JobConf();
  CompressionEmulationUtil.setCompressionEmulationEnabled(conf, true);
  CompressionEmulationUtil.setInputCompressionEmulationEnabled(conf, true);
  
  FileSystem lfs = FileSystem.getLocal(conf);
  int dataSize = 1024 * 1024 * 10; // 10 MB
  float ratio = 0.357F;
  
  // define the test's root temp directory
  Path rootTempDir =
      new Path(System.getProperty("test.build.data", "/tmp")).makeQualified(
          lfs.getUri(), lfs.getWorkingDirectory());

  Path tempDir = new Path(rootTempDir, 
                          "TestPossiblyCompressibleGridmixRecord");
  lfs.delete(tempDir, true);
  
  // define a compressible GridmixRecord
  GridmixRecord record = new GridmixRecord(dataSize, 0);
  record.setCompressibility(true, ratio); // enable compression
  
  conf.setClass(FileOutputFormat.COMPRESS_CODEC, GzipCodec.class, 
                CompressionCodec.class);
  org.apache.hadoop.mapred.FileOutputFormat.setCompressOutput(conf, true);
  
  // write the record to a file
  Path recordFile = new Path(tempDir, "record");
  OutputStream outStream = CompressionEmulationUtil
                             .getPossiblyCompressedOutputStream(recordFile, 
                                                                conf);    
  DataOutputStream out = new DataOutputStream(outStream);
  record.write(out);
  out.close();
  outStream.close();
  
  // open the compressed stream for reading
  Path actualRecordFile = recordFile.suffix(".gz");
  InputStream in = 
    CompressionEmulationUtil
      .getPossiblyDecompressedInputStream(actualRecordFile, conf, 0);
  
  // get the compressed file size
  long compressedFileSize = lfs.listStatus(actualRecordFile)[0].getLen();
  
  GridmixRecord recordRead = new GridmixRecord();
  recordRead.readFields(new DataInputStream(in));
  
  assertEquals("Record size mismatch in a compressible GridmixRecord",
               dataSize, recordRead.getSize());
  assertTrue("Failed to generate a compressible GridmixRecord",
             recordRead.getSize() > compressedFileSize);
  
  // check if the record can generate data with the desired compression ratio
  float seenRatio = ((float)compressedFileSize)/dataSize;
  assertEquals(CompressionEmulationUtil.standardizeCompressionRatio(ratio), 
      CompressionEmulationUtil.standardizeCompressionRatio(seenRatio), 1.0D);
}
 
Example 12
Project: aliyun-maxcompute-data-collectors   File: TestClobRef.java
public void testExternalSubdir() throws IOException {
  final String DATA = "This is the clob data!";
  final String FILENAME = "_lob/clobdata";

  try {
    doExternalTest(DATA, FILENAME);
  } finally {
    // remove dir we made.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    String tmpDir = System.getProperty("test.build.data", "/tmp/");
    Path lobDir = new Path(new Path(tmpDir), "_lob");
    fs.delete(lobDir, true);
  }
}
 
Example 13
Project: hadoop-oss   File: TestMapFile.java
/**
 * test {@code MapFile.fix()} method
 */
@Test
public void testFix() {
  final String INDEX_LESS_MAP_FILE = "testFix.mapfile";
  int PAIR_SIZE = 20;
  MapFile.Writer writer = null;
  try {
    FileSystem fs = FileSystem.getLocal(conf);
    Path dir = new Path(TEST_DIR, INDEX_LESS_MAP_FILE);
    writer = createWriter(INDEX_LESS_MAP_FILE, IntWritable.class, Text.class);
    for (int i = 0; i < PAIR_SIZE; i++)
      writer.append(new IntWritable(0), new Text("value"));
    writer.close();

    File indexFile = new File(".", "." + INDEX_LESS_MAP_FILE + "/index");
    boolean isDeleted = false;
    if (indexFile.exists())
      isDeleted = indexFile.delete();

    if (isDeleted)
      assertTrue("testFix error !!!",
          MapFile.fix(fs, dir, IntWritable.class, Text.class, true, conf) == PAIR_SIZE);
  } catch (Exception ex) {
    fail("testFix error !!!");
  } finally {
    IOUtils.cleanup(null, writer);
  }
}
 
Example 14
Project: hadoop   File: TestNativeIO.java
private void assertPermissions(File f, int expected) throws IOException {
  FileSystem localfs = FileSystem.getLocal(new Configuration());
  FsPermission perms = localfs.getFileStatus(
    new Path(f.getAbsolutePath())).getPermission();
  assertEquals(expected, perms.toShort());
}
 
Example 15
Project: aliyun-maxcompute-data-collectors   File: TestIncrementalImport.java
/**
 * Assert that a directory contains a file with exactly one line
 * in it, containing the prescribed number 'val'.
 */
public void assertFirstSpecificNumber(String tableName, int val) {
  try {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path warehouse = new Path(BaseSqoopTestCase.LOCAL_WAREHOUSE_DIR);
    Path tableDir = new Path(warehouse, tableName);
    FileStatus [] stats = fs.listStatus(tableDir);
    String [] filePaths = new String[stats.length];
    for (int i = 0; i < stats.length; i++) {
      filePaths[i] = stats[i].getPath().toString();
    }

    // Read the first file that is not a hidden file.
    boolean foundVal = false;
    for (String filePath : filePaths) {
      String fileName = new Path(filePath).getName();
      if (fileName.startsWith("_") || fileName.startsWith(".")) {
        continue;
      }

      if (foundVal) {
        // Make sure we don't have two or more "real" files in the dir.
        fail("Got an extra data-containing file in this directory.");
      }

      BufferedReader r = new BufferedReader(
          new InputStreamReader(fs.open(new Path(filePath))));
      try {
        String s = r.readLine();
        if (null == s) {
          fail("Unexpected empty file " + filePath + ".");
        }
        assertEquals(val, (int) Integer.valueOf(s.trim()));

        String nextLine = r.readLine();
        if (nextLine != null) {
          fail("Expected only one result, but got another line: " + nextLine);
        }

        // Successfully got the value we were looking for.
        foundVal = true;
      } finally {
        r.close();
      }
    }
  } catch (IOException e) {
    fail("Got unexpected exception: " + StringUtils.stringifyException(e));
  }
}
 
Example 16
Project: hadoop-oss   File: TestListFiles.java
@BeforeClass
public static void testSetUp() throws Exception {
  fs = FileSystem.getLocal(conf);
  fs.delete(TEST_DIR, true);
}
 
Example 17
Project: hadoop   File: TestMapFile.java
/**
 * Test getClosest feature.
 * 
 * @throws Exception
 */
@Test
@SuppressWarnings("deprecation")
public void testGetClosest() throws Exception {
  // Write a mapfile of simple data: keys are
  Path dirName = new Path(TEST_DIR, "testGetClosest.mapfile");
  FileSystem fs = FileSystem.getLocal(conf);
  Path qualifiedDirName = fs.makeQualified(dirName);
  // Make an index entry for every third insertion.
  MapFile.Writer.setIndexInterval(conf, 3);
  MapFile.Writer writer = null;
  MapFile.Reader reader = null;
  try {
    writer = new MapFile.Writer(conf, fs, qualifiedDirName.toString(),
      Text.class, Text.class);
    // Assert that the index interval we configured (3) took effect
    assertEquals(3, writer.getIndexInterval());
    // Add entries up to 100 in intervals of ten.
    final int FIRST_KEY = 10;
    for (int i = FIRST_KEY; i < 100; i += 10) {
      String iStr = Integer.toString(i);
      Text t = new Text("00".substring(iStr.length()) + iStr);
      writer.append(t, t);
    }
    writer.close();
    // Now do getClosest on created mapfile.
    reader = new MapFile.Reader(qualifiedDirName, conf);
    Text key = new Text("55");
    Text value = new Text();
    Text closest = (Text) reader.getClosest(key, value);
    // Assert that closest after 55 is 60
    assertEquals(new Text("60"), closest);
    // Get closest that falls before the passed key: 50
    closest = (Text) reader.getClosest(key, value, true);
    assertEquals(new Text("50"), closest);
    // Test get closest when we pass explicit key
    final Text TWENTY = new Text("20");
    closest = (Text) reader.getClosest(TWENTY, value);
    assertEquals(TWENTY, closest);
    closest = (Text) reader.getClosest(TWENTY, value, true);
    assertEquals(TWENTY, closest);
    // Test what happens at boundaries. Assert if searching a key that is
    // less than first key in the mapfile, that the first key is returned.
    key = new Text("00");
    closest = (Text) reader.getClosest(key, value);
    assertEquals(FIRST_KEY, Integer.parseInt(closest.toString()));

    // If we're looking for the first key before, and we pass in a key before
    // the first key in the file, we should get null
    closest = (Text) reader.getClosest(key, value, true);
    assertNull(closest);

    // Assert that null is returned if key is > last entry in mapfile.
    key = new Text("99");
    closest = (Text) reader.getClosest(key, value);
    assertNull(closest);

    // If we were looking for the key before, we should get the last key
    closest = (Text) reader.getClosest(key, value, true);
    assertEquals(new Text("90"), closest);
  } finally {
    IOUtils.cleanup(null, writer, reader);
  }
}
 
Example 18
Project: aliyun-maxcompute-data-collectors   File: TestImportJob.java
public void testManyColumns() throws Exception {
  int numberOfColumns = 7500;

  // Create a bunch of columns
  String[] colNames = new String[numberOfColumns];
  String[] colTypes = new String[numberOfColumns];
  String[] colVals = new String[numberOfColumns];
  List<String> testColVals = new ArrayList<String>(numberOfColumns);
  for (int i = 0; i < numberOfColumns; ++i) {
    colNames[i] = BASE_COL_NAME + Integer.toString(i);
    colTypes[i] = "VARCHAR(32)";
    colVals[i] = "'meep'";
    testColVals.add("meep");
  }
  createTableWithColTypesAndNames(colNames, colTypes, colVals);

  Configuration conf = new Configuration();

  // Make sure the output dir does not exist
  Path outputPath = new Path(new Path(getWarehouseDir()), getTableName());
  FileSystem fs = FileSystem.getLocal(conf);
  fs.delete(outputPath, true);
  assertTrue(!fs.exists(outputPath));

  String[] argv = getArgv(true, colNames, conf);

  Sqoop importer = new Sqoop(new ImportTool());
  try {
    int ret = Sqoop.runSqoop(importer, argv);
    assertTrue("Expected job to go through if target directory"
        + " does not exist.", 0 == ret);
    assertTrue(fs.exists(outputPath));
    // expecting one _SUCCESS file and one file containing data
    assertTrue("Expecting two files in the directory.",
        fs.listStatus(outputPath).length == 2);
    String[] output = getContent(conf, outputPath);
    assertEquals("Expected output and actual output should be same.",
        StringUtils.join(",", testColVals) + "\n",
        output[0]);
  } catch (Exception e) {
    // In debug mode, ImportException is wrapped in RuntimeException.
    LOG.info("Got exceptional return (expected: ok). msg is: " + e);
  }
}
 
Example 19
Project: hadoop   File: GenericOptionsParser.java
/**
 * Modify configuration according user-specified generic options
 * @param conf Configuration to be modified
 * @param line User-specified generic options
 */
private void processGeneralOptions(Configuration conf,
    CommandLine line) throws IOException {
  if (line.hasOption("fs")) {
    FileSystem.setDefaultUri(conf, line.getOptionValue("fs"));
  }

  if (line.hasOption("jt")) {
    String optionValue = line.getOptionValue("jt");
    if (optionValue.equalsIgnoreCase("local")) {
      conf.set("mapreduce.framework.name", optionValue);
    }

    conf.set("yarn.resourcemanager.address", optionValue, 
        "from -jt command line option");
  }
  if (line.hasOption("conf")) {
    String[] values = line.getOptionValues("conf");
    for(String value : values) {
      conf.addResource(new Path(value));
    }
  }

  if (line.hasOption('D')) {
    String[] property = line.getOptionValues('D');
    for(String prop : property) {
      String[] keyval = prop.split("=", 2);
      if (keyval.length == 2) {
        conf.set(keyval[0], keyval[1], "from command line");
      }
    }
  }

  if (line.hasOption("libjars")) {
    conf.set("tmpjars", 
             validateFiles(line.getOptionValue("libjars"), conf),
             "from -libjars command line option");
    //setting libjars in client classpath
    URL[] libjars = getLibJars(conf);
    if(libjars!=null && libjars.length>0) {
      conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader()));
      Thread.currentThread().setContextClassLoader(
          new URLClassLoader(libjars, 
              Thread.currentThread().getContextClassLoader()));
    }
  }
  if (line.hasOption("files")) {
    conf.set("tmpfiles", 
             validateFiles(line.getOptionValue("files"), conf),
             "from -files command line option");
  }
  if (line.hasOption("archives")) {
    conf.set("tmparchives", 
              validateFiles(line.getOptionValue("archives"), conf),
              "from -archives command line option");
  }
  conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);
  
  // tokensFile
  if(line.hasOption("tokenCacheFile")) {
    String fileName = line.getOptionValue("tokenCacheFile");
    // check if the local file exists
    FileSystem localFs = FileSystem.getLocal(conf);
    Path p = localFs.makeQualified(new Path(fileName));
    if (!localFs.exists(p)) {
        throw new FileNotFoundException("File "+fileName+" does not exist.");
    }
    if(LOG.isDebugEnabled()) {
      LOG.debug("setting conf tokensFile: " + fileName);
    }
    UserGroupInformation.getCurrentUser().addCredentials(
        Credentials.readTokenStorageFile(p, conf));
    conf.set("mapreduce.job.credentials.binary", p.toString(),
             "from -tokenCacheFile command line option");

  }
}
 
Example 20
Project: hadoop   File: TestLineRecordReaderJobs.java
/**
 * Reads the output file into a string
 *
 * @param conf the configuration used to obtain the local file system
 * @return the contents of the job's output file as a string
 * @throws IOException
 */
public String readOutputFile(Configuration conf) throws IOException {
  FileSystem localFs = FileSystem.getLocal(conf);
  Path file = new Path(outputDir, "part-r-00000");
  return UtilsForTests.slurpHadoop(file, localFs);
}