Java Code Examples for org.apache.hadoop.fs.Path

The following are top-voted examples showing how to use org.apache.hadoop.fs.Path, extracted from open source projects.
Example 1
Project: hadoop   File: TestJoinDatamerge.java
private static void checkOuterConsistency(Job job, Path[] src) 
    throws IOException {
  Path outf = FileOutputFormat.getOutputPath(job);
  FileStatus[] outlist = cluster.getFileSystem().listStatus(outf, new 
                           Utils.OutputFileUtils.OutputFilesFilter());
  assertEquals("number of part files is more than 1. It is" + outlist.length,
    1, outlist.length);
  assertTrue("output file with zero length" + outlist[0].getLen(),
    0 < outlist[0].getLen());
  SequenceFile.Reader r =
    new SequenceFile.Reader(cluster.getFileSystem(),
        outlist[0].getPath(), job.getConfiguration());
  IntWritable k = new IntWritable();
  IntWritable v = new IntWritable();
  while (r.next(k, v)) {
    assertEquals("counts does not match", v.get(),
      countProduct(k, src, job.getConfiguration()));
  }
  r.close();
}
 
Example 2
Project: big-data-benchmark   File: HadoopWordCount.java
public static void main(String[] args) throws Exception {
    BasicConfigurator.configure();
    Configuration conf = new Configuration();
    conf.setQuietMode(true);

    Job job = Job.getInstance(conf, "WordCount");
    job.setJarByClass(HadoopWordCount.class);

    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(Map.class);
    job.setCombinerClass(Reduce.class);
    job.setReducerClass(Reduce.class);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    FileInputFormat.setInputPaths(job, new Path(args[0]));
    FileOutputFormat.setOutputPath(job, new Path(args[1] + "_" + System.currentTimeMillis()));

    long t = System.currentTimeMillis();
    job.waitForCompletion(true);

    System.out.println("TotalTime=" + (System.currentTimeMillis() - t));
}
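
Note that the output path is suffixed with the current time in milliseconds, so repeated runs write to fresh directories; FileOutputFormat fails a job whose output directory already exists.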
 
Example 3
Project: hadoop   File: FTPFileSystem.java
/**
 * Convenience method, so that we don't open a new connection when using this
 * method from within another method. Otherwise every API invocation incurs
 * the overhead of opening/closing a TCP connection.
 */
private boolean mkdirs(FTPClient client, Path file, FsPermission permission)
    throws IOException {
  boolean created = true;
  Path workDir = new Path(client.printWorkingDirectory());
  Path absolute = makeAbsolute(workDir, file);
  String pathName = absolute.getName();
  if (!exists(client, absolute)) {
    Path parent = absolute.getParent();
    created = (parent == null || mkdirs(client, parent, FsPermission
        .getDirDefault()));
    if (created && parent != null) { // parent may be null when absolute is the root
      String parentDir = parent.toUri().getPath();
      client.changeWorkingDirectory(parentDir);
      created = created && client.makeDirectory(pathName);
    }
  } else if (isFile(client, absolute)) {
    throw new ParentNotDirectoryException(String.format(
        "Can't make directory for path %s since it is a file.", absolute));
  }
  return created;
}
 
Example 4
Project: oryx2   File: KMeansUpdate.java
/**
 * @param sparkContext    active Spark Context
 * @param trainData       training data on which to build a model
 * @param hyperParameters ordered list of hyper parameter values to use in building model
 * @param candidatePath   directory where additional model files can be written
 * @return a {@link PMML} representation of a model trained on the given data
 */
@Override
public PMML buildModel(JavaSparkContext sparkContext,
                       JavaRDD<String> trainData,
                       List<?> hyperParameters,
                       Path candidatePath) {
  int numClusters = (Integer) hyperParameters.get(0);
  Preconditions.checkArgument(numClusters > 1);
  log.info("Building KMeans Model with {} clusters", numClusters);

  JavaRDD<Vector> trainingData = parsedToVectorRDD(trainData.map(MLFunctions.PARSE_FN));
  KMeansModel kMeansModel = KMeans.train(trainingData.rdd(), numClusters, maxIterations,
                                         numberOfRuns, initializationStrategy);

  return kMeansModelToPMML(kMeansModel, fetchClusterCountsFromModel(trainingData, kMeansModel));
}
 
Example 5
Project: ditb   File: DataBlockEncodingTool.java
/**
 * Test a data block encoder on the given HFile. Output results to console.
 * @param conf The configuration to use.
 * @param kvLimit The maximum number of KeyValues to analyze.
 * @param hfilePath An HFile path on the file system.
 * @param compressionName Compression algorithm used for comparison.
 * @param doBenchmark Run performance benchmarks.
 * @param doVerify Verify correctness.
 * @throws IOException When the HFile path is incorrect.
 */
public static void testCodecs(Configuration conf, int kvLimit,
    String hfilePath, String compressionName, boolean doBenchmark,
    boolean doVerify) throws IOException {
  // create environment
  Path path = new Path(hfilePath);
  CacheConfig cacheConf = new CacheConfig(conf);
  FileSystem fs = FileSystem.get(conf);
  StoreFile hsf = new StoreFile(fs, path, conf, cacheConf,
    BloomType.NONE);

  StoreFile.Reader reader = hsf.createReader();
  reader.loadFileInfo();
  KeyValueScanner scanner = reader.getStoreFileScanner(true, true);

  // run the utilities
  DataBlockEncodingTool comp = new DataBlockEncodingTool(compressionName);
  int majorVersion = reader.getHFileVersion();
  comp.useHBaseChecksum = majorVersion > 2
      || (majorVersion == 2 && reader.getHFileMinorVersion() >= HFileReaderV2.MINOR_VERSION_WITH_CHECKSUM);
  comp.checkStatistics(scanner, kvLimit);
  if (doVerify) {
    comp.verifyCodecs(scanner, kvLimit);
  }
  if (doBenchmark) {
    comp.benchmarkCodecs();
  }
  comp.displayStatistics();

  // cleanup
  scanner.close();
  reader.close(cacheConf.shouldEvictOnClose());
}
 
Example 6
Project: hadoop   File: TestNameNodeMetrics.java
/**
 * Test NN ReadOps Count and WriteOps Count
 */
@Test
public void testReadWriteOps() throws Exception {
  MetricsRecordBuilder rb = getMetrics(NN_METRICS);
  long startWriteCounter = MetricsAsserts.getLongCounter("TransactionsNumOps",
      rb);
  Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");

  //Perform create file operation
  createFile(file1_Path, 1024 * 1024,(short)2);

  // Perform read file operation on earlier created file
  readFile(fs, file1_Path);
  MetricsRecordBuilder rbNew = getMetrics(NN_METRICS);
  assertTrue(MetricsAsserts.getLongCounter("TransactionsNumOps", rbNew) >
      startWriteCounter);
}
 
Example 7
Project: hadoop-oss   File: TestNativeIO.java
@Test (timeout = 30000)
public void testFstat() throws Exception {
  FileOutputStream fos = new FileOutputStream(
    new File(TEST_DIR, "testfstat"));
  NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD());
  fos.close();
  LOG.info("Stat: " + String.valueOf(stat));

  String owner = stat.getOwner();
  String expectedOwner = System.getProperty("user.name");
  if (Path.WINDOWS) {
    UserGroupInformation ugi =
        UserGroupInformation.createRemoteUser(expectedOwner);
    final String adminsGroupString = "Administrators";
    if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) {
      expectedOwner = adminsGroupString;
    }
  }
  assertEquals(expectedOwner, owner);
  assertNotNull(stat.getGroup());
  assertTrue(!stat.getGroup().isEmpty());
  assertEquals("Stat mode field should indicate a regular file", S_IFREG,
    stat.getMode() & S_IFMT);
}
 
Example 8
Project: hadoop-oss   File: TestCodec.java
private void codecTestMapFile(Class<? extends CompressionCodec> clazz,
    CompressionType type, int records) throws Exception {
  
  FileSystem fs = FileSystem.get(conf);
  LOG.info("Creating MapFiles with " + records  + 
          " records using codec " + clazz.getSimpleName());
  Path path = new Path(new Path(
      System.getProperty("test.build.data", "/tmp")),
    clazz.getSimpleName() + "-" + type + "-" + records);

  LOG.info("Writing " + path);
  createMapFile(conf, fs, path, clazz.newInstance(), type, records);
  MapFile.Reader reader = new MapFile.Reader(path, conf);
  Text key1 = new Text("002");
  assertNotNull(reader.get(key1, new Text()));
  Text key2 = new Text("004");
  assertNotNull(reader.get(key2, new Text()));
}
 
Example 9
Project: hadoop   File: BlockReportTestBase.java
/**
 * Test writes a file and closes it.
 * A block report is generated with a bad GS for a single block.
 * The block report is forced and the check for the number of corrupt blocks is performed.
 *
 * @throws IOException in case of an error
 */
@Test(timeout=300000)
public void blockReport_03() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  writeFile(METHOD_NAME, FILE_SIZE, filePath);

  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N0);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();

  assertThat("Wrong number of corrupt blocks",
             cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
  assertThat("Wrong number of PendingDeletion blocks",
             cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
}
 
Example 10
Project: hadoop   File: TestConfiguration.java
public void testBooleanValues() throws IOException {
  out=new BufferedWriter(new FileWriter(CONFIG));
  startConfig();
  appendProperty("test.bool1", "true");
  appendProperty("test.bool2", "false");
  appendProperty("test.bool3", "  true ");
  appendProperty("test.bool4", " false ");
  appendProperty("test.bool5", "foo");
  appendProperty("test.bool6", "TRUE");
  appendProperty("test.bool7", "FALSE");
  appendProperty("test.bool8", "");
  endConfig();
  Path fileResource = new Path(CONFIG);
  conf.addResource(fileResource);
  assertEquals(true, conf.getBoolean("test.bool1", false));
  assertEquals(false, conf.getBoolean("test.bool2", true));
  assertEquals(true, conf.getBoolean("test.bool3", false));
  assertEquals(false, conf.getBoolean("test.bool4", true));
  assertEquals(true, conf.getBoolean("test.bool5", true));
  assertEquals(true, conf.getBoolean("test.bool6", false));
  assertEquals(false, conf.getBoolean("test.bool7", true));
  assertEquals(false, conf.getBoolean("test.bool8", false));
}
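
Note the semantics the assertions pin down: surrounding whitespace is trimmed ("  true " parses as true), parsing is case-insensitive ("TRUE"/"FALSE"), and an unparseable value such as "foo" falls back to the supplied default.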
 
Example 11
Project: hadoop-oss   File: TestFileStatus.java
@Test
public void testCompareTo() throws IOException {
  Path path1 = new Path("path1");
  Path path2 = new Path("path2");
  FileStatus fileStatus1 =
      new FileStatus(1, true, 1, 1, 1, 1, FsPermission.valueOf("-rw-rw-rw-"),
          "one", "one", null, path1);
  FileStatus fileStatus2 =
      new FileStatus(1, true, 1, 1, 1, 1, FsPermission.valueOf("-rw-rw-rw-"),
          "one", "one", null, path2);
  assertTrue(fileStatus1.compareTo(fileStatus2) < 0);
  assertTrue(fileStatus2.compareTo(fileStatus1) > 0);

  List<FileStatus> statList = new ArrayList<>();
  statList.add(fileStatus1);
  statList.add(fileStatus2);
  assertTrue(Collections.binarySearch(statList, fileStatus1) > -1);
}
 
Example 12
Project: hadoop   File: FileInputFormat.java
/**
 * Add files in the input path recursively to the result list.
 * @param result
 *          The List to store all files.
 * @param fs
 *          The FileSystem.
 * @param path
 *          The input path.
 * @param inputFilter
 *          The input filter that can be used to filter files/dirs. 
 * @throws IOException
 */
protected void addInputPathRecursively(List<FileStatus> result,
    FileSystem fs, Path path, PathFilter inputFilter) 
    throws IOException {
  RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(path);
  while (iter.hasNext()) {
    LocatedFileStatus stat = iter.next();
    if (inputFilter.accept(stat.getPath())) {
      if (stat.isDirectory()) {
        addInputPathRecursively(result, fs, stat.getPath(), inputFilter);
      } else {
        result.add(stat);
      }
    }
  }
}
 
Example 13
Project: hadoop   File: TestSharedFileDescriptorFactory.java
@Test(timeout=10000)
public void testCleanupRemainders() throws Exception {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  File path = new File(TEST_BASE, "testCleanupRemainders");
  path.mkdirs();
  String remainder1 = path.getAbsolutePath() + 
      Path.SEPARATOR + "woot2_remainder1";
  String remainder2 = path.getAbsolutePath() +
      Path.SEPARATOR + "woot2_remainder2";
  createTempFile(remainder1);
  createTempFile(remainder2);
  SharedFileDescriptorFactory.create("woot2_", 
      new String[] { path.getAbsolutePath() });
  // creating the SharedFileDescriptorFactory should have removed 
  // the remainders
  Assert.assertFalse(new File(remainder1).exists());
  Assert.assertFalse(new File(remainder2).exists());
  FileUtil.fullyDelete(path);
}
 
Example 14
Project: ditb   File: FSTableDescriptors.java
@Override
public Map<String, HTableDescriptor> getByNamespace(String name)
throws IOException {
  Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>();
  List<Path> tableDirs =
      FSUtils.getLocalTableDirs(fs, FSUtils.getNamespaceDir(rootdir, name));
  for (Path d: tableDirs) {
    HTableDescriptor htd = null;
    try {
      htd = get(FSUtils.getTableName(d));
    } catch (FileNotFoundException fnfe) {
      // inability of retrieving one HTD shouldn't stop getting the remaining
      LOG.warn("Trouble retrieving htd", fnfe);
    }
    if (htd == null) continue;
    htds.put(FSUtils.getTableName(d).getNameAsString(), htd);
  }
  return htds;
}
 
Example 15
Project: hadoop   File: RandomTextWriterJob.java
/**
 * This is the main routine for launching a distributed random write job.
 * It runs 10 maps/node and each node writes 1 gig of data to a DFS file.
 * The reduce doesn't do anything.
 * 
 * @throws IOException 
 */
public int run(String[] args) throws Exception {    
  if (args.length == 0) {
    return printUsage();    
  }
  Job job = createJob(getConf());
  FileOutputFormat.setOutputPath(job, new Path(args[0]));
  Date startTime = new Date();
  System.out.println("Job started: " + startTime);
  int ret = job.waitForCompletion(true) ? 0 : 1;
  Date endTime = new Date();
  System.out.println("Job ended: " + endTime);
  System.out.println("The job took " + 
                     (endTime.getTime() - startTime.getTime()) /1000 + 
                     " seconds.");
  
  return ret;
}
 
Example 16
Project: hadoop-oss   File: TestTFileSplit.java
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
 
Example 17
Project: Wikipedia-Index   File: MaxThreeLabel.java
public static void main(String[] args) throws Exception {
	Configuration conf = new Configuration();
	
	Job job = Job.getInstance(conf);
	job.setJobName("MaxThreeLabel");
	job.setJarByClass(MaxThreeLabel.class);
	
	job.setMapOutputKeyClass(Text.class);
	job.setMapOutputValueClass(TextArrayWritable.class);
	
	job.setOutputKeyClass(Text.class);
	job.setOutputValueClass(Text.class);
	
	job.setMapperClass(MaxThreeLabelMap.class);
	job.setReducerClass(MaxThreeLabelReduce.class);
	
	job.setInputFormatClass(TextInputFormat.class);
	job.setOutputFormatClass(TextOutputFormat.class);
	
	FileInputFormat.addInputPath(job, new Path(args[0]));
	FileOutputFormat.setOutputPath(job, new Path(args[1]));
	boolean wait = job.waitForCompletion(true);
	System.exit(wait ? 0 : 1);
}
 
Example 18
Project: oryx2   File: SaveToHDFSFunction.java
@Override
public void call(JavaPairRDD<K,M> rdd, Time time) throws IOException {
  if (rdd.isEmpty()) {
    log.info("RDD was empty, not saving to HDFS");
  } else {
    String file = prefix + "-" + time.milliseconds() + "." + suffix;
    Path path = new Path(file);
    FileSystem fs = FileSystem.get(path.toUri(), hadoopConf);
    if (fs.exists(path)) {
      log.warn("Saved data already existed, possibly from a failed job. Deleting {}", path);
      fs.delete(path, true);
    }
    log.info("Saving RDD to HDFS at {}", file);
    rdd.mapToPair(
        new ValueToWritableFunction<>(keyClass, messageClass, keyWritableClass, messageWritableClass)
    ).saveAsNewAPIHadoopFile(
        file,
        keyWritableClass,
        messageWritableClass,
        SequenceFileOutputFormat.class,
        hadoopConf);
  }
}
 
Example 19
Project: Transwarp-Sample-Code   File: AvroEventSerializer.java
private Schema loadFromUrl(String schemaUrl) throws IOException {
  Configuration conf = new Configuration();
  Schema.Parser parser = new Schema.Parser();
  if (schemaUrl.toLowerCase(Locale.ENGLISH).startsWith("hdfs://")) {
    FileSystem fs = FileSystem.get(conf);
    FSDataInputStream input = null;
    try {
      input = fs.open(new Path(schemaUrl));
      return parser.parse(input);
    } finally {
      if (input != null) {
        input.close();
      }
    }
  } else {
    InputStream is = null;
    try {
      is = new URL(schemaUrl).openStream();
      return parser.parse(is);
    } finally {
      if (is != null) {
        is.close();
      }
    }
  }
}
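
Both branches share the same open-parse-close shape; on Java 7+ the method can be written more compactly with try-with-resources. A minimal equivalent sketch, behavior unchanged:

private Schema loadFromUrl(String schemaUrl) throws IOException {
  Schema.Parser parser = new Schema.Parser();
  if (schemaUrl.toLowerCase(Locale.ENGLISH).startsWith("hdfs://")) {
    FileSystem fs = FileSystem.get(new Configuration());
    // the stream is closed automatically, even if parse() throws
    try (FSDataInputStream input = fs.open(new Path(schemaUrl))) {
      return parser.parse(input);
    }
  }
  try (InputStream is = new URL(schemaUrl).openStream()) {
    return parser.parse(is);
  }
}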
 
Example 20
Project: ditb   File: TestImportTsv.java
@Test
public void testBulkOutputWithoutAnExistingTable() throws Exception {
  String table = "test-" + UUID.randomUUID();

  // Prepare the arguments required for the test.
  Path hfiles = new Path(util.getDataTestDirOnTestFS(table), "hfiles");
  String[] args = new String[] {
      "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B",
      "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b",
      "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(),
      table
  };

  doMROnTableTest(util, FAMILY, null, args, 3);
  util.deleteTable(table);
}
 
Example 21
Project: ditb   File: AbstractHFileWriter.java
public AbstractHFileWriter(CacheConfig cacheConf,
    FSDataOutputStream outputStream, Path path, 
    KVComparator comparator, HFileContext fileContext) {
  this.outputStream = outputStream;
  this.path = path;
  this.name = path != null ? path.getName() : outputStream.toString();
  this.hFileContext = fileContext;
  DataBlockEncoding encoding = hFileContext.getDataBlockEncoding();
  if (encoding != DataBlockEncoding.NONE) {
    this.blockEncoder = new HFileDataBlockEncoderImpl(encoding);
  } else {
    this.blockEncoder = NoOpDataBlockEncoder.INSTANCE;
  }
  this.comparator = comparator != null ? comparator
      : KeyValue.COMPARATOR;

  closeOutputStream = path != null;
  this.cacheConf = cacheConf;
}
 
Example 22
Project: hadoop   File: TestDiskspaceQuotaUpdate.java
/**
 * Test if the quota can be correctly updated when a file is created
 */
@Test (timeout=60000)
public void testQuotaUpdateWithFileCreate() throws Exception  {
  final Path foo = new Path(dir, "foo");
  Path createdFile = new Path(foo, "created_file.data");
  dfs.mkdirs(foo);
  dfs.setQuota(foo, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
  long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
  DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
      fileLen, BLOCKSIZE, REPLICATION, seed);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());
  QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed();
  assertEquals(2, cnt.getNameSpace());
  assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());
}
 
Example 23
Project: hadoop   File: Gridmix.java
/**
 * Setup gridmix for emulation of distributed cache load. This includes
 * generation of distributed cache files, if needed.
 * @param conf gridmix configuration
 * @param traceIn trace file path (if it is '-', the trace is read from
 *                stdin)
 * @param ioPath &lt;ioPath&gt;/input/ is the dir where input data (a) exists
 *               or (b) is generated. &lt;ioPath&gt;/distributedCache/ is the
 *               folder where distributed cache data (a) exists or (b) is to be
 *               generated by gridmix.
 * @param generate true if -generate option was specified
 * @return exit code
 * @throws IOException
 * @throws InterruptedException
 */
private int setupDistCacheEmulation(Configuration conf, String traceIn,
    Path ioPath, boolean generate) throws IOException, InterruptedException {
  distCacheEmulator.init(traceIn, factory.jobCreator, generate);
  int exitCode = 0;
  if (distCacheEmulator.shouldGenerateDistCacheData() ||
      distCacheEmulator.shouldEmulateDistCacheLoad()) {

    JobStoryProducer jsp = createJobStoryProducer(traceIn, conf);
    exitCode = distCacheEmulator.setupGenerateDistCacheData(jsp);
    if (exitCode == 0) {
      // If there are files to be generated, run a MapReduce job to generate
      // these distributed cache files of all the simulated jobs of this trace.
      writeDistCacheData(conf);
    }
  }
  return exitCode;
}
 
Example 24
Project: paraflow   File: FSFactory.java
public List<Path> listFiles(Path dirPath)
{
    List<Path> files = new ArrayList<>();
    if (!getFS().isPresent()) {
        throw new FileSystemNotFoundException("");
    }
    FileStatus[] fileStatuses = new FileStatus[0];
    try {
        fileStatuses = getFS().get().listStatus(dirPath);
    }
    catch (IOException e) {
        log.error(e);
    }
    for (FileStatus f : fileStatuses) {
        if (f.isFile()) {
            files.add(f.getPath());
        }
    }
    return files;
}
 
Example 25
Project: hadoop   File: AbstractContractCreateTest.java
@Test
public void testCreateFileOverExistingFileNoOverwrite() throws Throwable {
  describe("Verify overwriting an existing file fails");
  Path path = path("testCreateFileOverExistingFileNoOverwrite");
  byte[] data = dataset(256, 'a', 'z');
  writeDataset(getFileSystem(), path, data, data.length, 1024, false);
  byte[] data2 = dataset(10 * 1024, 'A', 'Z');
  try {
    writeDataset(getFileSystem(), path, data2, data2.length, 1024, false);
    fail("writing without overwrite unexpectedly succeeded");
  } catch (FileAlreadyExistsException expected) {
    //expected
    handleExpectedException(expected);
  } catch (IOException relaxed) {
    handleRelaxedException("Creating a file over a file with overwrite==false",
                           "FileAlreadyExistsException",
                           relaxed);
  }
}
 
Example 26
Project: hadoop   File: TestSeekBug.java
/**
 * Test (expected to throw IOE) for <code>FSDataInputStream#seek</code>
 * when the position argument is larger than the file size.
 */
@Test (expected=IOException.class)
public void testSeekPastFileSize() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  try {
    Path seekFile = new Path("seekboundaries.dat");
    DFSTestUtil.createFile(
      fs,
      seekFile,
      ONEMB,
      fs.getDefaultReplication(seekFile),
      seed);
    FSDataInputStream stream = fs.open(seekFile);
    // Perform "safe seek" (expected to pass)
    stream.seek(65536);
    assertEquals(65536, stream.getPos());
    // expect IOE for this call
    stream.seek(ONEMB + ONEMB + ONEMB);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 27
Project: hadoop   File: Merger.java
public static <K extends Object, V extends Object>
RawKeyValueIterator merge(Configuration conf, FileSystem fs,
                          Class<K> keyClass, Class<V> valueClass,
                          List<Segment<K, V>> segments,
                          int mergeFactor, Path tmpDir,
                          RawComparator<K> comparator, Progressable reporter,
                          boolean sortSegments,
                          Counters.Counter readsCounter,
                          Counters.Counter writesCounter,
                          Progress mergePhase)
    throws IOException {
  return new MergeQueue<K, V>(conf, fs, segments, comparator, reporter,
                         sortSegments,
                         TaskType.REDUCE).merge(keyClass, valueClass,
                                             mergeFactor, tmpDir,
                                             readsCounter, writesCounter,
                                             mergePhase);
}
 
Example 28
Project: hadoop   File: HistoryFileManager.java
private void mkdir(FileContext fc, Path path, FsPermission fsp)
    throws IOException {
  if (!fc.util().exists(path)) {
    try {
      fc.mkdir(path, fsp, true);

      FileStatus fsStatus = fc.getFileStatus(path);
      LOG.info("Perms after creating " + fsStatus.getPermission().toShort()
          + ", Expected: " + fsp.toShort());
      if (fsStatus.getPermission().toShort() != fsp.toShort()) {
        LOG.info("Explicitly setting permissions to : " + fsp.toShort()
            + ", " + fsp);
        fc.setPermission(path, fsp);
      }
    } catch (FileAlreadyExistsException e) {
      LOG.info("Directory: [" + path + "] already exists.");
    }
  }
}
 
Example 29
Project: hadoop   File: TestFSDownload.java
static LocalResource createJarFile(FileContext files, Path p, int len,
    Random r, LocalResourceVisibility vis) throws IOException,
    URISyntaxException {
  byte[] bytes = new byte[len];
  r.nextBytes(bytes);

  File archiveFile = new File(p.toUri().getPath() + ".jar");
  archiveFile.createNewFile();
  JarOutputStream out = new JarOutputStream(
      new FileOutputStream(archiveFile));
  out.putNextEntry(new JarEntry(p.getName()));
  out.write(bytes);
  out.closeEntry();
  out.close();

  LocalResource ret = recordFactory.newRecordInstance(LocalResource.class);
  ret.setResource(ConverterUtils.getYarnUrlFromPath(new Path(p.toString()
      + ".jar")));
  ret.setSize(len);
  ret.setType(LocalResourceType.ARCHIVE);
  ret.setVisibility(vis);
  ret.setTimestamp(files.getFileStatus(new Path(p.toString() + ".jar"))
      .getModificationTime());
  return ret;
}
 
Example 30
Project: hadoop   File: TestWebHdfsWithAuthenticationFilter.java
@Test
public void testWebHdfsAuthFilter() throws IOException {
  // getFileStatus() is supposed to pass through with the default filter.
  authorized = false;
  try {
    fs.getFileStatus(new Path("/"));
    Assert.fail("The filter fails to block the request");
  } catch (IOException e) {
  }
  authorized = true;
  fs.getFileStatus(new Path("/"));
}
 
Example 31
Project: hadoop   File: DFSTestUtil.java
public void waitReplication(FileSystem fs, String topdir, short value) 
    throws IOException, InterruptedException, TimeoutException {
  Path root = new Path(topdir);

  // wait for the replication factor to settle down
  for (int idx = 0; idx < nFiles; idx++) {
    waitReplication(fs, new Path(root, files[idx].getName()), value);
  }
}
 
Example 32
Project: hadoop   File: TestSequenceFileSync.java
@Test
public void testLowSyncpoint() throws IOException {
  final Configuration conf = new Configuration();
  final FileSystem fs = FileSystem.getLocal(conf);
  final Path path = new Path(System.getProperty("test.build.data", "/tmp"),
    "sequencefile.sync.test");
  final IntWritable input = new IntWritable();
  final Text val = new Text();
  SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf, path,
      IntWritable.class, Text.class);
  try {
    writeSequenceFile(writer, NUMRECORDS);
    for (int i = 0; i < 5 ; i++) {
     final SequenceFile.Reader reader;
     
     //try different SequenceFile.Reader constructors
     if (i % 2 == 0) {
       reader = new SequenceFile.Reader(fs, path, conf);
     } else {
       final FSDataInputStream in = fs.open(path);
       final long length = fs.getFileStatus(path).getLen();
       final int buffersize = conf.getInt("io.file.buffer.size", 4096);
       reader = new SequenceFile.Reader(in, buffersize, 0L, length, conf);
     }

     try {
        forOffset(reader, input, val, i, 0, 0);
        forOffset(reader, input, val, i, 65, 0);
        forOffset(reader, input, val, i, 2000, 21);
        forOffset(reader, input, val, i, 0, 0);
      } finally {
        reader.close();
      }
    }
  } finally {
    fs.delete(path, false);
  }
}
 
Example 33
Project: ditb   File: HRegionFileSystem.java
/**
 * Remove the region family from disk, archiving the store files.
 *
 * @param familyName Column Family Name
 * @throws IOException if an error occurs during the archiving
 */
public void deleteFamily(final String familyName) throws IOException {
  // archive family store files
  HFileArchiver.archiveFamily(fs, conf, regionInfoForFs, tableDir, Bytes.toBytes(familyName));

  // delete the family folder
  Path familyDir = getStoreDir(familyName);
  if (fs.exists(familyDir) && !deleteDir(familyDir)) throw new IOException(
      "Could not delete family " + familyName + " from FileSystem for region " + regionInfoForFs
          .getRegionNameAsString() + "(" + regionInfoForFs.getEncodedName() + ")");
}
 
Example 34
Project: hadoop   File: ViewFileSystem.java
@Override
public short getDefaultReplication(Path f) {
  try {
    InodeTree.ResolveResult<FileSystem> res =
      fsState.resolve(getUriPath(f), true);
    return res.targetFileSystem.getDefaultReplication(res.remainingPath);
  } catch (FileNotFoundException e) {
    throw new NotInMountpointException(f, "getDefaultReplication"); 
  }
}
 
Example 35
Project: hadoop   File: TestDynamicInputFormat.java
private static DistCpOptions getOptions() throws Exception {
  Path sourcePath = new Path(cluster.getFileSystem().getUri().toString()
          + "/tmp/source");
  Path targetPath = new Path(cluster.getFileSystem().getUri().toString()
          + "/tmp/target");

  List<Path> sourceList = new ArrayList<Path>();
  sourceList.add(sourcePath);
  DistCpOptions options = new DistCpOptions(sourceList, targetPath);
  options.setMaxMaps(NUM_SPLITS);
  return options;
}
 
Example 36
Project: LDA   File: InitMapper.java
@Override
protected void setup(Context context) throws IOException, InterruptedException {
    super.setup(context);
    Configuration conf = context.getConfiguration();
    this.K = Integer.parseInt(conf.get("K"));
    ReadIndexFromFile(new Path(conf.get(Job.indexFile)), conf);
    docToTopic = new Matrix<Integer>(M, K, 0);
    topicToWord = new Matrix<Integer>(K, V, 0);
}
 
Example 37
Project: hadoop-oss   File: ChRootedFs.java
@Override
public void createSymlink(final Path target, final Path link,
    final boolean createParent) throws IOException, UnresolvedLinkException {
  /*
   * We leave the link alone:
   * If qualified or link-relative then of course it is okay.
   * If absolute (i.e. "/"-relative) then the link has to be resolved
   * relative to the changed root.
   */
  myFs.createSymlink(fullPath(target), link, createParent);
}
 
Example 38
Project: hadoop   File: DistTool.java
protected static List<String> readFile(Configuration conf, Path inputfile
    ) throws IOException {
  List<String> result = new ArrayList<String>();
  FileSystem fs = inputfile.getFileSystem(conf);
  try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.open(inputfile),
          Charset.forName("UTF-8")))) {
    for(String line; (line = input.readLine()) != null;) {
      result.add(line);
    }
  }
  return result;
}
 
Example 39
Project: hadoop-oss   File: LocalDirAllocator.java
/**
 * Get all of the paths that currently exist in the working directories.
 * @param pathStr the path underneath the roots
 * @param conf the configuration to look up the roots in
 * @return all of the paths that exist under any of the roots
 * @throws IOException
 */
public Iterable<Path> getAllLocalPathsToRead(String pathStr, 
                                             Configuration conf
                                             ) throws IOException {
  AllocatorPerContext context;
  synchronized (this) {
    context = obtainContext(contextCfgItemName);
  }
  return context.getAllLocalPathsToRead(pathStr, conf);    
}
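
A brief usage sketch, assuming the allocator was constructed with the standard "mapreduce.cluster.local.dir" key (the relative path below is hypothetical):

LocalDirAllocator allocator = new LocalDirAllocator("mapreduce.cluster.local.dir");
// yields one path per configured root under which the relative path actually exists
for (Path p : allocator.getAllLocalPathsToRead("jobcache/job_001/spill0.out", conf)) {
  System.out.println(p);
}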
 
Example 40
Project: QDrill   File: TestParquetMetadataCache.java
@BeforeClass
public static void copyData() throws Exception {
  // copy the data into the temporary location
  String tmpLocation = getDfsTestTmpSchemaLocation();
  File dataDir = new File(tmpLocation + Path.SEPARATOR + tableName);
  dataDir.mkdir();
  FileUtils.copyDirectory(new File(String.format("%s/multilevel/parquet", TEST_RES_PATH)),
      dataDir);
}
 
Example 41
Project: hadoop-oss   File: ChRootedFs.java
@Override
public void setOwner(final Path f, final String username,
    final String groupname)
  throws IOException, UnresolvedLinkException {
  myFs.setOwner(fullPath(f), username, groupname);
  
}
 
Example 42
Project: ditb   File: HRegionFileSystem.java
/**
 * Return the store file information of the specified family/file.
 *
 * @param familyName Column Family Name
 * @param fileName   File Name
 * @return The {@link StoreFileInfo} for the specified family/file
 */
StoreFileInfo getStoreFileInfo(final String familyName, final String fileName)
    throws IOException {
  Path familyDir = getStoreDir(familyName);
  return ServerRegionReplicaUtil
      .getStoreFileInfo(conf, fs, regionInfo, regionInfoForFs, familyName,
          new Path(familyDir, fileName));
}
 
Example 43
Project: ditb   File: TestPrefixTree.java
@Before
public void setUp() throws Exception {
  TableName tableName = TableName.valueOf(getClass().getSimpleName());
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(fam).setDataBlockEncoding(DataBlockEncoding.PREFIX_TREE));
  HRegionInfo info = new HRegionInfo(tableName, null, null, false);
  Path path = testUtil.getDataTestDir(getClass().getSimpleName());
  region = HRegion.createHRegion(info, path, testUtil.getConfiguration(), htd);
}
 
Example 44
Project: dremio-oss   File: PathUtils.java
/**
 * Convert fs path relative to parent to dotted schema path.
 * parent: /a/b, child: /a/b/c/d -> c.d
 * @param parent parent path
 * @param child full path of child inside parent path
 * @return dotted schema name of child relative to parent.
 * @throws IOException if child does not belong under parent
 */
public static String toDottedPath(final Path parent, final Path child) throws IOException {
  final List<String> parentPathComponents = toPathComponents(parent);
  final List<String> childPathComponents = toPathComponents(child);
  for (int i = 0; i < parentPathComponents.size(); ++i) {
    if (!parentPathComponents.get(i).equals(childPathComponents.get(i))) {
      throw new IOException(String.format("Invalid file/directory %s listed under %s", child, parent));
    }
  }
  return constructFullPath(childPathComponents.subList(parentPathComponents.size(), childPathComponents.size()));
}
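
A quick illustration of the documented contract (paths hypothetical):

String dotted = PathUtils.toDottedPath(new Path("/a/b"), new Path("/a/b/c/d"));
// dotted is "c.d"; a child that does not sit under the parent throws IOException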
 
Example 45
Project: hadoop-oss   File: TestViewFsWithAuthorityLocalFs.java
@Override
@Test
public void testBasicPaths() {
    Assert.assertEquals(schemeWithAuthority,
        fcView.getDefaultFileSystem().getUri());
    Assert.assertEquals(fcView.makeQualified(
        new Path("/user/" + System.getProperty("user.name"))),
        fcView.getWorkingDirectory());
    Assert.assertEquals(fcView.makeQualified(
        new Path("/user/" + System.getProperty("user.name"))),
        fcView.getHomeDirectory());
    Assert.assertEquals(
        new Path("/foo/bar").makeQualified(schemeWithAuthority, null),
        fcView.makeQualified(new Path("/foo/bar")));
}
 
Example 46
Project: hdfs_to_cos_tools   File: HdfsFileToCosTask.java
private String ConvertHdfsPathToCosPath() throws IllegalArgumentException, IOException {
    String hdfsFolderPath = configReader.getSrcHdfsPath();
    if (configReader.getHdfsFS().getFileStatus(new Path(hdfsFolderPath)).isFile()) {
        hdfsFolderPath = hdfsFolderPath.substring(0, hdfsFolderPath.lastIndexOf("/"));
    }
    String hdfsFilePath = hdfsFileStatus.getPath().toString();
    String tmpPath = "";
    if (!hdfsFolderPath.endsWith("/")) {
        hdfsFolderPath += "/";
    }

    // hdfs folder path example: hdfs://222:111:333:444:8020/tmp/chengwu/
    if (hdfsFolderPath.startsWith("hdfs://")) {
        hdfsFolderPath =
                hdfsFolderPath.substring(hdfsFolderPath.indexOf("/", "hdfs://".length()));
        // /tmp/chengwu/
    }

    // hdfs file path example: hdfs://222:111:333:444:8020/tmp/chengwu/xxx/yyy/len5M.txt

    if (hdfsFilePath.startsWith("hdfs://")) {
        hdfsFilePath = hdfsFilePath.substring(hdfsFilePath.indexOf("/", "hdfs://".length()));
        // /tmp/chengwu/xxx/yyy/len5M.txt
    }

    tmpPath = hdfsFilePath.substring(hdfsFolderPath.length());
    String cosFolder = configReader.getDestCosPath();
    if (!cosFolder.endsWith("/")) {
        cosFolder += "/";
    }
    tmpPath = cosFolder + tmpPath;
    if (!tmpPath.endsWith("/") && hdfsFileStatus.isDirectory()) {
        tmpPath += "/";
    }
    return tmpPath;
}
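
Net effect: the file's path relative to the source HDFS folder is re-rooted under the destination COS folder; for the paths in the comments above, /tmp/chengwu/xxx/yyy/len5M.txt under source folder /tmp/chengwu/ maps to <cos folder>/xxx/yyy/len5M.txt.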
 
Example 47
Project: hadoop   File: HadoopArchives.java
public void configure(JobConf conf) {
  this.conf = conf;

  // this is tightly tied to map reduce
  // since it does not expose an api 
  // to get the partition
  partId = conf.getInt(MRJobConfig.TASK_PARTITION, -1);
  // create a file name using the partition
  // we need to write to this directory
  tmpOutputDir = FileOutputFormat.getWorkOutputPath(conf);
  blockSize = conf.getLong(HAR_BLOCKSIZE_LABEL, blockSize);
  // get the output path and write to the tmp 
  // directory 
  partname = "part-" + partId;
  tmpOutput = new Path(tmpOutputDir, partname);
  rootPath = (conf.get(SRC_PARENT_LABEL, null) == null) ? null :
              new Path(conf.get(SRC_PARENT_LABEL));
  if (rootPath == null) {
    throw new RuntimeException("Unable to read parent " +
    		"path for har from config");
  }
  try {
    destFs = tmpOutput.getFileSystem(conf);
    //this was a stale copy
    if (destFs.exists(tmpOutput)) {
      destFs.delete(tmpOutput, false);
    } 
    partStream = destFs.create(tmpOutput, false, conf.getInt("io.file.buffer.size", 4096), 
        destFs.getDefaultReplication(tmpOutput), blockSize);
  } catch(IOException ie) {
    throw new RuntimeException("Unable to open output file " + tmpOutput, ie);
  }
  buffer = new byte[buf_size];
}
 
Example 48
Project: aliyun-maxcompute-data-collectors   File: TestLobFile.java
public void testMultiRecords() throws Exception {
  runClobFileTest(new Path(TEMP_BASE_DIR, "multi.lob"),
      CodecMap.NONE,
      "this is the first record",
      "this is the second record. I assure you that this record is long.",
      "yet one more record graces this file.");
}
 
Example 49
Project: circus-train   File: DynamicInputChunk.java
private static void initializeChunkInvariants(Configuration config) throws IOException {
  configuration = config;
  Path listingFilePath = new Path(getListingFilePath(configuration));
  chunkRootPath = new Path(listingFilePath.getParent(), "chunkDir");
  fs = chunkRootPath.getFileSystem(configuration);
  chunkFilePrefix = listingFilePath.getName() + ".chunk.";
}
 
Example 50
Project: HadoopGuides   File: FileSystemDoubleCat.java
public static void main(String[] args) throws IOException {
    final String uri = "hdfs://master:8020/user/tom/aa.txt";
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(uri), conf);
    FSDataInputStream in = null;
    try {
        in = fs.open(new Path(uri));
        IOUtils.copyBytes(in, System.out, 4096, false);
        in.seek(0); // go back to the beginning of the file
        IOUtils.copyBytes(in, System.out, 4096, false);
    } finally {
        IOUtils.closeStream(in);
    }
}
 
Example 51
Project: hadoop   File: ViewFs.java
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
    throws IOException {
  InodeTree.ResolveResult<AbstractFileSystem> res =
      fsState.resolve(getUriPath(path), true);
  res.targetFileSystem.modifyAclEntries(res.remainingPath, aclSpec);
}
 
Example 52
Project: hadoop   File: DistributedFileSystem.java
@Override
protected HdfsDataOutputStream primitiveCreate(Path f,
  FsPermission absolutePermission, EnumSet<CreateFlag> flag, int bufferSize,
  short replication, long blockSize, Progressable progress,
  ChecksumOpt checksumOpt) throws IOException {
  statistics.incrementWriteOps(1);
  final DFSOutputStream dfsos = dfs.primitiveCreate(
    getPathName(fixRelativePart(f)),
    absolutePermission, flag, true, replication, blockSize,
    progress, bufferSize, checksumOpt);
  return dfs.createWrappedOutputStream(dfsos, statistics);
}
 
Example 53
Project: hadoop   File: TestBookKeeperAsHASharedDir.java
private void assertCanStartHANameNodes(MiniDFSCluster cluster,
    Configuration conf, String path) throws ServiceFailedException,
    IOException, URISyntaxException, InterruptedException {
  // Now should be able to start both NNs. Pass "false" here so that we don't
  // try to waitActive on all NNs, since the second NN doesn't exist yet.
  cluster.restartNameNode(0, false);
  cluster.restartNameNode(1, true);

  // Make sure HA is working.
  cluster
      .getNameNode(0)
      .getRpcServer()
      .transitionToActive(
          new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
  FileSystem fs = null;
  try {
    Path newPath = new Path(path);
    fs = HATestUtil.configureFailoverFs(cluster, conf);
    assertTrue(fs.mkdirs(newPath));
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
        newPath.toString(), false).isDir());
  } finally {
    if (fs != null) {
      fs.close();
    }
  }
}
 
Example 54
Project: ditb   File: TestFSTableDescriptorForceCreation.java
@Test
public void testShouldAllowForcefulCreationOfAlreadyExistingTableDescriptor()
    throws Exception {
  final String name = "createNewTableNew2";
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  Path rootdir = new Path(UTIL.getDataTestDir(), name);
  FSTableDescriptors fstd = new FSTableDescriptors(UTIL.getConfiguration(), fs, rootdir);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(name));
  fstd.createTableDescriptor(htd, false);
  assertTrue("Should create new table descriptor",
      fstd.createTableDescriptor(htd, true));
}
 
Example 55
Project: ditb   File: TestLoadIncrementalHFilesSplitRecovery.java
/**
 * This test splits a table and attempts to bulk load.  The bulk import files
 * should be split before atomically importing.
 */
@Test (timeout=120000)
public void testGroupOrSplitPresplit() throws Exception {
  final TableName table = TableName.valueOf("groupOrSplitPresplit");
  try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
    setupTable(connection, table, 10);
    populateTable(connection, table, 1);
    assertExpectedTable(connection, table, ROWCOUNT, 1);
    forceSplit(table);

    final AtomicInteger countedLqis= new AtomicInteger();
    LoadIncrementalHFiles lih = new LoadIncrementalHFiles(
        util.getConfiguration()) {
      @Override
      protected List<LoadQueueItem> groupOrSplit(
          Multimap<ByteBuffer, LoadQueueItem> regionGroups,
          final LoadQueueItem item, final Table htable,
          final Pair<byte[][], byte[][]> startEndKeys) throws IOException {
        List<LoadQueueItem> lqis = super.groupOrSplit(regionGroups, item, htable, startEndKeys);
        if (lqis != null) {
          countedLqis.addAndGet(lqis.size());
        }
        return lqis;
      }
    };

    // create HFiles for different column families
    Path bulk = buildBulkFiles(table, 2);
    try (Table t = connection.getTable(table)) {
      lih.doBulkLoad(bulk, (HTable)t);
    }
    assertExpectedTable(connection, table, ROWCOUNT, 2);
    assertEquals(20, countedLqis.get());
  }
}
 
Example 56
Project: hadoop   File: CompositeInputFormat.java
/**
 * Convenience method for constructing composite formats.
 * Given operation (op), Object class (inf), set of paths (p) return:
 * {@code <op>(tbl(<inf>,<p1>),tbl(<inf>,<p2>),...,tbl(<inf>,<pn>)) }
 */
public static String compose(String op, Class<? extends InputFormat> inf,
    Path... path) {
  ArrayList<String> tmp = new ArrayList<String>(path.length);
  for (Path p : path) {
    tmp.add(p.toString());
  }
  return compose(op, inf, tmp.toArray(new String[0]));
}
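
A usage sketch (hypothetical paths; assumes the mapreduce-API CompositeInputFormat, where the composed expression is set under CompositeInputFormat.JOIN_EXPR and the join is then executed by CompositeInputFormat itself):

String expr = CompositeInputFormat.compose("inner", KeyValueTextInputFormat.class,
    new Path("/data/left"), new Path("/data/right"));
// expr composes to something like: inner(tbl(...KeyValueTextInputFormat,"/data/left"),tbl(...,"/data/right"))
job.getConfiguration().set(CompositeInputFormat.JOIN_EXPR, expr);
job.setInputFormatClass(CompositeInputFormat.class);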
 
Example 57
Project: hadoop   File: AbstractContractRootDirectoryTest.java
@Test
public void testRmEmptyRootDirNonRecursive() throws Throwable {
  //extra sanity checks here to avoid support calls about complete loss of data
  skipIfUnsupported(TEST_ROOT_TESTS_ENABLED);
  Path root = new Path("/");
  ContractTestUtils.assertIsDirectory(getFileSystem(), root);
  boolean deleted = getFileSystem().delete(root, true);
  LOG.info("rm / of empty dir result is {}", deleted);
  ContractTestUtils.assertIsDirectory(getFileSystem(), root);
}
 
Example 58
Project: alluxio   File: HdfsAndAlluxioUtils_update.java
/**
 * Creates a symlink at the given link path pointing to the target path.
 *
 * @param fileSystemInfo
 *            file system information
 * @param target
 *            target file path
 * @param link
 *            path of the link file
 * @param createParent
 *            whether to create the parent directory
 */
public static void createSymlink(FileSystemInfo fileSystemInfo, String target, String link, boolean createParent) {
	FileSystem fs = getFileSystem(fileSystemInfo);
	Path urit = new Path(target);
	Path uril = new Path(link);
	try {
		pathNotExistCheck(target, fs, urit);
		fs.createSymlink(urit, uril, createParent);
	} catch (IOException e) {
		e.printStackTrace();
	} finally {
		closeFileSystem(fs);
	}
}
 
Example 59
Project: dremio-oss   File: PathUtils.java
/**
 * Convert fully or partially dotted path to file system path.
 * a.b.c   -> /a/b/c
 * a.`b/c` -> /a/b/c
 * @param path a fully or partially dotted path
 * @return {@code fs Path}
 */
public static Path toFSPath(String path) {
  final List<String> pathComponents = Lists.newArrayList();
  for (String component: parseFullPath(path)) {
    if (component.contains(SLASH)) {
      pathComponents.addAll(toPathComponents(new Path(component)));
    } else {
      pathComponents.add(component);
    }
  }
  return toFSPath(pathComponents);
}
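
And the mapping from the javadoc in action (inputs hypothetical):

Path p1 = PathUtils.toFSPath("a.b.c");    // -> /a/b/c
Path p2 = PathUtils.toFSPath("a.`b/c`");  // -> /a/b/c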
 
Example 60
Project: hadoop   File: TestTaskAttempt.java
private TaskAttemptImpl createMapTaskAttemptImplForTest(
    EventHandler eventHandler, TaskSplitMetaInfo taskSplitMetaInfo, Clock clock) {
  ApplicationId appId = ApplicationId.newInstance(1, 1);
  JobId jobId = MRBuilderUtils.newJobId(appId, 1);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 1, TaskType.MAP);
  TaskAttemptListener taListener = mock(TaskAttemptListener.class);
  Path jobFile = mock(Path.class);
  JobConf jobConf = new JobConf();
  TaskAttemptImpl taImpl =
      new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1,
          taskSplitMetaInfo, jobConf, taListener, null,
          null, clock, null);
  return taImpl;
}