Java Code Examples for org.apache.hadoop.fs.Trash#moveToTrash()

The following examples show how to use org.apache.hadoop.fs.Trash#moveToTrash(). Each example is taken from an open-source project; the source file and license are noted above each snippet.
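
As a quick orientation before the examples: a Trash instance is built from a FileSystem and a Configuration, and moveToTrash(Path) returns true only when the path was actually moved into the trash. It returns false when the trash feature is disabled (fs.trash.interval is 0) or the path is already in the trash. The sketch below shows that basic pattern; the path name is made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;

public class TrashQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Trash is active only when fs.trash.interval (in minutes) is greater than 0.
    conf.set("fs.trash.interval", "10");

    FileSystem fs = FileSystem.get(conf);
    Trash trash = new Trash(fs, conf);

    // Hypothetical path, used only for illustration.
    Path stale = new Path("/tmp/stale-data");
    if (!trash.moveToTrash(stale)) {
      // Trash disabled or path already trashed; fall back to a permanent delete.
      fs.delete(stale, true);
    }
  }
}
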
Example 1
Source File: HadoopUtilsTest.java    From incubator-gobblin with Apache License 2.0
@Test
public void testMoveToTrash() throws IOException {
  Path hadoopUtilsTestDir = new Path(Files.createTempDir().getAbsolutePath(), "HadoopUtilsTestDir");
  Configuration conf = new Configuration();
  // fs.trash.interval is the number of minutes a trashed object is retained;
  // a value of 0 disables the trash feature entirely.
  conf.set("fs.trash.interval", "10");
  FileSystem fs = FileSystem.getLocal(conf);
  Trash trash = new Trash(fs, conf);
  TrashPolicy trashPolicy = TrashPolicy.getInstance(conf, fs, fs.getHomeDirectory());
  Path trashPath = trashPolicy.getCurrentTrashDir();

  fs.mkdirs(hadoopUtilsTestDir);
  Assert.assertTrue(fs.exists(hadoopUtilsTestDir));
  // Moving the parent directory to trash also removes the test directory beneath it.
  trash.moveToTrash(hadoopUtilsTestDir.getParent());
  Assert.assertFalse(fs.exists(hadoopUtilsTestDir));
  Assert.assertTrue(fs.exists(trashPath));
}
 
Example 2
Source File: TestTrash.java    From RDFS with Apache License 2.0
public static void trashNonDefaultFS(Configuration conf) throws IOException {
  conf.set("fs.trash.interval", "10"); // 10 minute
  // attempt non-default FileSystem trash
  {
    final FileSystem lfs = FileSystem.getLocal(conf);
    Path p = TEST_DIR;
    Path f = new Path(p, "foo/bar");
    if (lfs.exists(p)) {
      lfs.delete(p, true);
    }
    try {
      f = writeFile(lfs, f);

      FileSystem.closeAll();
      FileSystem localFs = FileSystem.get(URI.create("file:///"), conf);
      Trash lTrash = new Trash(localFs, conf);
      lTrash.moveToTrash(f.getParent());
      checkTrash(localFs, lTrash.getCurrentTrashDir(), f);
    } finally {
      if (lfs.exists(p)) {
        lfs.delete(p, true);
      }
    }
  }
}
 
Example 3
Source File: ShardWriter.java    From linden with Apache License 2.0
public static void moveToTrash(Configuration conf, Path path) throws IOException {
  // Uses the default FileSystem configured in conf.
  Trash t = new Trash(conf);
  boolean isMoved = t.moveToTrash(path);
  // Delete trash checkpoints that are older than fs.trash.interval.
  t.expunge();
  if (!isMoved) {
    logger.error("Trash is not enabled or file is already in the trash.");
  }
}
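
For reference, recent Hadoop releases also expose the static helper Trash.moveToAppropriateTrash(FileSystem, Path, Configuration), which resolves the file system that owns the path before moving it to that file system's trash. Below is a hedged sketch of the helper above rewritten around it; the expunge() step is omitted, and the availability of the static method should be verified against your Hadoop version.

public static void moveToTrash(Configuration conf, Path path) throws IOException {
  // Resolve the FileSystem that owns 'path' and move the path to its trash.
  boolean isMoved = Trash.moveToAppropriateTrash(path.getFileSystem(conf), path, conf);
  if (!isMoved) {
    // Trash is not enabled (fs.trash.interval is 0) or the path is already in the trash.
    throw new IOException("Could not move " + path + " to trash");
  }
}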
 
Example 4
Source File: DistCpV1.java    From hadoop with Apache License 2.0
/**
 * Delete the dst files/dirs which do not exist in src
 * 
 * @return total count of files and directories deleted from destination
 * @throws IOException
 */
static private long deleteNonexisting(
    FileSystem dstfs, FileStatus dstroot, Path dstsorted,
    FileSystem jobfs, Path jobdir, JobConf jobconf, Configuration conf
    ) throws IOException {
  if (dstroot.isFile()) {
    throw new IOException("dst must be a directory when option "
        + Options.DELETE.cmd + " is set, but dst (= " + dstroot.getPath()
        + ") is not a directory.");
  }

  //write dst lsr results
  final Path dstlsr = new Path(jobdir, "_distcp_dst_lsr");
  try (final SequenceFile.Writer writer = SequenceFile.createWriter(jobconf,
      Writer.file(dstlsr), Writer.keyClass(Text.class),
      Writer.valueClass(NullWritable.class), Writer.compression(
      SequenceFile.CompressionType.NONE))) {
    //do lsr to get all file statuses in dstroot
    final Stack<FileStatus> lsrstack = new Stack<FileStatus>();
    for(lsrstack.push(dstroot); !lsrstack.isEmpty(); ) {
      final FileStatus status = lsrstack.pop();
      if (status.isDirectory()) {
        for(FileStatus child : dstfs.listStatus(status.getPath())) {
          String relative = makeRelative(dstroot.getPath(), child.getPath());
          writer.append(new Text(relative), NullWritable.get());
          lsrstack.push(child);
        }
      }
    }
  }

  //sort lsr results
  final Path sortedlsr = new Path(jobdir, "_distcp_dst_lsr_sorted");
  SequenceFile.Sorter sorter = new SequenceFile.Sorter(jobfs,
      new Text.Comparator(), Text.class, NullWritable.class, jobconf);
  sorter.sort(dstlsr, sortedlsr);

  //compare lsr list and dst list  
  long deletedPathsCount = 0;
  try (SequenceFile.Reader lsrin =
           new SequenceFile.Reader(jobconf, Reader.file(sortedlsr));
       SequenceFile.Reader  dstin =
           new SequenceFile.Reader(jobconf, Reader.file(dstsorted))) {
    //compare sorted lsr list and sorted dst list
    final Text lsrpath = new Text();
    final Text dstpath = new Text();
    final Text dstfrom = new Text();
    final Trash trash = new Trash(dstfs, conf);
    Path lastpath = null;

    boolean hasnext = dstin.next(dstpath, dstfrom);
    while (lsrin.next(lsrpath, NullWritable.get())) {
      int dst_cmp_lsr = dstpath.compareTo(lsrpath);
      while (hasnext && dst_cmp_lsr < 0) {
        hasnext = dstin.next(dstpath, dstfrom);
        dst_cmp_lsr = dstpath.compareTo(lsrpath);
      }
      
      if (dst_cmp_lsr == 0) {
        //lsrpath exists in dst, skip it
        hasnext = dstin.next(dstpath, dstfrom);
      } else {
        //lsrpath does not exist, delete it
        final Path rmpath = new Path(dstroot.getPath(), lsrpath.toString());
        ++deletedPathsCount;
        if ((lastpath == null || !isAncestorPath(lastpath, rmpath))) {
          if (!(trash.moveToTrash(rmpath) || dstfs.delete(rmpath, true))) {
            throw new IOException("Failed to delete " + rmpath);
          }
          lastpath = rmpath;
        }
      }
    }
  }
  return deletedPathsCount;
}
 
Example 5
Source File: HadoopUtils.java    From incubator-gobblin with Apache License 2.0
/**
 * Moves the object to the filesystem trash according to the file system policy.
 * @param fs FileSystem object
 * @param path Path to the object to be moved to trash.
 * @throws IOException
 */
public static void moveToTrash(FileSystem fs, Path path) throws IOException {
  Trash trash = new Trash(fs, new Configuration());
  trash.moveToTrash(path);
}
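
Note that the helper above discards the boolean returned by moveToTrash(), so callers cannot tell whether the path was actually moved. The DistCpV1 example above instead falls back to a permanent delete when the move fails; a hedged sketch of a similar variant (the method name moveToTrashOrDelete is made up for illustration) could look like this:

public static void moveToTrashOrDelete(FileSystem fs, Path path) throws IOException {
  Trash trash = new Trash(fs, new Configuration());
  // Fall back to a permanent, recursive delete when the trash move fails
  // (for example when fs.trash.interval is 0).
  if (!trash.moveToTrash(path) && !fs.delete(path, true)) {
    throw new IOException("Failed to delete " + path);
  }
}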