Java Code Examples for org.apache.hadoop.fs.FileSystem.deleteOnExit()

The following are Java code examples showing how to use deleteOnExit() of the org.apache.hadoop.fs.FileSystem class.
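
Before the project examples, here is a minimal usage sketch (the class name, scratch path, and configuration are illustrative, not taken from any project below): a scratch file is created, marked with deleteOnExit(), and removed when the FileSystem is closed or the JVM shuts down.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteOnExitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Illustrative scratch path; any temporary path works the same way.
    Path tmp = new Path("/tmp/delete-on-exit-sketch-" + System.currentTimeMillis());
    fs.create(tmp).close(); // create an empty scratch file

    // Register the path; it is removed when fs.close() runs or the JVM exits.
    boolean marked = fs.deleteOnExit(tmp);
    System.out.println("Marked for delete-on-exit: " + marked);

    fs.close(); // processes the delete-on-exit set, removing the scratch file
  }
}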
Example 1
Project: ditb   File: HFileOutputFormat2.java
/**
 * Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
 * <code>splitPoints</code>. Cleans up the partitions file after the job exits.
 */
static void configurePartitioner(Job job, List<ImmutableBytesWritable> splitPoints)
    throws IOException {
  Configuration conf = job.getConfiguration();
  // create the partitions file
  FileSystem fs = FileSystem.get(conf);
  String hbaseTmpFsDir =
      conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
        HConstants.DEFAULT_TEMPORARY_HDFS_DIRECTORY);
  Path partitionsPath = new Path(hbaseTmpFsDir, "partitions_" + UUID.randomUUID());
  partitionsPath = fs.makeQualified(partitionsPath);
  writePartitions(conf, partitionsPath, splitPoints);
  fs.deleteOnExit(partitionsPath);

  // configure job to use it
  job.setPartitionerClass(TotalOrderPartitioner.class);
  TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
}
 
Example 2
Project: Hydrograph   File: LingualSchemaCreatorTest.java
@AfterClass
public static void cleanUp() {
	System.gc();
	Configuration configuration = new Configuration();
	FileSystem fileSystem = null;

	try {
		fileSystem = FileSystem.get(configuration);
		Path deletingFilePath = new Path("testData/MetaData/");
		if (!fileSystem.exists(deletingFilePath)) {
			throw new PathNotFoundException(deletingFilePath.toString());
		} else {

			boolean isDeleted = fileSystem.delete(deletingFilePath, true);
			if (isDeleted) {
				fileSystem.deleteOnExit(deletingFilePath);
			}
		}
		fileSystem.close();
	} catch (IOException e) {
		e.printStackTrace();
	}
}
 
Example 3
Project: scheduling-connector-for-hadoop   File: PBSApplicationMaster.java
@Override
public ApplicationMasterRegisterResponse registerApplicationMaster(
    ApplicationMasterRegisterRequest request) throws IOException {
  String amHost = request.getHost();
  int amRpcPort = request.getPort();
  String trackingUrl = request.getTrackingUrl();
  
  int jobid = appAttemptId.getApplicationId().getId();

  String jobStatusFileName = jobid + "__" + amRpcPort + "__" + amHost + "__"
      + URLEncoder.encode(trackingUrl, HPCConfiguration.CHAR_ENCODING);
  String jobStatusLocation = conf.get(
      YARN_APPLICATION_HPC_PBS_JOB_STATUS_FILES_LOCATION,
      DEFAULT_YARN_APPLICATION_HPC_PBS_JOB_STATUS_FILES_LOCATION);
  FileSystem fileSystem = FileSystem.get(conf);
  Path statusFile = new Path(jobStatusLocation, jobStatusFileName);
  fileSystem.createNewFile(statusFile);
  fileSystem.deleteOnExit(statusFile);

  ApplicationMasterRegisterResponse response = new ApplicationMasterRegisterResponse();
  response.setMaxCapability(getMaxCapability());
  response.setQueue("default");
  return response;
}
 
Example 4
Project: WIFIProbe   File: HDFSTool.java
/** Delete a file from HDFS. */
public static void deleteFromHdfs(String fileName) throws IOException {
    String dst = NodeConfig.HDFS_PATH + fileName;
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    fs.deleteOnExit(new Path(dst));
    fs.close();
}
 
Example 5
Project: hadoop   File: CleanerService.java
/**
 * To ensure there are not multiple instances of the SCM running on a given
 * cluster, a global pid file is used. This file contains the hostname of the
 * machine that owns the pid file.
 *
 * @return true if the pid file was written, false otherwise
 * @throws YarnException
 */
private boolean writeGlobalCleanerPidFile() throws YarnException {
  String root =
      conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
          YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
  Path pidPath = new Path(root, GLOBAL_CLEANER_PID);
  try {
    FileSystem fs = FileSystem.get(this.conf);

    if (fs.exists(pidPath)) {
      return false;
    }

    FSDataOutputStream os = fs.create(pidPath, false);
    // write the hostname and the process id in the global cleaner pid file
    final String ID = ManagementFactory.getRuntimeMXBean().getName();
    os.writeUTF(ID);
    os.close();
    // add it to the delete-on-exit to ensure it gets deleted when the JVM
    // exits
    fs.deleteOnExit(pidPath);
  } catch (IOException e) {
    throw new YarnException(e);
  }
  LOG.info("Created the global cleaner pid file at " + pidPath.toString());
  return true;
}
 
Example 6
Project: dremio-oss   File: HomeFileConfig.java
public FileSystem createFileSystem() throws IOException {
  FileSystem fs = FileSystemWrapper.get(location, fsConf);
  fs.mkdirs(new Path(location.getPath()), DEFAULT_PERMISSIONS);
  fs.mkdirs(stagingDir, DEFAULT_PERMISSIONS);
  fs.mkdirs(uploadsDir, DEFAULT_PERMISSIONS);
  fs.deleteOnExit(stagingDir);
  return fs;
}
 
Example 7
Project: ditb   File: IntegrationTestLoadAndVerify.java
public Path getTestDir(String testName, String subdir) throws IOException {
  Path testDir = util.getDataTestDirOnTestFS(testName);
  FileSystem fs = FileSystem.get(getConf());
  fs.deleteOnExit(testDir);

  return new Path(new Path(testDir, testName), subdir);
}
 
Example 8
Project: hadoop   File: TestFileCreation.java
/**
 * Test deleteOnExit
 */
@Test
public void testDeleteOnExit() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  FileSystem localfs = FileSystem.getLocal(conf);

  try {

    // Creates files in HDFS and local file system.
    //
    Path file1 = new Path("filestatus.dat");
    Path file2 = new Path("filestatus2.dat");
    Path file3 = new Path("filestatus3.dat");
    FSDataOutputStream stm1 = createFile(fs, file1, 1);
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    FSDataOutputStream stm3 = createFile(localfs, file3, 1);
    System.out.println("DeleteOnExit: Created files.");

    // write to file1 and file3, then close all streams; purposely, nothing is written to file2.
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();

    // set delete on exit flag on files.
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);

    // close the file system. This should make the above files
    // disappear.
    fs.close();
    localfs.close();
    fs = null;
    localfs = null;

    // reopen file system and verify that file does not exist.
    fs = cluster.getFileSystem();
    localfs = FileSystem.getLocal(conf);

    assertTrue(file1 + " still exists in spite of deleteOnExit being set.",
               !fs.exists(file1));
    assertTrue(file2 + " still exists in spite of deleteOnExit being set.",
               !fs.exists(file2));
    assertTrue(file3 + " still exists in spite of deleteOnExit being set.",
               !localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");

  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
 
Example 9
Project: hadoop   File: DistributedCacheEmulator.java
/**
 * Write the list of distributed cache files in the decreasing order of
 * file sizes into the sequence file. This file will be input to the job
 * {@link GenerateDistCacheData}.
 * Also reports an error if the -generate option is missing while distributed
 * cache files are missing.
 * @return exit code
 * @throws IOException
 */
private int writeDistCacheFilesList()
    throws IOException {
  // Sort the distributed cache files in the decreasing order of file sizes.
  List dcFiles = new ArrayList(distCacheFiles.entrySet());
  Collections.sort(dcFiles, new Comparator() {
    public int compare(Object dc1, Object dc2) {
      return ((Comparable) ((Map.Entry) (dc2)).getValue())
          .compareTo(((Map.Entry) (dc1)).getValue());
    }
  });

  // write the sorted distributed cache files to the sequence file
  FileSystem fs = FileSystem.get(conf);
  Path distCacheFilesList = new Path(distCachePath, "_distCacheFiles.txt");
  conf.set(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_LIST,
      distCacheFilesList.toString());
  SequenceFile.Writer src_writer = SequenceFile.createWriter(fs, conf,
      distCacheFilesList, LongWritable.class, BytesWritable.class,
      SequenceFile.CompressionType.NONE);

  // Total number of unique distributed cache files
  int fileCount = dcFiles.size();
  long byteCount = 0; // Total size of all distributed cache files
  long bytesSync = 0; // Bytes since the previous sync; used to decide when to add a sync marker

  for (Iterator it = dcFiles.iterator(); it.hasNext();) {
    Map.Entry entry = (Map.Entry)it.next();
    LongWritable fileSize =
        new LongWritable(Long.parseLong(entry.getValue().toString()));
    BytesWritable filePath =
        new BytesWritable(
        entry.getKey().toString().getBytes(charsetUTF8));

    byteCount += fileSize.get();
    bytesSync += fileSize.get();
    if (bytesSync > AVG_BYTES_PER_MAP) {
      src_writer.sync();
      bytesSync = fileSize.get();
    }
    src_writer.append(fileSize, filePath);
  }
  if (src_writer != null) {
    src_writer.close();
  }
  // Set delete on exit for 'dist cache files list' as it is not needed later.
  fs.deleteOnExit(distCacheFilesList);

  conf.setInt(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_COUNT, fileCount);
  conf.setLong(GenerateDistCacheData.GRIDMIX_DISTCACHE_BYTE_COUNT, byteCount);
  LOG.info("Number of HDFS based distributed cache files to be generated is "
      + fileCount + ". Total size of HDFS based distributed cache files "
      + "to be generated is " + byteCount);

  if (!shouldGenerateDistCacheData() && fileCount > 0) {
    LOG.error("Missing " + fileCount + " distributed cache files under the "
        + " directory\n" + distCachePath + "\nthat are needed for gridmix"
        + " to emulate distributed cache load. Either use -generate\noption"
        + " to generate distributed cache data along with input data OR "
        + "disable\ndistributed cache emulation by configuring '"
        + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
        + "' to false.");
    return Gridmix.MISSING_DIST_CACHE_FILES_ERROR;
  }
  return 0;
}
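 
A related note on the API: a path that has been registered with deleteOnExit() can be unregistered again with cancelDeleteOnExit(). A minimal sketch (the class, helper method, and path below are illustrative, not from any project above):

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class DeleteOnExitCancelSketch {
  /** Register a path for delete-on-exit, then unregister it again. */
  static void markAndUnmark(FileSystem fs) throws IOException {
    Path keep = new Path("/tmp/keep-me.txt"); // illustrative path
    fs.deleteOnExit(keep);       // scheduled for deletion on close()/JVM exit
    fs.cancelDeleteOnExit(keep); // unscheduled; the file will not be auto-deleted
  }
}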