Java Code Examples for org.apache.hadoop.fs.FileSystem#deleteOnExit()

The following examples show how to use org.apache.hadoop.fs.FileSystem#deleteOnExit(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
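
Before working through the examples, here is a minimal, self-contained sketch (paths are placeholders, not taken from any of the projects below) of how deleteOnExit() behaves: the call only marks a path for deletion, and the actual delete happens when the FileSystem instance is closed, either explicitly via close() or, for the cached instance, via Hadoop's shutdown hook at JVM exit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class DeleteOnExitSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path scratch = new Path("/tmp/scratch-" + System.currentTimeMillis()); // placeholder path
        FileSystem fs = scratch.getFileSystem(conf);

        fs.mkdirs(scratch);
        // Returns true if the path exists and was queued for deletion; nothing is removed yet.
        boolean marked = fs.deleteOnExit(scratch);
        System.out.println("Marked for deletion: " + marked);

        // Closing the FileSystem processes the pending deletes (recursively).
        fs.close();
    }
}
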
Example 1
Source File: MultiHfileOutputFormat.java    From phoenix with Apache License 2.0
/**
 * Configure <code>job</code> with a TotalOrderPartitioner, partitioning against
 * <code>splitPoints</code>. Cleans up the partitions file after the job exits.
 */
static void configurePartitioner(Job job, Set<TableRowkeyPair> tablesStartKeys)
        throws IOException {
    
    Configuration conf = job.getConfiguration();
    // create the partitions file
    Path partitionsPath = new Path(conf.get("hadoop.tmp.dir"), "partitions_" + UUID.randomUUID());
    FileSystem fs = partitionsPath.getFileSystem(conf);
    partitionsPath = fs.makeQualified(partitionsPath);
    writePartitions(conf, partitionsPath, tablesStartKeys);
    fs.deleteOnExit(partitionsPath);

    // configure job to use it
    job.setPartitionerClass(TotalOrderPartitioner.class);
    TotalOrderPartitioner.setPartitionFile(conf, partitionsPath);
}
 
Example 2
Source File: TestDirectoryTraversal.java    From RDFS with Apache License 2.0
public void verifyFileRetrival(Path root, FileSystem fs) throws Exception {
  for (int i = 0; i < NUM_TESTS; ++i) {
    fs.delete(root, true);
    fs.mkdirs(root);
    fs.deleteOnExit(root);
    try {
      dirsCreated.clear();
      filesCreated.clear();
      createDirectoryTree(root, 5, 5, 0.3, 0.3, fs);
      LOG.info("Files created:" + filesCreated.size());
      DirectoryTraversal dt =
        DirectoryTraversal.fileRetriever(Arrays.asList(root), fs, 5, true, true);
      FileStatus file;
      int dirCount = 0;
      while ((file = dt.next()) != DirectoryTraversal.FINISH_TOKEN) {
        LOG.info("Get " + file.getPath().toString().replace(TEST_DIR, ""));
        dirCount += 1;
        String name = getSimpleName(file);
        assertTrue(filesCreated.remove(name));
      }
      assertEquals(0, filesCreated.size());
    } finally {
      fs.delete(root, true);
    }
  }
}
 
Example 3
Source File: HiveTableSink.java    From flink with Apache License 2.0
private String toStagingDir(String finalDir, Configuration conf) throws IOException {
	String res = finalDir;
	if (!finalDir.endsWith(Path.SEPARATOR)) {
		res += Path.SEPARATOR;
	}
	// TODO: may append something more meaningful than a timestamp, like query ID
	res += ".staging_" + System.currentTimeMillis();
	Path path = new Path(res);
	FileSystem fs = path.getFileSystem(conf);
	Preconditions.checkState(fs.exists(path) || fs.mkdirs(path), "Failed to create staging dir " + path);
	fs.deleteOnExit(path);
	return res;
}
 
Example 4
Source File: TestCreateIndex.java    From tajo with Apache License 2.0
private static void assertIndexNotExist(String databaseName, String indexName) throws IOException {
  Path indexPath = new Path(conf.getVar(ConfVars.WAREHOUSE_DIR), databaseName + "/" + indexName);
  FileSystem fs = indexPath.getFileSystem(conf);
  if (fs.exists(indexPath)) {
    fs.deleteOnExit(indexPath);
    assertFalse("Index is not deleted from the file system.", true);
  }
}
 
Example 5
Source File: HdfsUtil.java    From spring-boot-tutorial with Creative Commons Attribution Share Alike 4.0 International
/**
 * Delete a file.
 *
 * @param path path of the file to delete
 * @return true / false
 * @throws Exception
 */
public boolean deleteOnExit(@NotBlank String path) throws Exception {
    FileSystem fileSystem = null;
    try {
        fileSystem = this.hdfsPool.borrowObject();
        return fileSystem.deleteOnExit(new Path(path));
    } finally {
        if (fileSystem != null) { this.hdfsPool.returnObject(fileSystem); }
    }
}
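
A caveat with a pooled or cached FileSystem like the one above: deleteOnExit() only registers the path, so nothing is removed when the instance is returned to the pool; the delete runs when that FileSystem is eventually closed. A short sketch with hypothetical paths contrasting the two calls (recent Hadoop versions also provide cancelDeleteOnExit() to withdraw the marker):

// Immediate removal: delete(path, recursive) acts right away.
fileSystem.delete(new Path("/tmp/report.tmp"), false);

// Deferred removal: nothing is deleted until fileSystem.close() runs,
// which for a pooled instance may happen long after returnObject().
Path staging = new Path("/tmp/staging-123");
fileSystem.deleteOnExit(staging);

// Withdraw the marker if the path should be kept after all.
fileSystem.cancelDeleteOnExit(staging);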
 
Example 6
Source File: BulkInputFormat.java    From datawave with Apache License 2.0
/**
 * Initialize the user, table, and authorization information for the configuration object that will be used with an Accumulo InputFormat.
 * 
 * @param job
 *            the Hadoop Job object
 * @param user
 *            a valid accumulo user
 * @param passwd
 *            the user's password
 * @param table
 *            the table to read
 * @param auths
 *            the authorizations used to restrict data read
 */
public static void setInputInfo(Job job, String user, byte[] passwd, String table, Authorizations auths) {
    Configuration conf = job.getConfiguration();
    if (conf.getBoolean(INPUT_INFO_HAS_BEEN_SET, false))
        throw new IllegalStateException("Input info can only be set once per job");
    conf.setBoolean(INPUT_INFO_HAS_BEEN_SET, true);
    
    ArgumentChecker.notNull(user, passwd, table);
    conf.set(USERNAME, user);
    conf.set("accumulo.username", user);
    conf.set(TABLE_NAME, table);
    if (auths != null && !auths.isEmpty())
        conf.set(AUTHORIZATIONS, auths.toString());
    
    try {
        FileSystem fs = FileSystem.get(conf);
        String workingDirectory = conf.get(WORKING_DIRECTORY, fs.getWorkingDirectory().toString());
        Path work = new Path(workingDirectory);
        Path file = new Path(work, conf.get("mapreduce.job.name") + System.currentTimeMillis() + ".pw");
        conf.set(PASSWORD_PATH, file.toString());
        fs = FileSystem.get(file.toUri(), conf);
        byte[] encodedPw = Base64.encodeBase64(passwd);
        try (FSDataOutputStream fos = fs.create(file, false)) {
            fs.setPermission(file, new FsPermission(FsAction.ALL, FsAction.NONE, FsAction.NONE));
            fs.deleteOnExit(file);
            fos.writeInt(encodedPw.length);
            fos.write(encodedPw);
        }
        
        conf.set("accumulo.password", new String(encodedPw));
        
        job.addCacheFile(file.toUri());
    } catch (IOException ioe) {
        throw new RuntimeException(ioe);
    }
    
}
 
Example 7
Source File: GCPCredentialCopier.java    From circus-train with Apache License 2.0
private void copyCredentialIntoHdfs(FileSystem fs, Path source, Path destination) throws IOException {
  /*
   * The Google credentials file must be present in HDFS so that the DistCP map reduce job can access it upon
   * replication.
   */
  Path destinationFolder = destination.getParent();
  fs.deleteOnExit(destinationFolder);
  LOG.debug("Copying credential into HDFS {}", destination);
  fs.copyFromLocalFile(source, destination);
}
 
Example 8
Source File: HDFSTool.java    From WIFIProbe with Apache License 2.0
/** Delete a file from HDFS. */
public static void deleteFromHdfs(String fileName) throws IOException {
    String dst = NodeConfig.HDFS_PATH + fileName;
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    fs.deleteOnExit(new Path(dst));
    fs.close();
}
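
Because fs.close() is called right after deleteOnExit() here, the path is removed as soon as the FileSystem shuts down (Hadoop processes pending delete-on-exit entries recursively inside close()). Under that reading, a direct recursive delete is a roughly equivalent, hypothetical variant of the method above:

public static void deleteFromHdfsNow(String fileName) throws IOException {
    String dst = NodeConfig.HDFS_PATH + fileName;
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create(dst), conf);
    // Delete immediately instead of deferring to close().
    fs.delete(new Path(dst), true);
    fs.close();
}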
 
Example 9
Source File: BackupUtils.java    From hbase with Apache License 2.0
public static Path getBulkOutputDir(String tableName, Configuration conf, boolean deleteOnExit)
    throws IOException {
  FileSystem fs = FileSystem.get(conf);
  String tmp = conf.get(HConstants.TEMPORARY_FS_DIRECTORY_KEY,
          fs.getHomeDirectory() + "/hbase-staging");
  Path path =
      new Path(tmp + Path.SEPARATOR + "bulk_output-" + tableName + "-"
          + EnvironmentEdgeManager.currentTime());
  if (deleteOnExit) {
    fs.deleteOnExit(path);
  }
  return path;
}
 
Example 10
Source File: CleanerService.java    From big-c with Apache License 2.0
/**
 * To ensure there are not multiple instances of the SCM running on a given
 * cluster, a global pid file is used. This file contains the hostname of the
 * machine that owns the pid file.
 *
 * @return true if the pid file was written, false otherwise
 * @throws YarnException
 */
private boolean writeGlobalCleanerPidFile() throws YarnException {
  String root =
      conf.get(YarnConfiguration.SHARED_CACHE_ROOT,
          YarnConfiguration.DEFAULT_SHARED_CACHE_ROOT);
  Path pidPath = new Path(root, GLOBAL_CLEANER_PID);
  try {
    FileSystem fs = FileSystem.get(this.conf);

    if (fs.exists(pidPath)) {
      return false;
    }

    FSDataOutputStream os = fs.create(pidPath, false);
    // write the hostname and the process id in the global cleaner pid file
    final String ID = ManagementFactory.getRuntimeMXBean().getName();
    os.writeUTF(ID);
    os.close();
    // add it to the delete-on-exit to ensure it gets deleted when the JVM
    // exits
    fs.deleteOnExit(pidPath);
  } catch (IOException e) {
    throw new YarnException(e);
  }
  LOG.info("Created the global cleaner pid file at " + pidPath.toString());
  return true;
}
 
Example 11
Source File: GeneralFileActionDemo.java    From JavaBase with MIT License
public static boolean deleteByURL(String url) throws IOException {
  FileSystem fs = FileSystem.get(getConfig(url));
  Path path = new Path(url);
  boolean isDeleted = fs.deleteOnExit(path);
  fs.close();
  return isDeleted;
}
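
Note that the boolean returned here only reports whether the path existed and was queued for deletion; the file is actually removed by the fs.close() call that follows. If a caller needs to confirm the delete took effect, one option (a sketch reusing the example's getConfig() helper) is to re-check with a fresh, uncached FileSystem afterwards:

// Sketch: confirm the deferred delete happened after deleteByURL(url) has returned.
try (FileSystem check = FileSystem.newInstance(getConfig(url))) {
    boolean gone = !check.exists(new Path(url));
    // gone should be true once the original FileSystem was closed.
}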
 
Example 12
Source File: HDFS.java    From incubator-retired-pirk with Apache License 2.0
public static void writeFile(Collection<String> elements, FileSystem fs, String path, boolean deleteOnExit)
{
  Path filePath = new Path(path);

  try
  {
    // create writer
    BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fs.create(filePath, true)));

    // write each element on a new line
    for (String element : elements)
    {
      bw.write(element);
      bw.newLine();
    }

    bw.close();

    // delete file once the filesystem is closed
    if (deleteOnExit)
    {
      fs.deleteOnExit(filePath);
    }

  } catch (IOException e)
  {
    e.printStackTrace();
  }
}
 
Example 13
Source File: HDFS.java    From incubator-retired-pirk with Apache License 2.0
public static void writeFile(Map<String,Integer> sortedMap, FileSystem fs, String path, boolean deleteOnExit)
{
  Path filePath = new Path(path);

  try
  {
    // create writer
    BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(fs.create(filePath, true)));

    // write each element on a new line
    for (Entry<String,Integer> entry : sortedMap.entrySet())
    {
      bw.write(entry.getKey() + "," + entry.getValue());
      bw.newLine();
    }
    bw.close();

    // delete file once the filesystem is closed
    if (deleteOnExit)
    {
      fs.deleteOnExit(filePath);
    }

  } catch (IOException e)
  {
    e.printStackTrace();
  }
}
 
Example 14
Source File: QueueOrchestrator.java    From sequenceiq-samples with Apache License 2.0
public JobID submitJobsIntoQueues(String queueName, Path tempDir) throws Exception {
    Configuration priorityConf = this.getConfiguration(queueName);

    /*
     * Listing queues with YarnClient, not so useful here:
     *
     * YarnClient yarnClient = new YarnClientImpl();
     * yarnClient.init(priorityConf);
     * yarnClient.start();
     *
     * List<QueueInfo> queues = yarnClient.getAllQueues();
     * for (QueueInfo queueInfo : queues) {
     *     LOGGER.info("Queue information (name, capacity, current capacity): " + queueInfo.getQueueName()
     *         + " " + queueInfo.getCapacity() + " " + queueInfo.getCurrentCapacity());
     * }
     */

    FileSystem fs = FileSystem.get(priorityConf);

    try {
        // submit the MR app to the given queue
        JobID jobID = QuasiMonteCarlo.submitPiEstimationMRApp("PiEstimation into: " + queueName, 10, 3,
            tempDir, priorityConf);
        return jobID;
    } finally {
        fs.deleteOnExit(tempDir);
        // yarnClient.close();
        LOGGER.debug("Deleting temp dir and closing YARN client");
    }
}
 
Example 15
Source File: TestJobCounters.java    From big-c with Apache License 2.0
/**
 * Tests {@link TaskCounter}'s {@link TaskCounter.COMMITTED_HEAP_BYTES}. 
 * The test consists of running a low-memory job which consumes less heap 
 * memory and then running a high-memory job which consumes more heap memory, 
 * and then ensuring that COMMITTED_HEAP_BYTES of low-memory job is smaller 
 * than that of the high-memory job.
 * @throws IOException
 */
@Test
@SuppressWarnings("deprecation")
public void testHeapUsageCounter() throws Exception {
  JobConf conf = new JobConf();
  // create a local filesystem handle
  FileSystem fileSystem = FileSystem.getLocal(conf);
  
  // define test root directories
  Path rootDir =
    new Path(System.getProperty("test.build.data", "/tmp"));
  Path testRootDir = new Path(rootDir, "testHeapUsageCounter");
  // cleanup the test root directory
  fileSystem.delete(testRootDir, true);
  // set the current working directory
  fileSystem.setWorkingDirectory(testRootDir);
  
  fileSystem.deleteOnExit(testRootDir);
  
  // create a mini cluster using the local file system
  MiniMRCluster mrCluster = 
    new MiniMRCluster(1, fileSystem.getUri().toString(), 1);
  
  try {
    conf = mrCluster.createJobConf();
    JobClient jobClient = new JobClient(conf);

    // define job input
    Path inDir = new Path(testRootDir, "in");
    // create input data
    createWordsFile(inDir, conf);

    // configure and run a low memory job which will run without loading the
    // jvm's heap
    RunningJob lowMemJob = 
      runHeapUsageTestJob(conf, testRootDir, "-Xms32m -Xmx1G", 
                          0, 0, fileSystem, jobClient, inDir);
    JobID lowMemJobID = lowMemJob.getID();
    long lowMemJobMapHeapUsage = getTaskCounterUsage(jobClient, lowMemJobID, 
                                                     1, 0, TaskType.MAP);
    System.out.println("Job1 (low memory job) map task heap usage: " 
                       + lowMemJobMapHeapUsage);
    long lowMemJobReduceHeapUsage =
      getTaskCounterUsage(jobClient, lowMemJobID, 1, 0, TaskType.REDUCE);
    System.out.println("Job1 (low memory job) reduce task heap usage: " 
                       + lowMemJobReduceHeapUsage);

    // configure and run a high memory job which will load the jvm's heap
    RunningJob highMemJob = 
      runHeapUsageTestJob(conf, testRootDir, "-Xms32m -Xmx1G", 
                          lowMemJobMapHeapUsage + 256*1024*1024, 
                          lowMemJobReduceHeapUsage + 256*1024*1024,
                          fileSystem, jobClient, inDir);
    JobID highMemJobID = highMemJob.getID();

    long highMemJobMapHeapUsage = getTaskCounterUsage(jobClient, highMemJobID,
                                                      1, 0, TaskType.MAP);
    System.out.println("Job2 (high memory job) map task heap usage: " 
                       + highMemJobMapHeapUsage);
    long highMemJobReduceHeapUsage =
      getTaskCounterUsage(jobClient, highMemJobID, 1, 0, TaskType.REDUCE);
    System.out.println("Job2 (high memory job) reduce task heap usage: " 
                       + highMemJobReduceHeapUsage);

    assertTrue("Incorrect map heap usage reported by the map task", 
               lowMemJobMapHeapUsage < highMemJobMapHeapUsage);

    assertTrue("Incorrect reduce heap usage reported by the reduce task", 
               lowMemJobReduceHeapUsage < highMemJobReduceHeapUsage);
  } finally {
    // shutdown the mr cluster
    mrCluster.shutdown();
    try {
      fileSystem.delete(testRootDir, true);
    } catch (IOException ioe) {} 
  }
}
 
Example 16
Source File: TestFileCreation.java    From hadoop-gpu with Apache License 2.0
/**
 * Test deleteOnExit
 */
public void testDeleteOnExit() throws IOException {
  Configuration conf = new Configuration();
  if (simulatedStorage) {
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  }
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fs = cluster.getFileSystem();
  FileSystem localfs = FileSystem.getLocal(conf);

  try {

    // Creates files in HDFS and local file system.
    //
    Path file1 = new Path("filestatus.dat");
    Path file2 = new Path("filestatus2.dat");
    Path file3 = new Path("filestatus3.dat");
    FSDataOutputStream stm1 = createFile(fs, file1, 1);
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    FSDataOutputStream stm3 = createFile(localfs, file3, 1);
    System.out.println("DeleteOnExit: Created files.");

    // write to files and close. Purposely, do not write to file2.
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();

    // set delete on exit flag on files.
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);

    // close the file system. This should make the above files
    // disappear.
    fs.close();
    localfs.close();
    fs = null;
    localfs = null;

    // reopen file system and verify that file does not exist.
    fs = cluster.getFileSystem();
    localfs = FileSystem.getLocal(conf);

    assertTrue(file1 + " still exists in spite of deleteOnExit being set.",
               !fs.exists(file1));
    assertTrue(file2 + " still exists in spite of deleteOnExit being set.",
               !fs.exists(file2));
    assertTrue(file3 + " still exists in spite of deleteOnExit being set.",
               !localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");

  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
 
Example 17
Source File: TestTezClient.java    From tez with Apache License 2.0
private void _testTezClientSessionLargeDAGPlan(int maxIPCMsgSize, int payloadSize, int amResourceSize,
                                             boolean shouldSerialize) throws Exception {
  TezConfiguration conf = new TezConfiguration();
  conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, maxIPCMsgSize);
  conf.set(TezConfiguration.TEZ_AM_STAGING_DIR, "target/"+this.getClass().getName());
  TezClientForTest client = configureAndCreateTezClient(null, true, conf);

  Map<String, LocalResource> localResourceMap = new HashMap<>();
  byte[] bytes = new byte[amResourceSize];
  Arrays.fill(bytes, (byte)1);
  String lrName = new String(bytes);
  localResourceMap.put(lrName, LocalResource.newInstance(URL.newInstance("file", "localhost", 0, "/test"),
      LocalResourceType.FILE, LocalResourceVisibility.PUBLIC, 1, 1));

  ProcessorDescriptor processorDescriptor = ProcessorDescriptor.create("P");
  processorDescriptor.setUserPayload(UserPayload.create(ByteBuffer.allocate(payloadSize)));
  Vertex vertex = Vertex.create("Vertex", processorDescriptor, 1, Resource.newInstance(1, 1));
  DAG dag = DAG.create("DAG").addVertex(vertex);

  client.start();
  client.addAppMasterLocalFiles(localResourceMap);
  client.submitDAG(dag);
  client.stop();

  ArgumentCaptor<SubmitDAGRequestProto> captor = ArgumentCaptor.forClass(SubmitDAGRequestProto.class);
  verify(client.sessionAmProxy).submitDAG((RpcController)any(), captor.capture());
  SubmitDAGRequestProto request = captor.getValue();

  if (shouldSerialize) {
    /* we need manually delete the serialized dagplan since staging path here won't be destroyed */
    Path dagPlanPath = new Path(request.getSerializedRequestPath());
    FileSystem fs = FileSystem.getLocal(conf);
    fs.deleteOnExit(dagPlanPath);
    fs.delete(dagPlanPath, false);

    assertTrue(request.hasSerializedRequestPath());
    assertFalse(request.hasDAGPlan());
    assertFalse(request.hasAdditionalAmResources());
  } else {
    assertFalse(request.hasSerializedRequestPath());
    assertTrue(request.hasDAGPlan());
    assertTrue(request.hasAdditionalAmResources());
  }
}
 
Example 18
Source File: DistributedCacheEmulator.java    From hadoop with Apache License 2.0
/**
 * Write the list of distributed cache files in the decreasing order of
 * file sizes into the sequence file. This file will be input to the job
 * {@link GenerateDistCacheData}.
 * Also validates if -generate option is missing and distributed cache files
 * are missing.
 * @return exit code
 * @throws IOException
 */
private int writeDistCacheFilesList()
    throws IOException {
  // Sort the distributed cache files in the decreasing order of file sizes.
  List dcFiles = new ArrayList(distCacheFiles.entrySet());
  Collections.sort(dcFiles, new Comparator() {
    public int compare(Object dc1, Object dc2) {
      return ((Comparable) ((Map.Entry) (dc2)).getValue())
          .compareTo(((Map.Entry) (dc1)).getValue());
    }
  });

  // write the sorted distributed cache files to the sequence file
  FileSystem fs = FileSystem.get(conf);
  Path distCacheFilesList = new Path(distCachePath, "_distCacheFiles.txt");
  conf.set(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_LIST,
      distCacheFilesList.toString());
  SequenceFile.Writer src_writer = SequenceFile.createWriter(fs, conf,
      distCacheFilesList, LongWritable.class, BytesWritable.class,
      SequenceFile.CompressionType.NONE);

  // Total number of unique distributed cache files
  int fileCount = dcFiles.size();
  long byteCount = 0;// Total size of all distributed cache files
  long bytesSync = 0;// Bytes after previous sync;used to add sync marker

  for (Iterator it = dcFiles.iterator(); it.hasNext();) {
    Map.Entry entry = (Map.Entry)it.next();
    LongWritable fileSize =
        new LongWritable(Long.parseLong(entry.getValue().toString()));
    BytesWritable filePath =
        new BytesWritable(
        entry.getKey().toString().getBytes(charsetUTF8));

    byteCount += fileSize.get();
    bytesSync += fileSize.get();
    if (bytesSync > AVG_BYTES_PER_MAP) {
      src_writer.sync();
      bytesSync = fileSize.get();
    }
    src_writer.append(fileSize, filePath);
  }
  if (src_writer != null) {
    src_writer.close();
  }
  // Set delete on exit for 'dist cache files list' as it is not needed later.
  fs.deleteOnExit(distCacheFilesList);

  conf.setInt(GenerateDistCacheData.GRIDMIX_DISTCACHE_FILE_COUNT, fileCount);
  conf.setLong(GenerateDistCacheData.GRIDMIX_DISTCACHE_BYTE_COUNT, byteCount);
  LOG.info("Number of HDFS based distributed cache files to be generated is "
      + fileCount + ". Total size of HDFS based distributed cache files "
      + "to be generated is " + byteCount);

  if (!shouldGenerateDistCacheData() && fileCount > 0) {
    LOG.error("Missing " + fileCount + " distributed cache files under the "
        + " directory\n" + distCachePath + "\nthat are needed for gridmix"
        + " to emulate distributed cache load. Either use -generate\noption"
        + " to generate distributed cache data along with input data OR "
        + "disable\ndistributed cache emulation by configuring '"
        + DistributedCacheEmulator.GRIDMIX_EMULATE_DISTRIBUTEDCACHE
        + "' to false.");
    return Gridmix.MISSING_DIST_CACHE_FILES_ERROR;
  }
  return 0;
}
 
Example 19
Source File: TestFileCreation.java    From hadoop with Apache License 2.0
/**
 * Test deleteOnExit
 */
@Test
public void testDeleteOnExit() throws IOException {
  Configuration conf = new HdfsConfiguration();
  if (simulatedStorage) {
    SimulatedFSDataset.setFactory(conf);
  }
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  FileSystem localfs = FileSystem.getLocal(conf);

  try {

    // Creates files in HDFS and local file system.
    //
    Path file1 = new Path("filestatus.dat");
    Path file2 = new Path("filestatus2.dat");
    Path file3 = new Path("filestatus3.dat");
    FSDataOutputStream stm1 = createFile(fs, file1, 1);
    FSDataOutputStream stm2 = createFile(fs, file2, 1);
    FSDataOutputStream stm3 = createFile(localfs, file3, 1);
    System.out.println("DeleteOnExit: Created files.");

    // write to files and close. Purposely, do not write to file2.
    writeFile(stm1);
    writeFile(stm3);
    stm1.close();
    stm2.close();
    stm3.close();

    // set delete on exit flag on files.
    fs.deleteOnExit(file1);
    fs.deleteOnExit(file2);
    localfs.deleteOnExit(file3);

    // close the file system. This should make the above files
    // disappear.
    fs.close();
    localfs.close();
    fs = null;
    localfs = null;

    // reopen file system and verify that file does not exist.
    fs = cluster.getFileSystem();
    localfs = FileSystem.getLocal(conf);

    assertTrue(file1 + " still exists in spite of deleteOnExit being set.",
               !fs.exists(file1));
    assertTrue(file2 + " still exists in spite of deleteOnExit being set.",
               !fs.exists(file2));
    assertTrue(file3 + " still exists in spite of deleteOnExit being set.",
               !localfs.exists(file3));
    System.out.println("DeleteOnExit successful.");

  } finally {
    IOUtils.closeStream(fs);
    IOUtils.closeStream(localfs);
    cluster.shutdown();
  }
}
 