Java Code Examples for org.apache.hadoop.fs.FileSystem.delete()

The following are Java code examples showing how to use delete() of the org.apache.hadoop.fs.FileSystem class.
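Before diving into the examples, here is a minimal, self-contained sketch of the call pattern most of the snippets below share: resolve a FileSystem from a Configuration, then delete a path with the recursive flag and treat a false return as a failure. The class name and path are placeholders for illustration, not taken from any of the projects listed.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemDeleteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Resolves the default FileSystem (local unless fs.defaultFS points at HDFS).
    FileSystem fs = FileSystem.get(conf);
    Path target = new Path("/tmp/example-dir"); // placeholder path

    // delete(path, recursive): the recursive flag must be true to remove a
    // non-empty directory; the call returns true when the path was removed
    // and typically returns false when there was nothing to delete.
    if (fs.exists(target) && !fs.delete(target, true)) {
      throw new IOException("Failed to delete " + target);
    }
  }
}

As in several of the examples below, checking exists() before delete() keeps a missing path from being reported as a failed deletion.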
Example 1
Project: hadoop   File: TestFileSystemApplicationHistoryStore.java
private void initAndStartStore(final FileSystem fs) throws IOException,
    URISyntaxException {
  Configuration conf = new Configuration();
  fs.initialize(new URI("/"), conf);
  fsWorkingPath =
      new Path("target",
        TestFileSystemApplicationHistoryStore.class.getSimpleName());
  fs.delete(fsWorkingPath, true);
  conf.set(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI,
    fsWorkingPath.toString());
  store = new FileSystemApplicationHistoryStore() {
    @Override
    protected FileSystem getFileSystem(Path path, Configuration conf) {
      return fs;
    }
  };
  store.init(conf);
  store.start();
}
 
Example 2
Project: hadoop   File: TestMiniMRChildTask.java
/**
 * Test whether user-set environment variables are reflected in the child
 * processes. Mainly:
 *   - x=y (x can be an already existing env variable or a new one)
 *   - x=$x:y (replace $x with the current value of x)
 */
@Test
public void testTaskEnv() {
  try {
    JobConf conf = new JobConf(mr.getConfig());
    // initialize input, output directories
    Path inDir = new Path("testing/wc/input1");
    Path outDir = new Path("testing/wc/output1");
    FileSystem outFs = outDir.getFileSystem(conf);
    runTestTaskEnv(conf, inDir, outDir, false);
    outFs.delete(outDir, true);
  } catch(Exception e) {
    e.printStackTrace();
    fail("Exception in testing child env");
    tearDown();
  }
}
 
Example 3
Project: hadoop   File: PartialFileOutputCommitter.java
@Override
public void cleanUpPartialOutputForTask(TaskAttemptContext context)
    throws IOException {

  // We double-check that this is never invoked from a non-preemptable subclass.
  // This should never happen, since the invoking code checks it too,
  // but it is safer to double-check: errors here would produce
  // inconsistent output.

  if (!this.getClass().isAnnotationPresent(Checkpointable.class)) {
    throw new IllegalStateException("Invoking cleanUpPartialOutputForTask() " +
        "from non @Preemptable class");
  }
  FileSystem fs =
    fsFor(getTaskAttemptPath(context), context.getConfiguration());

  LOG.info("cleanUpPartialOutputForTask: removing everything belonging to " +
      context.getTaskAttemptID().getTaskID() + " in: " +
      getCommittedTaskPath(context).getParent());

  final TaskAttemptID taid = context.getTaskAttemptID();
  final TaskID tid = taid.getTaskID();
  Path pCommit = getCommittedTaskPath(context).getParent();
  // remove any committed output
  for (int i = 0; i < taid.getId(); ++i) {
    TaskAttemptID oldId = new TaskAttemptID(tid, i);
    Path pTask = new Path(pCommit, oldId.toString());
    if (fs.exists(pTask) && !fs.delete(pTask, true)) {
      throw new IOException("Failed to delete " + pTask);
    }
  }
}
 
Example 4
Project: Hydrograph   File: LingualSchemaCreatorTest.java
@AfterClass
public static void cleanUp() {
	System.gc();
	Configuration configuration = new Configuration();
	FileSystem fileSystem = null;

	try {
		fileSystem = FileSystem.get(configuration);
		Path deletingFilePath = new Path("testData/MetaData/");
		if (!fileSystem.exists(deletingFilePath)) {
			throw new PathNotFoundException(deletingFilePath.toString());
		} else {

			boolean isDeleted = fileSystem.delete(deletingFilePath, true);
			if (isDeleted) {
				fileSystem.deleteOnExit(deletingFilePath);
			}
		}
		fileSystem.close();
	} catch (IOException e) {
		e.printStackTrace();
	}
}
 
Example 5
Project: hadoop   File: TestUserDefinedCounters.java
private void cleanAndCreateInput(FileSystem fs) throws IOException {
  fs.delete(INPUT_DIR, true);
  fs.delete(OUTPUT_DIR, true);

  OutputStream os = fs.create(INPUT_FILE);

  Writer wr = new OutputStreamWriter(os);
  wr.write("hello1\n");
  wr.write("hello2\n");
  wr.write("hello3\n");
  wr.write("hello4\n");
  wr.close();
}
 
Example 6
Project: ditb   File: TestHFileSeek.java
private static FSDataOutputStream createFSOutput(Path name, FileSystem fs)
  throws IOException {
  if (fs.exists(name)) {
    fs.delete(name, true);
  }
  FSDataOutputStream fout = fs.create(name);
  return fout;
}
 
Example 7
Project: circus-train   File: S3MapReduceCpTestUtils.java
/** Creates a new, empty directory at dirPath and always overwrites */
public static void createDirectory(FileSystem fs, Path dirPath) throws IOException {
  fs.delete(dirPath, true);
  boolean created = fs.mkdirs(dirPath);
  if (!created) {
    LOG.warn("Could not create directory " + dirPath + " this might cause test failures.");
  }
}
 
Example 8
Project: circus-train   File: CopyListing.java
/**
 * Sort a sequence file containing Text and CopyListingFileStatus as key and value respectively
 *
 * @param fs File System
 * @param conf Configuration
 * @param sourceListing Source listing file
 * @return Path of the sorted file. This is the source file with _sorted appended to the name.
 * @throws IOException Any exception during sort.
 */
private static Path sortListing(FileSystem fs, Configuration conf, Path sourceListing) throws IOException {
  SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, Text.class, CopyListingFileStatus.class, conf);
  Path output = new Path(sourceListing.toString() + "_sorted");

  if (fs.exists(output)) {
    fs.delete(output, false);
  }

  sorter.sort(sourceListing, output);
  return output;
}
 
Example 9
Project: hadoop   File: TestKeyFieldBasedComparator.java
public void configure(String keySpec, int expect) throws Exception {
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = getFileSystem();
  fs.delete(testdir, true);
  conf.setInputFormat(TextInputFormat.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(LongWritable.class);

  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(1);

  conf.setOutputFormat(TextOutputFormat.class);
  conf.setOutputKeyComparatorClass(KeyFieldBasedComparator.class);
  conf.setKeyFieldComparatorOptions(keySpec);
  conf.setKeyFieldPartitionerOptions("-k1.1,1.1");
  conf.set(JobContext.MAP_OUTPUT_KEY_FIELD_SEPERATOR, " ");
  conf.setMapperClass(InverseMapper.class);
  conf.setReducerClass(IdentityReducer.class);
  if (!fs.mkdirs(testdir)) {
    throw new IOException("Mkdirs failed to create " + testdir.toString());
  }
  if (!fs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  // set up the input data (two lines written to a single input file)
  Path inFile = new Path(inDir, "part0");
  FileOutputStream fos = new FileOutputStream(inFile.toString());
  fos.write((line1 + "\n").getBytes());
  fos.write((line2 + "\n").getBytes());
  fos.close();
  JobClient jc = new JobClient(conf);
  RunningJob r_job = jc.submitJob(conf);
  while (!r_job.isComplete()) {
    Thread.sleep(1000);
  }
  
  if (!r_job.isSuccessful()) {
    fail("Oops! The job broke due to an unexpected error");
  }
  Path[] outputFiles = FileUtil.stat2Paths(
      getFileSystem().listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
  if (outputFiles.length > 0) {
    InputStream is = getFileSystem().open(outputFiles[0]);
    BufferedReader reader = new BufferedReader(new InputStreamReader(is));
    String line = reader.readLine();
    //make sure we get what we expect as the first line, and also
    //that we have two lines
    if (expect == 1) {
      assertTrue(line.startsWith(line1));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line2));
    }
    line = reader.readLine();
    if (expect == 1) {
      assertTrue(line.startsWith(line2));
    } else if (expect == 2) {
      assertTrue(line.startsWith(line1));
    }
    reader.close();
  }
}
 
Example 10
Project: hadoop   File: TestDistCpUtils.java
public static void delete(FileSystem fs, String path) {
  try {
    if (fs != null) {
      if (path != null) {
        fs.delete(new Path(path), true);
      }
    }
  } catch (IOException e) {
    LOG.warn("Exception encountered ", e);
  }
}
 
Example 11
Project: hadoop   File: SwiftTestUtils.java
public static void assertDeleted(FileSystem fs,
                                 Path file,
                                 boolean recursive) throws IOException {
  assertPathExists(fs, "about to be deleted file", file);
  boolean deleted = fs.delete(file, recursive);
  String dir = ls(fs, file.getParent());
  assertTrue("Delete failed on " + file + ": " + dir, deleted);
  assertPathDoesNotExist(fs, "Deleted file", file);
}
 
Example 12
Project: aliyun-maxcompute-data-collectors   File: TestBlobRef.java
public void testExternalSubdir() throws IOException {
  final byte [] DATA = { 1, 2, 3, 4, 5 };
  final String FILENAME = "_lob/blobdata";

  try {
    doExternalTest(DATA, FILENAME);
  } finally {
    // remove dir we made.
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);
    String tmpDir = System.getProperty("test.build.data", "/tmp/");
    Path lobDir = new Path(new Path(tmpDir), "_lob");
    fs.delete(lobDir, true);
  }
}
 
Example 13
Project: hadoop   File: TestFsck.java
/** Test if fsck can return -1 in case of failure
 * 
 * @throws Exception
 */
@Test
public void testFsckError() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    // bring up a one-node cluster
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    String fileName = "/test.txt";
    Path filePath = new Path(fileName);
    FileSystem fs = cluster.getFileSystem();
    
    // create a one-block file
    DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
    DFSTestUtil.waitReplication(fs, filePath, (short)1);
    
    // intentionally corrupt NN data structure
    INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
        (fileName, true);
    final BlockInfoContiguous[] blocks = node.getBlocks();
    assertEquals(blocks.length, 1);
    blocks[0].setNumBytes(-1L);  // set the block length to be negative
    
    // run fsck and expect a failure with -1 as the error code
    String outStr = runFsck(conf, -1, true, fileName);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
    
    // clean up file system
    fs.delete(filePath, true);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 14
Project: hadoop   File: CopyCommitter.java
private void deleteMissing(Configuration conf) throws IOException {
  LOG.info("-delete option is enabled. About to remove entries from " +
      "target that are missing in source");

  // Sort the source-file listing alphabetically.
  Path sourceListing = new Path(conf.get(DistCpConstants.CONF_LABEL_LISTING_FILE_PATH));
  FileSystem clusterFS = sourceListing.getFileSystem(conf);
  Path sortedSourceListing = DistCpUtils.sortListing(clusterFS, conf, sourceListing);

  // Similarly, create the listing of target-files. Sort alphabetically.
  Path targetListing = new Path(sourceListing.getParent(), "targetListing.seq");
  CopyListing target = new GlobbedCopyListing(new Configuration(conf), null);

  List<Path> targets = new ArrayList<Path>(1);
  Path targetFinalPath = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
  targets.add(targetFinalPath);
  DistCpOptions options = new DistCpOptions(targets, new Path("/NONE"));
  //
  // Set up options to be the same from CopyListing.buildListing's perspective,
  // so as to collect the same listings as when doing the copy
  //
  options.setOverwrite(overwrite);
  options.setSyncFolder(syncFolder);
  options.setTargetPathExists(targetPathExists);
  
  target.buildListing(targetListing, options);
  Path sortedTargetListing = DistCpUtils.sortListing(clusterFS, conf, targetListing);
  long totalLen = clusterFS.getFileStatus(sortedTargetListing).getLen();

  SequenceFile.Reader sourceReader = new SequenceFile.Reader(conf,
                               SequenceFile.Reader.file(sortedSourceListing));
  SequenceFile.Reader targetReader = new SequenceFile.Reader(conf,
                               SequenceFile.Reader.file(sortedTargetListing));

  // Walk both source and target file listings.
  // Delete everything from the target that doesn't also exist at the source.
  long deletedEntries = 0;
  try {
    CopyListingFileStatus srcFileStatus = new CopyListingFileStatus();
    Text srcRelPath = new Text();
    CopyListingFileStatus trgtFileStatus = new CopyListingFileStatus();
    Text trgtRelPath = new Text();

    FileSystem targetFS = targetFinalPath.getFileSystem(conf);
    boolean srcAvailable = sourceReader.next(srcRelPath, srcFileStatus);
    while (targetReader.next(trgtRelPath, trgtFileStatus)) {
      // Skip sources that don't exist on target.
      while (srcAvailable && trgtRelPath.compareTo(srcRelPath) > 0) {
        srcAvailable = sourceReader.next(srcRelPath, srcFileStatus);
      }

      if (srcAvailable && trgtRelPath.equals(srcRelPath)) continue;

      // Target doesn't exist at source. Delete.
      boolean result = (!targetFS.exists(trgtFileStatus.getPath()) ||
          targetFS.delete(trgtFileStatus.getPath(), true));
      if (result) {
        LOG.info("Deleted " + trgtFileStatus.getPath() + " - Missing at source");
        deletedEntries++;
      } else {
        throw new IOException("Unable to delete " + trgtFileStatus.getPath());
      }
      taskAttemptContext.progress();
      taskAttemptContext.setStatus("Deleting missing files from target. [" +
          targetReader.getPosition() * 100 / totalLen + "%]");
    }
  } finally {
    IOUtils.closeStream(sourceReader);
    IOUtils.closeStream(targetReader);
  }
  LOG.info("Deleted " + deletedEntries + " from target: " + targets.get(0));
}
 
Example 15
Project: flume-release-1.7.0   File: TestHDFSEventSink.java
private void slowAppendTestHelper(long appendTimeout)
    throws InterruptedException, IOException, LifecycleException, EventDeliveryException,
           IOException {
  final String fileName = "FlumeData";
  final long rollCount = 5;
  final long batchSize = 2;
  final int numBatches = 2;
  String newPath = testPath + "/singleBucket";
  int totalEvents = 0;
  int i = 1, j = 1;

  // clear the test directory
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(conf);
  Path dirPath = new Path(newPath);
  fs.delete(dirPath, true);
  fs.mkdirs(dirPath);

  // create HDFS sink with slow writer
  HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
  sink = new HDFSEventSink(badWriterFactory);

  Context context = new Context();
  context.put("hdfs.path", newPath);
  context.put("hdfs.filePrefix", fileName);
  context.put("hdfs.rollCount", String.valueOf(rollCount));
  context.put("hdfs.batchSize", String.valueOf(batchSize));
  context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
  context.put("hdfs.appendTimeout", String.valueOf(appendTimeout));
  Configurables.configure(sink, context);

  Channel channel = new MemoryChannel();
  Configurables.configure(channel, context);

  sink.setChannel(channel);
  sink.start();

  Calendar eventDate = Calendar.getInstance();
  List<String> bodies = Lists.newArrayList();
  // push the event batches into channel
  for (i = 0; i < numBatches; i++) {
    Transaction txn = channel.getTransaction();
    txn.begin();
    for (j = 1; j <= batchSize; j++) {
      Event event = new SimpleEvent();
      eventDate.clear();
      eventDate.set(2011, i, i, i, 0); // yy mm dd
      event.getHeaders().put("timestamp",
          String.valueOf(eventDate.getTimeInMillis()));
      event.getHeaders().put("hostname", "Host" + i);
      event.getHeaders().put("slow", "1500");
      String body = "Test." + i + "." + j;
      event.setBody(body.getBytes());
      bodies.add(body);
      channel.put(event);
      totalEvents++;
    }
    txn.commit();
    txn.close();

    // execute sink to process the events
    sink.process();
  }

  sink.stop();

  // loop through all the generated files and check their contents
  FileStatus[] dirStat = fs.listStatus(dirPath);
  Path[] fList = FileUtil.stat2Paths(dirStat);

  // check that the roll happened correctly for the given data
  // Note that we'll end up with two files with only a head
  long expectedFiles = totalEvents / rollCount;
  if (totalEvents % rollCount > 0) expectedFiles++;
  Assert.assertEquals("num files wrong, found: " +
      Lists.newArrayList(fList), expectedFiles, fList.length);
  verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
 
Example 16
Project: hadoop   File: DistributedPentomino.java
public int run(String[] args) throws Exception {
  Configuration conf = getConf();
  if (args.length == 0) {
    System.out.println("Usage: pentomino <output> [-depth #] [-height #] [-width #]");
    ToolRunner.printGenericCommandUsage(System.out);
    return 2;
  }
  // check for passed parameters, otherwise use defaults
  int width = conf.getInt(Pentomino.WIDTH, PENT_WIDTH);
  int height = conf.getInt(Pentomino.HEIGHT, PENT_HEIGHT);
  int depth = conf.getInt(Pentomino.DEPTH, PENT_DEPTH);
  for (int i = 0; i < args.length; i++) {
    if (args[i].equalsIgnoreCase("-depth")) {
      depth = Integer.parseInt(args[++i].trim());
    } else if (args[i].equalsIgnoreCase("-height")) {
      height = Integer.parseInt(args[++i].trim());
    } else if (args[i].equalsIgnoreCase("-width") ) {
      width = Integer.parseInt(args[++i].trim());
    }
  }
  // now set the values within conf for M/R tasks to read, this
  // will ensure values are set preventing MAPREDUCE-4678
  conf.setInt(Pentomino.WIDTH, width);
  conf.setInt(Pentomino.HEIGHT, height);
  conf.setInt(Pentomino.DEPTH, depth);
  Class<? extends Pentomino> pentClass = conf.getClass(Pentomino.CLASS, 
    OneSidedPentomino.class, Pentomino.class);
  int numMaps = conf.getInt(MRJobConfig.NUM_MAPS, DEFAULT_MAPS);
  Path output = new Path(args[0]);
  Path input = new Path(output + "_input");
  FileSystem fileSys = FileSystem.get(conf);
  try {
    Job job = Job.getInstance(conf);
    FileInputFormat.setInputPaths(job, input);
    FileOutputFormat.setOutputPath(job, output);
    job.setJarByClass(PentMap.class);
    
    job.setJobName("dancingElephant");
    Pentomino pent = ReflectionUtils.newInstance(pentClass, conf);
    pent.initialize(width, height);
    long inputSize = createInputDirectory(fileSys, input, pent, depth);
    // for forcing the number of maps
    FileInputFormat.setMaxInputSplitSize(job, (inputSize/numMaps));
 
    // the keys are the prefix strings
    job.setOutputKeyClass(Text.class);
    // the values are puzzle solutions
    job.setOutputValueClass(Text.class);
    
    job.setMapperClass(PentMap.class);        
    job.setReducerClass(Reducer.class);
    
    job.setNumReduceTasks(1);
    
    return (job.waitForCompletion(true) ? 0 : 1);
  } finally {
    fileSys.delete(input, true);
  }
}
 
Example 17
Project: hadoop   File: TestReplication.java
private void changeBlockLen(MiniDFSCluster cluster, int lenDelta)
    throws IOException, InterruptedException, TimeoutException {
  final Path fileName = new Path("/file1");
  final short REPLICATION_FACTOR = (short)1;
  final FileSystem fs = cluster.getFileSystem();
  final int fileLen = fs.getConf().getInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
  DFSTestUtil.createFile(fs, fileName, fileLen, REPLICATION_FACTOR, 0);
  DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);

  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);

  // Change the length of a replica
  for (int i=0; i<cluster.getDataNodes().size(); i++) {
    if (DFSTestUtil.changeReplicaLength(cluster, block, i, lenDelta)) {
      break;
    }
  }

  // increase the file's replication factor
  fs.setReplication(fileName, (short)(REPLICATION_FACTOR+1));

  // block replication triggers corrupt block detection
  DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", 
      cluster.getNameNodePort()), fs.getConf());
  LocatedBlocks blocks = dfsClient.getNamenode().getBlockLocations(
      fileName.toString(), 0, fileLen);
  if (lenDelta < 0) { // replica truncated
    while (!blocks.get(0).isCorrupt() ||
           REPLICATION_FACTOR != blocks.get(0).getLocations().length) {
      Thread.sleep(100);
      blocks = dfsClient.getNamenode().getBlockLocations(
          fileName.toString(), 0, fileLen);
    }
  } else { // no corruption detected; block replicated
    while (REPLICATION_FACTOR+1 != blocks.get(0).getLocations().length) {
      Thread.sleep(100);
      blocks = dfsClient.getNamenode().getBlockLocations(
          fileName.toString(), 0, fileLen);
    }
  }
  fs.delete(fileName, true);
}
 
Example 18
Project: hadoop   File: ViewFileSystemTestSetup.java
/**
 * Delete the test directory in the target FS.
 */
static public void tearDown(FileSystemTestHelper fileSystemTestHelper, FileSystem fsTarget) throws Exception {
  Path targetOfTests = fileSystemTestHelper.getTestRootPath(fsTarget);
  fsTarget.delete(targetOfTests, true);
}
 
Example 19
Project: hadoop   File: TestDecommissioningStatus.java
static private void cleanupFile(FileSystem fileSys, Path name)
    throws IOException {
  assertTrue(fileSys.exists(name));
  fileSys.delete(name, true);
  assertTrue(!fileSys.exists(name));
}
 
Example 20
Project: hadoop   File: SwiftTestUtils.java
/**
 * Write a file and read it back, validating the result. Optional flags control
 * whether file overwrite operations should be enabled, and whether the
 * file should be deleted afterwards.
 *
 * If there is a mismatch between what was written and what was expected,
 * a small range of bytes on either side of the first error is logged to aid
 * in diagnosing what went wrong - whether it was a previous file
 * or a corruption of the current file. This assumes that two
 * sequential runs to the same path use datasets with different character
 * moduli.
 *
 * @param fs filesystem
 * @param path path to write to
 * @param src source dataset to write
 * @param len length of data
 * @param blocksize block size to use when creating the file
 * @param overwrite should the create option allow overwrites?
 * @param delete should the file be deleted afterwards? - with a verification
 * that it worked. Deletion is not attempted if an assertion has failed
 * earlier - it is not in a <code>finally{}</code> block.
 * @throws IOException IO problems
 */
public static void writeAndRead(FileSystem fs,
                                Path path,
                                byte[] src,
                                int len,
                                int blocksize,
                                boolean overwrite,
                                boolean delete) throws IOException {
  fs.mkdirs(path.getParent());

  writeDataset(fs, path, src, len, blocksize, overwrite);

  byte[] dest = readDataset(fs, path, len);

  compareByteArrays(src, dest, len);

  if (delete) {
    boolean deleted = fs.delete(path, false);
    assertTrue("Deleted", deleted);
    assertPathDoesNotExist(fs, "Cleanup failed", path);
  }
}