Java Code Examples for org.apache.hadoop.fs.FileSystem.mkdirs()

The following are Java code examples showing how to use the mkdirs() method of the org.apache.hadoop.fs.FileSystem class. The examples are drawn from several open-source projects.
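Before the project examples, here is a minimal, self-contained sketch of the two common call patterns: the plain mkdirs(Path) overload and the mkdirs(Path, FsPermission) overload. The class name MkdirsSketch, the paths, and the permission value are placeholders chosen for illustration, not taken from any of the projects below; adjust them for your own cluster.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsSketch {
    public static void main(String[] args) throws IOException {
        // Placeholder path; point this at a real HDFS or local directory.
        Path dir = new Path("/tmp/mkdirs-example/a/b/c");

        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        try {
            // Creates the directory and any missing parent directories.
            // Returns true on success (and, for HDFS, also when the directory already exists).
            boolean created = fs.mkdirs(dir);
            System.out.println("mkdirs returned " + created);

            // Overload that applies an explicit permission to the new directory.
            boolean restricted = fs.mkdirs(new Path(dir, "restricted"),
                new FsPermission((short) 0750));
            System.out.println("mkdirs with permission returned " + restricted);

            // A static convenience form, FileSystem.mkdirs(fs, path, permission),
            // appears in several of the examples below.
        } finally {
            fs.close();
        }
    }
}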
Example 1
Project: Transwarp-Sample-Code   File: CreateDir.java
public static void main(String[] args) throws IOException {
    // Create an HDFS directory via the Java API
    Constant constant = new Constant();
    String rootPath = "hdfs://nameservice1";
    System.out.println(rootPath + constant.HDFS_LARGE_FILE_DIR);
    Path p = new Path(rootPath + constant.HDFS_LARGE_FILE_DIR);

    Configuration conf = new Configuration();
    conf.addResource("core-site.xml");
    conf.addResource("hdfs-site.xml");
    conf.addResource("yarn-site.xml");
    // If Kerberos is not enabled, comment out the two lines below
    FileSystem fs = p.getFileSystem(conf);
    boolean b = fs.mkdirs(p);
    System.out.println(b);
    fs.close();
}
 
Example 2
Project: ditb   File: TestCleanerChore.java
@Test
public void testSavesFilesOnRequest() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, NeverDelete.class.getName());

  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);

  // create the directory layout in the directory to clean
  Path parent = new Path(testDir, "parent");
  Path file = new Path(parent, "someFile");
  fs.mkdirs(parent);
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));

  // run the chore
  chore.chore();

  // verify the files were preserved (the NeverDelete delegate asks to keep everything)
  assertTrue("File should not have been deleted", fs.exists(file));
  assertTrue("Empty directory should not have been deleted", fs.exists(parent));
}
 
Example 3
Project: hadoop   File: FSAclBaseTest.java
@Test
public void testModifyAclEntriesOnlyDefault() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(DEFAULT, USER, "foo", ALL));
  fs.setAcl(path, aclSpec);
  aclSpec = Lists.newArrayList(
    aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
  fs.modifyAclEntries(path, aclSpec);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
    aclEntry(DEFAULT, USER, ALL),
    aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
    aclEntry(DEFAULT, GROUP, READ_EXECUTE),
    aclEntry(DEFAULT, MASK, READ_EXECUTE),
    aclEntry(DEFAULT, OTHER, NONE) }, returned);
  assertPermission((short)010750);
  assertAclFeature(true);
}
 
Example 4
Project: ditb   File: HBaseFsck.java
@Override
public FSDataOutputStream call() throws IOException {
  try {
    FileSystem fs = FSUtils.getCurrentFileSystem(getConf());
    FsPermission defaultPerms = FSUtils.getFilePermissions(fs, getConf(),
        HConstants.DATA_FILE_UMASK_KEY);
    Path tmpDir = new Path(FSUtils.getRootDir(getConf()), HConstants.HBASE_TEMP_DIRECTORY);
    fs.mkdirs(tmpDir);
    HBCK_LOCK_PATH = new Path(tmpDir, HBCK_LOCK_FILE);
    final FSDataOutputStream out = createFileWithRetries(fs, HBCK_LOCK_PATH, defaultPerms);
    out.writeBytes(InetAddress.getLocalHost().toString());
    out.flush();
    return out;
  } catch(RemoteException e) {
    if(AlreadyBeingCreatedException.class.getName().equals(e.getClassName())){
      return null;
    } else {
      throw e;
    }
  }
}
 
Example 5
Project: hadoop   File: TestMapRed.java
private void checkCompression(boolean compressMapOutputs,
                              CompressionType redCompression,
                              boolean includeCombine
                              ) throws Exception {
  JobConf conf = new JobConf(TestMapRed.class);
  Path testdir = new Path(TEST_DIR.getAbsolutePath());
  Path inDir = new Path(testdir, "in");
  Path outDir = new Path(testdir, "out");
  FileSystem fs = FileSystem.get(conf);
  fs.delete(testdir, true);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setMapperClass(MyMap.class);
  conf.setReducerClass(MyReduce.class);
  conf.setOutputKeyClass(Text.class);
  conf.setOutputValueClass(Text.class);
  conf.setOutputFormat(SequenceFileOutputFormat.class);
  conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
  if (includeCombine) {
    conf.setCombinerClass(IdentityReducer.class);
  }
  conf.setCompressMapOutput(compressMapOutputs);
  SequenceFileOutputFormat.setOutputCompressionType(conf, redCompression);
  try {
    if (!fs.mkdirs(testdir)) {
      throw new IOException("Mkdirs failed to create " + testdir.toString());
    }
    if (!fs.mkdirs(inDir)) {
      throw new IOException("Mkdirs failed to create " + inDir.toString());
    }
    Path inFile = new Path(inDir, "part0");
    DataOutputStream f = fs.create(inFile);
    f.writeBytes("Owen was here\n");
    f.writeBytes("Hadoop is fun\n");
    f.writeBytes("Is this done, yet?\n");
    f.close();
    RunningJob rj = JobClient.runJob(conf);
    assertTrue("job was complete", rj.isComplete());
    assertTrue("job was successful", rj.isSuccessful());
    Path output = new Path(outDir,
                           Task.getOutputName(0));
    assertTrue("reduce output exists " + output, fs.exists(output));
    SequenceFile.Reader rdr = 
      new SequenceFile.Reader(fs, output, conf);
    assertEquals("is reduce output compressed " + output, 
                 redCompression != CompressionType.NONE, 
                 rdr.isCompressed());
    rdr.close();
  } finally {
    fs.delete(testdir, true);
  }
}
 
Example 6
Project: hadoop   File: TestDistCpSystem.java
private void createFiles(FileSystem fs, String topdir,
    FileEntry[] entries) throws IOException {
  for (FileEntry entry : entries) {
    Path newpath = new Path(topdir + "/" + entry.getPath());
    if (entry.isDirectory()) {
      fs.mkdirs(newpath);
    } else {
      OutputStream out = fs.create(newpath);
      try {
        out.write((topdir + "/" + entry).getBytes());
        out.write("\n".getBytes());
      } finally {
        out.close();
      }
    }
  }
}
 
Example 7
Project: hadoop   File: FSAclBaseTest.java
@Test
public void testSetPermissionCannotSetAclBit() throws IOException {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  fs.setPermission(path, FsPermission.createImmutable((short)0700));
  assertPermission((short)0700);
  fs.setPermission(path,
    new FsPermissionExtension(FsPermission.
        createImmutable((short)0755), true, true));
  INode inode = cluster.getNamesystem().getFSDirectory().getINode(
      path.toUri().getPath(), false);
  assertNotNull(inode);
  FsPermission perm = inode.getFsPermission();
  assertNotNull(perm);
  assertEquals(0755, perm.toShort());
  assertEquals(0755, perm.toExtendedShort());
  assertAclFeature(false);
}
 
Example 8
Project: hadoop   File: ControlledJob.java
/**
 * Submit this job to mapred. The state becomes RUNNING if submission 
 * is successful, FAILED otherwise.  
 */
protected synchronized void submit() {
  try {
    Configuration conf = job.getConfiguration();
    if (conf.getBoolean(CREATE_DIR, false)) {
      FileSystem fs = FileSystem.get(conf);
      Path inputPaths[] = FileInputFormat.getInputPaths(job);
      for (int i = 0; i < inputPaths.length; i++) {
        if (!fs.exists(inputPaths[i])) {
          try {
            fs.mkdirs(inputPaths[i]);
          } catch (IOException e) {
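            // Ignored: if an input directory cannot be created here, the problem
            // will surface when the job itself is submitted and run.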

          }
        }
      }
    }
    job.submit();
    this.state = State.RUNNING;
  } catch (Exception ioe) {
    LOG.info(getJobName()+" got an error while submitting ",ioe);
    this.state = State.FAILED;
    this.message = StringUtils.stringifyException(ioe);
  }
}
 
Example 9
Project: hadoop   File: TestGlobbedCopyListing.java
private static void mkdirs(String path) throws Exception {
  FileSystem fileSystem = null;
  try {
    fileSystem = cluster.getFileSystem();
    fileSystem.mkdirs(new Path(path));
    recordInExpectedValues(path);
  }
  finally {
    IOUtils.cleanup(null, fileSystem);
  }
}
 
Example 10
Project: ditb   File: TestCleanerChore.java
/**
 * Test to make sure that we don't attempt to ask the delegate whether or not we should preserve a
 * directory.
 * @throws Exception on failure
 */
@Test
public void testDoesNotCheckDirectories() throws Exception {
  Stoppable stop = new StoppableImplementation();
  Configuration conf = UTIL.getConfiguration();
  Path testDir = UTIL.getDataTestDir();
  FileSystem fs = UTIL.getTestFileSystem();
  String confKey = "hbase.test.cleaner.delegates";
  conf.set(confKey, AlwaysDelete.class.getName());

  AllValidPaths chore = new AllValidPaths("test-file-cleaner", stop, conf, fs, testDir, confKey);
  // spy on the delegate to ensure that we don't check for directories
  AlwaysDelete delegate = (AlwaysDelete) chore.cleanersChain.get(0);
  AlwaysDelete spy = Mockito.spy(delegate);
  chore.cleanersChain.set(0, spy);

  // create the directory layout in the directory to clean
  Path parent = new Path(testDir, "parent");
  Path file = new Path(parent, "someFile");
  fs.mkdirs(parent);
  assertTrue("Test parent didn't get created.", fs.exists(parent));
  // touch a new file
  fs.create(file).close();
  assertTrue("Test file didn't get created.", fs.exists(file));
  
  FileStatus fStat = fs.getFileStatus(parent);
  chore.chore();
  // make sure we never checked the directory
  Mockito.verify(spy, Mockito.never()).isFileDeletable(fStat);
  Mockito.reset(spy);
}
 
Example 11
Project: hadoop   File: FileOutputCommitter.java
private void renameOrMerge(FileSystem fs, FileStatus from, Path to)
    throws IOException {
  if (algorithmVersion == 1) {
    if (!fs.rename(from.getPath(), to)) {
      throw new IOException("Failed to rename " + from + " to " + to);
    }
  } else {
    fs.mkdirs(to);
    for (FileStatus subFrom : fs.listStatus(from.getPath())) {
      Path subTo = new Path(to, subFrom.getPath().getName());
      mergePaths(fs, subFrom, subTo);
    }
  }
}
 
Example 12
Project: hadoop   File: AbstractContractRootDirectoryTest.java
@Test
public void testMkDirDepth1() throws Throwable {
  FileSystem fs = getFileSystem();
  Path dir = new Path("/testmkdirdepth1");
  assertPathDoesNotExist("directory already exists", dir);
  fs.mkdirs(dir);
  ContractTestUtils.assertIsDirectory(getFileSystem(), dir);
  assertPathExists("directory was not created", dir);
  assertDeleted(dir, true);
}
 
Example 13
Project: ditb   File: TestSnapshotHFileCleaner.java
@Test
public void testFindsSnapshotFilesWhenCleaning() throws IOException {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  Path rootDir = FSUtils.getRootDir(conf);
  Path archivedHfileDir = new Path(TEST_UTIL.getDataTestDir(), HConstants.HFILE_ARCHIVE_DIRECTORY);

  FileSystem fs = FileSystem.get(conf);
  SnapshotHFileCleaner cleaner = new SnapshotHFileCleaner();
  cleaner.setConf(conf);

  // write an hfile to the snapshot directory
  String snapshotName = "snapshot";
  byte[] snapshot = Bytes.toBytes(snapshotName);
  TableName tableName = TableName.valueOf("table");
  Path snapshotDir = SnapshotDescriptionUtils.getCompletedSnapshotDir(snapshotName, rootDir);
  HRegionInfo mockRegion = new HRegionInfo(tableName);
  Path regionSnapshotDir = new Path(snapshotDir, mockRegion.getEncodedName());
  Path familyDir = new Path(regionSnapshotDir, "family");
  // create a reference to a supposedly valid hfile
  String hfile = "fd1e73e8a96c486090c5cec07b4894c4";
  Path refFile = new Path(familyDir, hfile);

  // make sure the reference file exists
  fs.create(refFile);

  // create the hfile in the archive
  fs.mkdirs(archivedHfileDir);
  fs.createNewFile(new Path(archivedHfileDir, hfile));

  // make sure that the file isn't deletable
  assertFalse(cleaner.isFileDeletable(fs.getFileStatus(refFile)));
}
 
Example 14
Project: aliyun-maxcompute-data-collectors   File: TestImportJob.java
public void testFailedIllegalColumns() throws IOException {
  // Make sure that if a MapReduce job to do the import fails due
  // to an IOException, we tell the user about it.

  // Create a table to attempt to import.
  createTableForColType("VARCHAR(32)", "'meep'");

  Configuration conf = new Configuration();

  // Make the output dir exist so we know the job will fail via IOException.
  Path outputPath = new Path(new Path(getWarehouseDir()), getTableName());
  FileSystem fs = FileSystem.getLocal(conf);
  fs.mkdirs(outputPath);

  assertTrue(fs.exists(outputPath));

  // DATA_COL0 is ok, but zyzzyva is not a valid column name.
  String [] argv = getArgv(true, new String [] { "DATA_COL0", "zyzzyva" },
      conf);

  Sqoop importer = new Sqoop(new ImportTool());
  try {
    int ret = Sqoop.runSqoop(importer, argv);
    assertTrue("Expected job to fail due bad colname.", 1==ret);
  } catch (Exception e) {
    // In debug mode, IOException is wrapped in RuntimeException.
    LOG.info("Got exceptional return (expected: ok). msg is: " + e);
  }
}
 
Example 15
Project: hadoop   File: TestCheckpoint.java
/**
 * Test that the 2NN triggers a checkpoint after the configurable interval
 */
@Test(timeout=30000)
public void testCheckpointTriggerOnTxnCount() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  Configuration conf = new HdfsConfiguration();

  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 10);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .format(true).build();
    FileSystem fs = cluster.getFileSystem();
    secondary = startSecondaryNameNode(conf);
    secondary.startCheckpointThread();
    final NNStorage storage = secondary.getFSImage().getStorage();

    // 2NN should checkpoint at startup
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        LOG.info("Waiting for checkpoint txn id to go to 2");
        return storage.getMostRecentCheckpointTxId() == 2;
      }
    }, 200, 15000);

    // If we make 10 transactions, it should checkpoint again
    for (int i = 0; i < 10; i++) {
      fs.mkdirs(new Path("/test" + i));
    }
    
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        LOG.info("Waiting for checkpoint txn id to go > 2");
        return storage.getMostRecentCheckpointTxId() > 2;
      }
    }, 200, 15000);
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 16
Project: hadoop   File: TestAclWithSnapshot.java
@Test
public void testOriginalAclEnforcedForSnapshotContentsAfterChange()
    throws Exception {
  Path filePath = new Path(path, "file1");
  Path subdirPath = new Path(path, "subdir1");
  Path fileSnapshotPath = new Path(snapshotPath, "file1");
  Path subdirSnapshotPath = new Path(snapshotPath, "subdir1");
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777));
  FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
    .close();
  FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable(
    (short)0700));
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, READ_EXECUTE),
    aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
    aclEntry(ACCESS, GROUP, NONE),
    aclEntry(ACCESS, OTHER, NONE));
  hdfs.setAcl(filePath, aclSpec);
  hdfs.setAcl(subdirPath, aclSpec);

  assertFilePermissionGranted(fsAsBruce, BRUCE, filePath);
  assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
  assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath);
  assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);

  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);

  // Both original and snapshot still have same ACL.
  AclEntry[] expected = new AclEntry[] {
    aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
    aclEntry(ACCESS, GROUP, NONE) };
  AclStatus s = hdfs.getAclStatus(filePath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, returned);
  assertPermission((short)010550, filePath);

  s = hdfs.getAclStatus(subdirPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, returned);
  assertPermission((short)010550, subdirPath);

  s = hdfs.getAclStatus(fileSnapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, returned);
  assertPermission((short)010550, fileSnapshotPath);
  assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
  assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);

  s = hdfs.getAclStatus(subdirSnapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, returned);
  assertPermission((short)010550, subdirSnapshotPath);
  assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
  assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);

  aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, READ_EXECUTE),
    aclEntry(ACCESS, USER, "diana", ALL),
    aclEntry(ACCESS, GROUP, NONE),
    aclEntry(ACCESS, OTHER, NONE));
  hdfs.setAcl(filePath, aclSpec);
  hdfs.setAcl(subdirPath, aclSpec);

  // Original has changed, but snapshot still has old ACL.
  doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
    subdirSnapshotPath);
  restart(false);
  doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
    subdirSnapshotPath);
  restart(true);
  doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
    subdirSnapshotPath);
}
 
Example 17
Project: ditb   File: TestHFileArchiving.java
/**
 * Test HFileArchiver.resolveAndArchive() race condition HBASE-7643
 */
@Test
public void testCleaningRace() throws Exception {
  final long TEST_TIME = 20 * 1000;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");

  Configuration conf = UTIL.getMiniHBaseCluster().getMaster().getConfiguration();
  Path rootDir = UTIL.getDataTestDirOnTestFS("testCleaningRace");
  FileSystem fs = UTIL.getTestFileSystem();

  Path archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
  Path regionDir = new Path(FSUtils.getTableDir(new Path("./"),
      TableName.valueOf("table")), "abcdef");
  Path familyDir = new Path(regionDir, "cf");

  Path sourceRegionDir = new Path(rootDir, regionDir);
  fs.mkdirs(sourceRegionDir);

  Stoppable stoppable = new StoppableImplementation();

  // The cleaner should be looping without long pauses to reproduce the race condition.
  HFileCleaner cleaner = new HFileCleaner(1, stoppable, conf, fs, archiveDir);
  try {
    choreService.scheduleChore(cleaner);

    // Keep creating/archiving new files while the cleaner is running in the other thread
    long startTime = System.currentTimeMillis();
    for (long fid = 0; (System.currentTimeMillis() - startTime) < TEST_TIME; ++fid) {
      Path file = new Path(familyDir,  String.valueOf(fid));
      Path sourceFile = new Path(rootDir, file);
      Path archiveFile = new Path(archiveDir, file);

      fs.createNewFile(sourceFile);

      try {
        // Try to archive the file
        HFileArchiver.archiveRegion(fs, rootDir,
            sourceRegionDir.getParent(), sourceRegionDir);

        // The archiver succeeded: the file is no longer in the original location
        // but is now in the archive location.
        LOG.debug("hfile=" + fid + " should be in the archive");
        assertTrue(fs.exists(archiveFile));
        assertFalse(fs.exists(sourceFile));
      } catch (IOException e) {
        // The archiver was unable to archive the file, probably due to the HBASE-7643
        // race condition. In this case, the file should not be archived, and we should
        // still have it in the original location.
        LOG.debug("hfile=" + fid + " should be in the source location");
        assertFalse(fs.exists(archiveFile));
        assertTrue(fs.exists(sourceFile));

        // Remove the file so it does not carry over into the next iteration
        fs.delete(sourceFile, false);
      }
    }
  } finally {
    stoppable.stop("test end");
    cleaner.cancel(true);
    choreService.shutdown();
    fs.delete(rootDir, true);
  }
}
 
Example 18
Project: hadoop   File: TestMapReduceChain.java
private static void cleanFlags(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  fs.delete(flagDir, true);
  fs.mkdirs(flagDir);
}
 
Example 19
Project: hadoop   File: TestTextOutputFormat.java
@Test
public void testFormat() throws Exception {
  JobConf job = new JobConf();
  job.set(JobContext.TASK_ATTEMPT_ID, attempt);
  FileOutputFormat.setOutputPath(job, workDir.getParent().getParent());
  FileOutputFormat.setWorkOutputPath(job, workDir);
  FileSystem fs = workDir.getFileSystem(job);
  if (!fs.mkdirs(workDir)) {
    fail("Failed to create output directory");
  }
  String file = "test_format.txt";

  // A reporter that does nothing
  Reporter reporter = Reporter.NULL;

  TextOutputFormat<Object,Object> theOutputFormat = new TextOutputFormat<Object,Object>();
  RecordWriter<Object,Object> theRecordWriter =
    theOutputFormat.getRecordWriter(localFs, job, file, reporter);

  Text key1 = new Text("key1");
  Text key2 = new Text("key2");
  Text val1 = new Text("val1");
  Text val2 = new Text("val2");
  NullWritable nullWritable = NullWritable.get();

  try {
    theRecordWriter.write(key1, val1);
    theRecordWriter.write(null, nullWritable);
    theRecordWriter.write(null, val1);
    theRecordWriter.write(nullWritable, val2);
    theRecordWriter.write(key2, nullWritable);
    theRecordWriter.write(key1, null);
    theRecordWriter.write(null, null);
    theRecordWriter.write(key2, val2);

  } finally {
    theRecordWriter.close(reporter);
  }
  File expectedFile = new File(new Path(workDir, file).toString());
  StringBuffer expectedOutput = new StringBuffer();
  expectedOutput.append(key1).append('\t').append(val1).append("\n");
  expectedOutput.append(val1).append("\n");
  expectedOutput.append(val2).append("\n");
  expectedOutput.append(key2).append("\n");
  expectedOutput.append(key1).append("\n");
  expectedOutput.append(key2).append('\t').append(val2).append("\n");
  String output = UtilsForTests.slurp(expectedFile);
  assertEquals(expectedOutput.toString(), output);

}
 
Example 20
Project: hadoop   File: FSOperations.java
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return <code>true</code> if the mkdirs operation was successful,
 *         <code>false</code> otherwise.
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public JSONObject execute(FileSystem fs) throws IOException {
  FsPermission fsPermission = new FsPermission(permission);
  boolean mkdirs = fs.mkdirs(path, fsPermission);
  return toJSON(HttpFSFileSystem.MKDIRS_JSON, mkdirs);
}