Java Code Examples for org.apache.hadoop.fs.FileSystem#mkdirs()

The following examples show how to use org.apache.hadoop.fs.FileSystem#mkdirs(). They are drawn from open source projects; the project and source file are noted above each example.
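
For orientation, here is a minimal, self-contained sketch of the basic call (the class name and path are illustrative, not taken from any example below):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirsExample {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path dir = new Path("/tmp/example");  // hypothetical path

    // mkdirs() has "mkdir -p" semantics: missing parents are created, and an
    // already-existing directory is reported as success.
    if (!fs.mkdirs(dir, new FsPermission((short) 0755))) {
      throw new IOException("Mkdirs failed to create " + dir);
    }
  }
}
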
Example 1
Source File: TestHttpFSServer.java    From hadoop with Apache License 2.0
@Test
@TestDir
@TestJetty
@TestHdfs
public void testOpenOffsetLength() throws Exception {
  createHttpFSServer(false);

  byte[] array = new byte[]{0, 1, 2, 3};
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path("/tmp"));
  OutputStream os = fs.create(new Path("/tmp/foo"));
  os.write(array);
  os.close();

  String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  URL url = new URL(TestJettyHelper.getJettyURL(),
                    MessageFormat.format("/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  InputStream is = conn.getInputStream();
  Assert.assertEquals(1, is.read());
  Assert.assertEquals(2, is.read());
  Assert.assertEquals(-1, is.read());
}
 
Example 2
Source File: HdfsInputDeployer.java    From celos with Apache License 2.0
@Override
public void deploy(TestRun testRun) throws Exception {
    FileSystem fileSystem = testRun.getCiContext().getFileSystem();

    CollectFilesAndPathsProcessor pathToFile = new CollectFilesAndPathsProcessor();
    TreeObjectProcessor.process(fixObjectCreator.create(testRun), pathToFile);

    Path pathPrefixed = new Path(Util.augmentHdfsPath(testRun.getHdfsPrefix(), path.toString()));
    for (java.nio.file.Path childPath: pathToFile.pathToFiles.keySet()) {
        Path pathTo = new Path(pathPrefixed, childPath.toString());
        fileSystem.mkdirs(pathTo.getParent());

        FSDataOutputStream outputStream = fileSystem.create(pathTo);
        try {
            IOUtils.copy(pathToFile.pathToFiles.get(childPath).getContent(), outputStream);
        } finally {
            outputStream.flush();
            outputStream.close();
        }

    }
}
 
Example 3
Source File: TestFileInputFormat.java    From big-c with Apache License 2.0
public static List<Path> configureTestErrorOnNonExistantDir(Configuration conf,
    FileSystem localFs) throws IOException {
  Path base1 = new Path(TEST_ROOT_DIR, "input1");
  Path base2 = new Path(TEST_ROOT_DIR, "input2");
  conf.set(org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR,
      localFs.makeQualified(base1) + "," + localFs.makeQualified(base2));
  conf.setBoolean(
      org.apache.hadoop.mapreduce.lib.input.FileInputFormat.INPUT_DIR_RECURSIVE,
      true);
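  // Only base1 is created below; base2 is deliberately left missing so the
  // input format errors out on the non-existent input directory.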
  localFs.mkdirs(base1);

  Path inFile1 = new Path(base1, "file1");
  Path inFile2 = new Path(base1, "file2");

  localFs.createNewFile(inFile1);
  localFs.createNewFile(inFile2);

  List<Path> expectedPaths = Lists.newArrayList();
  return expectedPaths;
}
 
Example 4
Source File: HadoopUtils.java    From incubator-gobblin with Apache License 2.0
/**
 * Renames {@code from} to {@code to} if {@code to} does not exist. The check and the rename are not atomic, so this method is not thread-safe.
 *
 * @param fs filesystem where rename will be executed.
 * @param from origin {@link Path}.
 * @param to target {@link Path}.
 * @return true if rename succeeded, false if the target already exists.
 * @throws IOException if rename failed for reasons other than target exists.
 */
public static boolean unsafeRenameIfNotExists(FileSystem fs, Path from, Path to) throws IOException {
  if (!fs.exists(to)) {
    if (!fs.exists(to.getParent())) {
      fs.mkdirs(to.getParent());
    }

    if (!renamePathHandleLocalFSRace(fs, from, to)) {
      if (!fs.exists(to)) {
        throw new IOException(String.format("Failed to rename %s to %s.", from, to));
      }

      return false;
    }
    return true;
  }
  return false;
}
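
A note on the guard above: the FileSystem contract gives mkdirs() "mkdir -p" semantics, so missing ancestors are created and an already-existing directory is not an error. The exists() check on the parent is therefore defensive rather than required; the guarded pair could be collapsed to:

  // mkdirs() creates any missing ancestors and succeeds if the directory
  // already exists, so the surrounding exists() check is optional.
  fs.mkdirs(to.getParent());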
 
Example 5
Source File: TestCatalogJanitor.java    From hbase with Apache License 2.0
private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir)
  throws IOException {
  // get the existing store files
  FileSystem fs = services.getMasterFileSystem().getFileSystem();
  fs.mkdirs(storedir);
  // create the store files in the parent
  for (int i = 0; i < count; i++) {
    Path storeFile = new Path(storedir, "_store" + i);
    FSDataOutputStream dos = fs.create(storeFile, true);
    dos.writeBytes("Some data: " + i);
    dos.close();
  }
  LOG.debug("Adding " + count + " store files to the storedir:" + storedir);
  // make sure the mock store files are there
  FileStatus[] storeFiles = fs.listStatus(storedir);
  assertEquals("Didn't have expected store files", count, storeFiles.length);
  return storeFiles;
}
 
Example 6
Source File: TestAccessController.java    From hbase with Apache License 2.0
@Override
public Object run() throws Exception {
  FileSystem fs = TEST_UTIL.getTestFileSystem();
  fs.mkdirs(testDataDir);
  fs.setPermission(testDataDir, FS_PERMISSION_ALL);
  // Assumes the test table won't split within this key range
  byte[][][] hfileRanges = { { { (byte) 0 }, { (byte) 9 } } };
  Path bulkLoadBasePath = new Path(testDataDir, new Path(User.getCurrent().getName()));
  new BulkLoadHelper(bulkLoadBasePath)
      .initHFileData(TEST_FAMILY, TEST_QUALIFIER, hfileRanges, 3, filePermission)
      .bulkLoadHFile(TEST_TABLE);
  return null;
}
 
Example 7
Source File: TestMasterRules.java    From tajo with Apache License 2.0
protected void createTajoDirectories(TajoConf tajoConf) throws Exception {
  Path tajoRootDir = new Path(rootFilePath, "tajo-root");
  FileSystem rootFs = tajoRootDir.getFileSystem(tajoConf);
  FsPermission defaultPermission = FsPermission.createImmutable((short)0700);
  
  if (!rootFs.exists(tajoRootDir)) {
    rootFs.mkdirs(tajoRootDir, new FsPermission(defaultPermission));
  }
  
  tajoConf.setVar(ConfVars.ROOT_DIR, tajoRootDir.toUri().toString());
  
  Path tajoSystemDir = new Path(tajoRootDir, TajoConstants.SYSTEM_DIR_NAME);
  if (!rootFs.exists(tajoSystemDir)) {
    rootFs.mkdirs(tajoSystemDir, new FsPermission(defaultPermission));
  }
  
  Path tajoSystemResourceDir = new Path(tajoSystemDir, TajoConstants.SYSTEM_RESOURCE_DIR_NAME);
  if (!rootFs.exists(tajoSystemResourceDir)) {
    rootFs.mkdirs(tajoSystemResourceDir, new FsPermission(defaultPermission));
  }
  
  Path tajoWarehouseDir = new Path(tajoRootDir, TajoConstants.WAREHOUSE_DIR_NAME);
  if (!rootFs.exists(tajoWarehouseDir)) {
    rootFs.mkdirs(tajoWarehouseDir, new FsPermission(defaultPermission));
  }
  
  Path tajoStagingDir = new Path(tajoRootDir, "staging");
  if (!rootFs.exists(tajoStagingDir)) {
    rootFs.mkdirs(tajoStagingDir, new FsPermission(defaultPermission));
  }
  tajoConf.setVar(ConfVars.STAGING_ROOT_DIR, tajoStagingDir.toUri().toString());
}
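
Each directory above is created with the same exists()-then-mkdirs() guard; a small helper (hypothetical, not part of the Tajo source) captures the pattern:

private static void ensureDirectory(FileSystem fs, Path dir, FsPermission permission)
    throws IOException {
  // Create the directory with the given permission only if it does not exist yet.
  if (!fs.exists(dir)) {
    fs.mkdirs(dir, new FsPermission(permission));
  }
}

createTajoDirectories() could then call ensureDirectory(rootFs, tajoSystemDir, defaultPermission) once per directory.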
 
Example 8
Source File: TestMiniMRChildTask.java    From RDFS with Apache License 2.0
private void configure(JobConf conf, Path inDir, Path outDir, String input,
                       Class<? extends Mapper> map, 
                       Class<? extends Reducer> reduce) 
throws IOException {
  // set up the input file system and write input text.
  FileSystem inFs = inDir.getFileSystem(conf);
  FileSystem outFs = outDir.getFileSystem(conf);
  outFs.delete(outDir, true);
  if (!inFs.mkdirs(inDir)) {
    throw new IOException("Mkdirs failed to create " + inDir.toString());
  }
  {
    // write input into input file
    DataOutputStream file = inFs.create(new Path(inDir, "part-0"));
    file.writeBytes(input);
    file.close();
  }

  // configure the mapred Job which creates a tempfile in map.
  conf.setJobName("testmap");
  conf.setMapperClass(map);
  conf.setReducerClass(reduce);
  conf.setNumMapTasks(1);
  conf.setNumReduceTasks(0);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  String TEST_ROOT_DIR = new Path(System.getProperty("test.build.data",
                                    "/tmp")).toString().replace(' ', '+');
  conf.set("test.build.data", TEST_ROOT_DIR);
}
 
Example 9
Source File: TestDFSMkdirs.java    From hadoop-gpu with Apache License 2.0
/**
 * Tests that mkdirs can create a directory that does not exist and will
 * not create a subdirectory off a file.
 */
public void testDFSMkdirs() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  FileSystem fileSys = cluster.getFileSystem();
  try {
    // First create a new directory with mkdirs
    Path myPath = new Path("/test/mkdirs");
    assertTrue(fileSys.mkdirs(myPath));
    assertTrue(fileSys.exists(myPath));
    assertTrue(fileSys.mkdirs(myPath));

    // Second, create a file in that directory.
    Path myFile = new Path("/test/mkdirs/myFile");
    writeFile(fileSys, myFile);
 
    // Third, use mkdir to create a subdirectory off of that file,
    // and check that it fails.
    Path myIllegalPath = new Path("/test/mkdirs/myFile/subdir");
    boolean exist = true;
    try {
      fileSys.mkdirs(myIllegalPath);
    } catch (IOException e) {
      exist = false;
    }
    assertFalse(exist);
    assertFalse(fileSys.exists(myIllegalPath));
    fileSys.delete(myFile, true);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 10
Source File: MergeTool.java    From rya with Apache License 2.0
/**
 * Imports the child files that hold the table data into the parent instance as a temporary table.
 * @param childTableName the name of the child table to import into a temporary parent table.
 * @throws Exception
 */
public void importChildFilesToTempParentTable(final String childTableName) throws Exception {
    // Create a temporary table in the parent instance to import the child files to.  Then run the merge process on the parent table and temp child table.
    final String tempChildTable = childTableName + TEMP_SUFFIX;

    createTempTableIfNeeded(tempChildTable);

    final AccumuloRdfConfiguration parentAccumuloRdfConfiguration = new AccumuloRdfConfiguration(conf);
    parentAccumuloRdfConfiguration.setTablePrefix(childTablePrefix);
    final Connector parentConnector = AccumuloRyaUtils.setupConnector(parentAccumuloRdfConfiguration);
    final TableOperations parentTableOperations = parentConnector.tableOperations();

    final Path localWorkDir = CopyTool.getPath(localMergeFileImportDir, childTableName);
    final Path hdfsBaseWorkDir = CopyTool.getPath(baseImportDir, childTableName);

    CopyTool.copyLocalToHdfs(localWorkDir, hdfsBaseWorkDir, conf);

    final Path files = CopyTool.getPath(hdfsBaseWorkDir.toString(), "files");
    final Path failures = CopyTool.getPath(hdfsBaseWorkDir.toString(), "failures");
    final FileSystem fs = FileSystem.get(conf);
    // With HDFS permissions on, we need to make sure the Accumulo user can read/move the files
    fs.setPermission(hdfsBaseWorkDir, new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL));
    if (fs.exists(failures)) {
        fs.delete(failures, true);
    }
    fs.mkdirs(failures);

    parentTableOperations.importDirectory(tempChildTable, files.toString(), failures.toString(), false);

    AccumuloRyaUtils.printTablePretty(tempChildTable, conf);
}
 
Example 11
Source File: BuildGlobalHiveDictTotalBuildJob.java    From kylin with Apache License 2.0
private void setInput(Job job, String input) throws IOException {
    Path path = new Path(input);
    FileSystem fs = path.getFileSystem(job.getConfiguration());
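    // Create the input path if it is missing; FileInputFormat treats a
    // non-existent input path as an error.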
    if (!fs.exists(path)) {
        fs.mkdirs(path);
    }
    FileInputFormat.setInputPaths(job, getOptionValue(OPTION_INPUT_PATH));
}
 
Example 12
Source File: TestDistCpUtils.java    From hadoop with Apache License 2.0
/** Creates a new, empty directory at dirPath and always overwrites */
public static void createDirectory(FileSystem fs, Path dirPath) throws IOException {
  fs.delete(dirPath, true);
  boolean created = fs.mkdirs(dirPath);
  if (!created) {
    LOG.warn("Could not create directory " + dirPath + " this might cause test failures.");
  }
}
 
Example 13
Source File: FSAclBaseTest.java    From big-c with Apache License 2.0
@Test
public void testRemoveDefaultAclMinimal() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  fs.removeDefaultAcl(path);
  AclStatus s = fs.getAclStatus(path);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] { }, returned);
  assertPermission((short)0750);
  assertAclFeature(false);
  // restart of the cluster
  restartCluster();
  s = fs.getAclStatus(path);
  AclEntry[] afterRestart = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(returned, afterRestart);
}
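
This example uses the static FileSystem.mkdirs(FileSystem, Path, FsPermission) helper rather than the instance method: the given permission is applied as in setPermission(), not reduced by the client's umask. Roughly, the helper amounts to:

// Sketch of the static helper's behavior: create the directory, then set the
// permission explicitly so the configured umask does not apply.
fs.mkdirs(path);
fs.setPermission(path, FsPermission.createImmutable((short) 0750));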
 
Example 14
Source File: TestAclWithSnapshot.java    From hadoop with Apache License 2.0
@Test
public void testRemoveAclExceedsQuota() throws Exception {
  Path filePath = new Path(path, "file1");
  Path fileSnapshotPath = new Path(snapshotPath, "file1");
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755));
  hdfs.allowSnapshot(path);
  hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
  FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
    .close();
  hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600));
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, "bruce", READ_WRITE));
  hdfs.modifyAclEntries(filePath, aclSpec);

  hdfs.createSnapshot(path, snapshotName);

  AclStatus s = hdfs.getAclStatus(filePath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
    aclEntry(ACCESS, USER, "bruce", READ_WRITE),
    aclEntry(ACCESS, GROUP, NONE) }, returned);
  assertPermission((short)010660, filePath);

  s = hdfs.getAclStatus(fileSnapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(new AclEntry[] {
    aclEntry(ACCESS, USER, "bruce", READ_WRITE),
    aclEntry(ACCESS, GROUP, NONE) }, returned);
  assertPermission((short)010660, filePath);

  aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, "bruce", READ));
  hdfs.removeAcl(filePath);
}
 
Example 15
Source File: TestAclWithSnapshot.java    From big-c with Apache License 2.0
@Test
public void testOriginalAclEnforcedForSnapshotContentsAfterRemoval()
    throws Exception {
  Path filePath = new Path(path, "file1");
  Path subdirPath = new Path(path, "subdir1");
  Path fileSnapshotPath = new Path(snapshotPath, "file1");
  Path subdirSnapshotPath = new Path(snapshotPath, "subdir1");
  FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777));
  FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
    .close();
  FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable(
    (short)0700));
  List<AclEntry> aclSpec = Lists.newArrayList(
    aclEntry(ACCESS, USER, READ_EXECUTE),
    aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
    aclEntry(ACCESS, GROUP, NONE),
    aclEntry(ACCESS, OTHER, NONE));
  hdfs.setAcl(filePath, aclSpec);
  hdfs.setAcl(subdirPath, aclSpec);

  assertFilePermissionGranted(fsAsBruce, BRUCE, filePath);
  assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
  assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath);
  assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);

  SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);

  // Both original and snapshot still have same ACL.
  AclEntry[] expected = new AclEntry[] {
    aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
    aclEntry(ACCESS, GROUP, NONE) };
  AclStatus s = hdfs.getAclStatus(filePath);
  AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, returned);
  assertPermission((short)010550, filePath);

  s = hdfs.getAclStatus(subdirPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, returned);
  assertPermission((short)010550, subdirPath);

  s = hdfs.getAclStatus(fileSnapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, returned);
  assertPermission((short)010550, fileSnapshotPath);
  assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
  assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);

  s = hdfs.getAclStatus(subdirSnapshotPath);
  returned = s.getEntries().toArray(new AclEntry[0]);
  assertArrayEquals(expected, returned);
  assertPermission((short)010550, subdirSnapshotPath);
  assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
  assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);

  hdfs.removeAcl(filePath);
  hdfs.removeAcl(subdirPath);

  // Original has changed, but snapshot still has old ACL.
  doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
    subdirSnapshotPath);
  restart(false);
  doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
    subdirSnapshotPath);
  restart(true);
  doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
    subdirSnapshotPath);
}
 
Example 16
Source File: TestSpecialCharactersInOutputPath.java    From hadoop-gpu with Apache License 2.0
public static boolean launchJob(String fileSys,
                                     String jobTracker,
                                     JobConf conf,
                                     int numMaps,
                                     int numReduces) throws IOException {
  
  final Path inDir = new Path("/testing/input");
  final Path outDir = new Path("/testing/output");
  FileSystem fs = FileSystem.getNamed(fileSys, conf);
  fs.delete(outDir, true);
  if (!fs.mkdirs(inDir)) {
    LOG.warn("Can't create " + inDir);
    return false;
  }
  // generate an input file
  DataOutputStream file = fs.create(new Path(inDir, "part-0"));
  file.writeBytes("foo foo2 foo3");
  file.close();

  // use WordCount example
  FileSystem.setDefaultUri(conf, fileSys);
  conf.set("mapred.job.tracker", jobTracker);
  conf.setJobName("foo");

  conf.setInputFormat(TextInputFormat.class);
  conf.setOutputFormat(SpecialTextOutputFormat.class);
  conf.setOutputKeyClass(LongWritable.class);
  conf.setOutputValueClass(Text.class);
  conf.setMapperClass(IdentityMapper.class);        
  conf.setReducerClass(IdentityReducer.class);
  FileInputFormat.setInputPaths(conf, inDir);
  FileOutputFormat.setOutputPath(conf, outDir);
  conf.setNumMapTasks(numMaps);
  conf.setNumReduceTasks(numReduces);
    
  // run job and wait for completion
  RunningJob runningJob = JobClient.runJob(conf);
    
  try {
    assertTrue(runningJob.isComplete());
    assertTrue(runningJob.isSuccessful());
    assertTrue("Output folder not found!", fs.exists(new Path("/testing/output/" + OUTPUT_FILENAME)));
  } catch (NullPointerException npe) {
    // This NPE should no longer happen
    fail("An NPE should not have happened.");
  }
        
  // return job result
  LOG.info("job is complete: " + runningJob.isSuccessful());
  return (runningJob.isSuccessful());
}
 
Example 17
Source File: TestHttpFSServer.java    From hadoop with Apache License 2.0
/**
 * Validate XAttr get/set/remove calls.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testXAttrs() throws Exception {
  final String name1 = "user.a1";
  final byte[] value1 = new byte[]{0x31, 0x32, 0x33};
  final String name2 = "user.a2";
  final byte[] value2 = new byte[]{0x41, 0x42, 0x43};
  final String dir = "/xattrTest";
  final String path = dir + "/file";
  
  createHttpFSServer(false);
  
  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));
  
  createWithHttp(path, null);
  String statusJson = getStatus(path, "GETXATTRS");
  Map<String, byte[]> xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(0, xAttrs.size());
  
  // Set two xattrs
  putCmd(path, "SETXATTR", setXAttrParam(name1, value1));
  putCmd(path, "SETXATTR", setXAttrParam(name2, value2));
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(2, xAttrs.size());
  Assert.assertArrayEquals(value1, xAttrs.get(name1));
  Assert.assertArrayEquals(value2, xAttrs.get(name2));
  
  // Remove one xattr
  putCmd(path, "REMOVEXATTR", "xattr.name=" + name1);
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(1, xAttrs.size());
  Assert.assertArrayEquals(value2, xAttrs.get(name2));
  
  // Remove another xattr, then there is no xattr
  putCmd(path, "REMOVEXATTR", "xattr.name=" + name2);
  statusJson = getStatus(path, "GETXATTRS");
  xAttrs = getXAttrs(statusJson);
  Assert.assertEquals(0, xAttrs.size());
}
 
Example 18
Source File: SpliceTestDfsPlatform.java    From spliceengine with GNU Affero General Public License v3.0
public void start(int nodeCount, String directory) throws Exception {
    if (dfsCluster == null) {
        conf = new Configuration();
        String keytab = directory+"/splice.keytab";
        conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, directory);
        conf.set("hadoop.security.authentication", "kerberos");
        conf.set("dfs.namenode.kerberos.principal", "hdfs/[email protected]");
        conf.set("dfs.namenode.keytab.file", keytab);
        conf.set("dfs.web.authentication.kerberos.principal", "hdfs/[email protected]");
        conf.set("dfs.web.authentication.kerberos.keytab", keytab);
        conf.set("dfs.datanode.kerberos.principal", "hdfs/[email protected]");
        conf.set("dfs.datanode.keytab.file", keytab);
        conf.set("dfs.block.access.token.enable", "true");
        conf.set(DFSConfigKeys.IGNORE_SECURE_PORTS_FOR_TESTING_KEY, "true");

        dfsCluster = new MiniDFSCluster.Builder(conf).clusterId("localDfs").format(true).numDataNodes(nodeCount).nameNodePort(58878).build();
        dfsCluster.waitActive();

        conf = dfsCluster.getConfiguration(0);
        ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();
        conf.writeXml(bytesOut);
        bytesOut.close();
        // write the bytes to the file in the classpath
        OutputStream os = new FileOutputStream(new File(new File(directory, "classes"), "core-site.xml"));
        os.write(bytesOut.toByteArray());
        os.close();
        
        FileSystem fileSystem = FileSystem.get(conf);
        Path hbase = new Path("/hbase");
        fileSystem.mkdirs(hbase);
        fileSystem.setOwner(hbase, "hbase", "hbase");
        Path users = new Path("/user");
        fileSystem.mkdirs(users, FsPermission.createImmutable((short)0777));
        Path hbaseUser = new Path("/user/hbase");
        fileSystem.mkdirs(hbaseUser);
        fileSystem.setOwner(hbaseUser, "hbase", "hbase");
        Path spliceUser = new Path("/user/splice");
        fileSystem.mkdirs(spliceUser);
        fileSystem.setOwner(spliceUser, "splice", "splice");
    }
    LOG.info("HDFS cluster started, listening on port " + dfsCluster.getNameNodePort() + " writing to " + dfsCluster.getDataDirectory());
    LOG.info("Configuration " + dfsCluster.getConfiguration(0));
}
 
Example 19
Source File: FSXAttrBaseTest.java    From hadoop with Apache License 2.0
/**
 * Tests for replacing xattr
 * 1. Replace an xattr using XAttrSetFlag.REPLACE.
 * 2. Replace an xattr which doesn't exist and expect an exception.
 * 3. Create multiple xattrs and replace some.
 * 4. Restart NN and save checkpoint scenarios.
 */
@Test(timeout = 120000)
public void testReplaceXAttr() throws Exception {
  FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
  
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(xattrs.size(), 1);
  Assert.assertArrayEquals(newValue1, xattrs.get(name1));
  
  fs.removeXAttr(path, name1);
  
  // Replace xattr which does not exist.
  try {
    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.REPLACE));
    Assert.fail("Replacing xattr which does not exist should fail.");
  } catch (IOException e) {
  }
  
  // Create two xattrs, then replace one
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.REPLACE));
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(xattrs.size(), 2);
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  
  restart(false);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(xattrs.size(), 2);
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  
  restart(true);
  initFileSystem();
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(xattrs.size(), 2);
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
  
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
}
 
Example 20
Source File: HBaseTestingUtility.java    From hbase with Apache License 2.0
/**
 * Creates an hbase walDir in the user's home directory.
 * Normally you won't make use of this method: the root hbaseWALDir
 * is created for you as part of mini cluster startup. You'd only use this
 * method if you were doing manual operations.
 *
 * @return Fully qualified path to the hbase WAL root dir
 * @throws IOException
 */
public Path createWALRootDir() throws IOException {
  FileSystem fs = FileSystem.get(this.conf);
  Path walDir = getNewDataTestDirOnTestFS();
  CommonFSUtils.setWALRootDir(this.conf, walDir);
  fs.mkdirs(walDir);
  return walDir;
}