Java Code Examples for org.apache.hadoop.fs.FileSystem#getHomeDirectory()

The following examples show how to use org.apache.hadoop.fs.FileSystem#getHomeDirectory(). These examples are extracted from open source projects; where known, the source project, file, and license are noted above each example.
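For orientation, here is a minimal, self-contained sketch of the call itself. The configuration and printing are illustrative assumptions, not taken from any of the projects below:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HomeDirectoryExample {
  public static void main(String[] args) throws Exception {
    // Resolves the current user's home directory on the configured
    // filesystem, e.g. /user/<username> on HDFS.
    FileSystem fs = FileSystem.get(new Configuration());
    Path home = fs.getHomeDirectory();
    System.out.println("Home directory: " + home);
    fs.close();
  }
}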
Example 1
Source Project: big-c   File: TestLocalDFS.java    License: Apache License 2.0
/**
 * Tests the user home directory in DFS for different home-directory prefixes.
 */
@Test(timeout=30000)
public void testHomeDirectory() throws IOException {
  final String[] homeBases = new String[] {"/home", "/home/user"};
  Configuration conf = new HdfsConfiguration();
  for (final String homeBase : homeBases) {
    conf.set(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY, homeBase);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fileSys = cluster.getFileSystem();
    try {    
      // test home directory
      Path home = 
          fileSys.makeQualified(
              new Path(homeBase + "/" + getUserName(fileSys))); 
      Path fsHome = fileSys.getHomeDirectory();
      assertEquals(home, fsHome);
    } finally {
      fileSys.close();
      cluster.shutdown();
    }
  }
}
 
Example 2
Source Project: hbase   File: SpaceQuotaHelperForTests.java    License: Apache License 2.0
/**
 * Generates a number of HFiles, each with the given number of rows, keyed by
 * column family for bulk-loading into the given table.
 */
Map<byte[], List<Path>> generateFileToLoad(TableName tn, int numFiles, int numRowsPerFile)
    throws Exception {
  FileSystem fs = testUtil.getTestFileSystem();
  Path baseDir = new Path(fs.getHomeDirectory(), testName.getMethodName() + "_files");
  fs.mkdirs(baseDir);
  List<Path> hfiles = new ArrayList<>();
  for (int i = 1; i <= numFiles; i++) {
    Path hfile = new Path(baseDir, "file" + i);
    TestHRegionServerBulkLoad.createHFile(fs, hfile, Bytes.toBytes(SpaceQuotaHelperForTests.F1),
      Bytes.toBytes("my"), Bytes.toBytes("file"), numRowsPerFile);
    hfiles.add(hfile);
  }
  Map<byte[], List<Path>> family2Files = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  family2Files.put(Bytes.toBytes(SpaceQuotaHelperForTests.F1), hfiles);
  return family2Files;
}
 
Example 3
public static FSDataOutputStream createFile(String file, Configuration conf, boolean s3) throws IOException {
    if (s3) {
        Path path = new Path(file);
        FileSystem fs = FileSystem.get(path.toUri(), conf);
        // Do not close the FileSystem here: closing it would also close the
        // stream handed back to the caller.
        return fs.create(path);
    } else {
        FileSystem hdfs = FileSystem.get(new Configuration());
        Path hdfsFile = new Path(hdfs.getHomeDirectory(), file);
        return hdfs.create(hdfsFile);
    }
}
 
Example 4
private void addToLocalResources(FileSystem fs, String fileSrcPath,
                                 String fileDstPath, int appId, Map<String, LocalResource> localResources,
                                 String resources) throws IOException {
  String suffix = appName + "/" + appId + "/" + fileDstPath;
  Path dst =
      new Path(fs.getHomeDirectory(), suffix);
  if (fileSrcPath == null) {
    FSDataOutputStream ostream = null;
    try {
      ostream = FileSystem
          .create(fs, dst, new FsPermission((short) 0710));
      ostream.writeUTF(resources);
    } finally {
      IOUtils.closeQuietly(ostream);
    }
  } else {
    fs.copyFromLocalFile(new Path(fileSrcPath), dst);
  }
  FileStatus scFileStatus = fs.getFileStatus(dst);
  LocalResource scRsrc =
      LocalResource.newInstance(
          ConverterUtils.getYarnUrlFromURI(dst.toUri()),
          LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
          scFileStatus.getLen(), scFileStatus.getModificationTime());
  localResources.put(fileDstPath, scRsrc);
}
 
Example 5
public static void removeReducerFiles(final String fileName) throws IOException {
    // Match any file in the relationships directory whose name contains fileName.
    PathFilter filter = new PathFilter() {
        @Override
        public boolean accept(Path p) {
            return p.getName().contains(fileName);
        }
    };

    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path(fs.getHomeDirectory(), "relationships");

    for (FileStatus fileStatus : fs.listStatus(path, filter)) {
        fs.delete(fileStatus.getPath(), true);
    }
}
 
Example 6
Source Project: Bats   File: StramClientUtils.java    License: Apache License 2.0
public static Path getDTDFSRootDir(FileSystem fs, Configuration conf)
{
  String dfsRootDir = conf.get(DT_DFS_ROOT_DIR);
  if (StringUtils.isBlank(dfsRootDir)) {
    return new Path(fs.getHomeDirectory(), getDefaultRootFolder());
  }
  String userShortName = null;
  try {
    userShortName = UserGroupInformation.getLoginUser().getShortUserName();
  } catch (IOException ex) {
    LOG.warn("Error getting user login name {}", dfsRootDir, ex);
  }
  return evalDFSRootDir(fs, conf, dfsRootDir, userShortName, false);
}
 
Example 7
Source Project: zeppelin   File: HadoopUtils.java    License: Apache License 2.0
public static void cleanupStagingDirInternal(ClusterClient clusterClient) {
  try {
    ApplicationId appId = (ApplicationId) clusterClient.getClusterId();
    FileSystem fs = FileSystem.get(new Configuration());
    Path stagingDirPath = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString());
    if (fs.delete(stagingDirPath, true)) {
      LOGGER.info("Deleted staging directory " + stagingDirPath);
    }
  } catch (IOException e) {
    LOGGER.warn("Failed to cleanup staging dir", e);
  }
}
 
Example 8
Source Project: mrgeo   File: HadoopFileUtils.java    License: Apache License 2.0
/**
 * Returns a tmp directory under the user's home directory, creating it if it
 * does not already exist.
 *
 * @return the tmp directory path
 * @throws IOException if the filesystem cannot be accessed or the directory
 *                     cannot be created
 */
public static Path getTempDir(Configuration conf) throws IOException
{
  FileSystem fs = getFileSystem(conf);
  Path parent;
  parent = fs.getHomeDirectory();
  Path tmp = new Path(parent, "tmp");
  if (!fs.exists(tmp))
  {
    fs.mkdirs(tmp);
  }
  return tmp;
}
 
Example 9
Source Project: hadoop   File: Client.java    License: Apache License 2.0
private void addToLocalResources(FileSystem fs, String fileSrcPath,
    String fileDstPath, String appId, Map<String, LocalResource> localResources,
    String resources) throws IOException {
  String suffix =
      appName + "/" + appId + "/" + fileDstPath;
  Path dst =
      new Path(fs.getHomeDirectory(), suffix);
  if (fileSrcPath == null) {
    FSDataOutputStream ostream = null;
    try {
      ostream = FileSystem
          .create(fs, dst, new FsPermission((short) 0710));
      ostream.writeUTF(resources);
    } finally {
      IOUtils.closeQuietly(ostream);
    }
  } else {
    fs.copyFromLocalFile(new Path(fileSrcPath), dst);
  }
  FileStatus scFileStatus = fs.getFileStatus(dst);
  LocalResource scRsrc =
      LocalResource.newInstance(
          ConverterUtils.getYarnUrlFromURI(dst.toUri()),
          LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
          scFileStatus.getLen(), scFileStatus.getModificationTime());
  localResources.put(fileDstPath, scRsrc);
}
 
Example 10
Source Project: hadoop   File: TestLocalDFS.java    License: Apache License 2.0
/**
 * Tests get/set working directory in DFS.
 */
@Test
public void testWorkingDirectory() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path orig_path = fileSys.getWorkingDirectory();
    assertTrue(orig_path.isAbsolute());
    Path file1 = new Path("somewhat/random.txt");
    writeFile(fileSys, file1);
    assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
    fileSys.delete(file1, true);
    Path subdir1 = new Path("/somewhere");
    fileSys.setWorkingDirectory(subdir1);
    writeFile(fileSys, file1);
    cleanupFile(fileSys, new Path(subdir1, file1.toString()));
    Path subdir2 = new Path("else");
    fileSys.setWorkingDirectory(subdir2);
    writeFile(fileSys, file1);
    readFile(fileSys, file1);
    cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
                                  file1.toString()));

    // test home directory
    Path home = 
      fileSys.makeQualified(
          new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
              + "/" + getUserName(fileSys))); 
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);

  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 11
Source Project: RDFS   File: TestLocalDFS.java    License: Apache License 2.0
/**
 * Tests get/set working directory in DFS.
 */
public void testWorkingDirectory() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path orig_path = fileSys.getWorkingDirectory();
    assertTrue(orig_path.isAbsolute());
    Path file1 = new Path("somewhat/random.txt");
    writeFile(fileSys, file1);
    assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
    fileSys.delete(file1, true);
    Path subdir1 = new Path("/somewhere");
    fileSys.setWorkingDirectory(subdir1);
    writeFile(fileSys, file1);
    cleanupFile(fileSys, new Path(subdir1, file1.toString()));
    Path subdir2 = new Path("else");
    fileSys.setWorkingDirectory(subdir2);
    writeFile(fileSys, file1);
    readFile(fileSys, file1);
    cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
                                  file1.toString()));

    // test home directory
    Path home = new Path("/user/" + getUserName(fileSys))
      .makeQualified(fileSys);
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);

  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 12
Source Project: big-c   File: Client.java    License: Apache License 2.0
private void addToLocalResources(FileSystem fs, String fileSrcPath,
    String fileDstPath, String appId, Map<String, LocalResource> localResources,
    String resources) throws IOException {
  String suffix =
      appName + "/" + appId + "/" + fileDstPath;
  Path dst =
      new Path(fs.getHomeDirectory(), suffix);
  if (fileSrcPath == null) {
    FSDataOutputStream ostream = null;
    try {
      ostream = FileSystem
          .create(fs, dst, new FsPermission((short) 0710));
      ostream.writeUTF(resources);
    } finally {
      IOUtils.closeQuietly(ostream);
    }
  } else {
    fs.copyFromLocalFile(new Path(fileSrcPath), dst);
  }
  FileStatus scFileStatus = fs.getFileStatus(dst);
  LocalResource scRsrc =
      LocalResource.newInstance(
          ConverterUtils.getYarnUrlFromURI(dst.toUri()),
          LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
          scFileStatus.getLen(), scFileStatus.getModificationTime());
  localResources.put(fileDstPath, scRsrc);
}
 
Example 13
Source Project: big-c   File: TestLocalDFS.java    License: Apache License 2.0
/**
 * Tests get/set working directory in DFS.
 */
@Test
public void testWorkingDirectory() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path orig_path = fileSys.getWorkingDirectory();
    assertTrue(orig_path.isAbsolute());
    Path file1 = new Path("somewhat/random.txt");
    writeFile(fileSys, file1);
    assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
    fileSys.delete(file1, true);
    Path subdir1 = new Path("/somewhere");
    fileSys.setWorkingDirectory(subdir1);
    writeFile(fileSys, file1);
    cleanupFile(fileSys, new Path(subdir1, file1.toString()));
    Path subdir2 = new Path("else");
    fileSys.setWorkingDirectory(subdir2);
    writeFile(fileSys, file1);
    readFile(fileSys, file1);
    cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
                                  file1.toString()));

    // test home directory
    Path home = 
      fileSys.makeQualified(
          new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
              + "/" + getUserName(fileSys))); 
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);

  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 14
Source Project: hadoop-gpu   File: TestLocalDFS.java    License: Apache License 2.0
/**
 * Tests get/set working directory in DFS.
 */
public void testWorkingDirectory() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path orig_path = fileSys.getWorkingDirectory();
    assertTrue(orig_path.isAbsolute());
    Path file1 = new Path("somewhat/random.txt");
    writeFile(fileSys, file1);
    assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
    fileSys.delete(file1, true);
    Path subdir1 = new Path("/somewhere");
    fileSys.setWorkingDirectory(subdir1);
    writeFile(fileSys, file1);
    cleanupFile(fileSys, new Path(subdir1, file1.toString()));
    Path subdir2 = new Path("else");
    fileSys.setWorkingDirectory(subdir2);
    writeFile(fileSys, file1);
    readFile(fileSys, file1);
    cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
                                  file1.toString()));

    // test home directory
    Path home = new Path("/user/" + getUserName(fileSys))
      .makeQualified(fileSys);
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);

  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 15
private Map<String, String> getAMEnvironment(Map<String, LocalResource> localResources,
    FileSystem fs) throws IOException {
  Map<String, String> env = new HashMap<String, String>();

  // Set ApplicationMaster jar file
  LocalResource appJarResource = localResources.get(Constants.AM_JAR_NAME);
  Path hdfsAppJarPath = new Path(fs.getHomeDirectory(), appJarResource.getResource().getFile());
  FileStatus hdfsAppJarStatus = fs.getFileStatus(hdfsAppJarPath);
  long hdfsAppJarLength = hdfsAppJarStatus.getLen();
  long hdfsAppJarTimestamp = hdfsAppJarStatus.getModificationTime();

  env.put(Constants.AM_JAR_PATH, hdfsAppJarPath.toString());
  env.put(Constants.AM_JAR_TIMESTAMP, Long.toString(hdfsAppJarTimestamp));
  env.put(Constants.AM_JAR_LENGTH, Long.toString(hdfsAppJarLength));

  // Add AppMaster.jar location to classpath
  // At some point we should not be required to add
  // the hadoop specific classpaths to the env.
  // It should be provided out of the box.
  // For now setting all required classpaths including
  // the classpath to "." for the application jar
  StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$$())
      .append(ApplicationConstants.CLASS_PATH_SEPARATOR).append("./*");
  for (String c : conf.getStrings(
      YarnConfiguration.YARN_APPLICATION_CLASSPATH,
      YarnConfiguration.DEFAULT_YARN_CROSS_PLATFORM_APPLICATION_CLASSPATH)) {
    classPathEnv.append(ApplicationConstants.CLASS_PATH_SEPARATOR);
    classPathEnv.append(c.trim());
  }
  env.put("CLASSPATH", classPathEnv.toString());

  return env;
}
 
Example 16
Source Project: samoa   File: SystemsUtils.java    License: Apache License 2.0
static String getDefaultSAMOADir() throws IOException {
	Configuration config = new Configuration();
	config.addResource(new Path(coreConfPath));
	config.addResource(new Path(hdfsConfPath));
	
	FileSystem fs = FileSystem.get(config);
	Path defaultDir = new Path(fs.getHomeDirectory(), ".samoa");
	return defaultDir.toString();
}
 
Example 17
/**
 * Returns the Path where the YARN application files should be uploaded to.
 *
 * @param appId YARN application id
 */
private Path getYarnFilesDir(final ApplicationId appId) throws IOException {
	final FileSystem fileSystem = FileSystem.get(yarnConfiguration);
	final Path homeDir = fileSystem.getHomeDirectory();
	return new Path(homeDir, ".flink/" + appId + '/');
}
 
Example 18
@Test
public void testSubmitCoordinator() throws Exception {

    LOG.info("OOZIE: Test Submit Coordinator Start");

    FileSystem hdfsFs = hdfsLocalCluster.getHdfsFileSystemHandle();
    OozieClient oozie = oozieLocalServer.getOozieCoordClient();

    Path appPath = new Path(hdfsFs.getHomeDirectory(), "testApp");
    hdfsFs.mkdirs(new Path(appPath, "lib"));
    Path workflow = new Path(appPath, "workflow.xml");
    Path coordinator = new Path(appPath, "coordinator.xml");

    //write workflow.xml
    String wfApp =
            "<workflow-app xmlns='uri:oozie:workflow:0.1' name='test-wf'>" +
                    "    <start to='end'/>" +
                    "    <end name='end'/>" +
                    "</workflow-app>";

    String coordApp =
            "<coordinator-app timezone='UTC' end='2016-07-26T02:26Z' start='2016-07-26T01:26Z' frequency='${coord:hours(1)}' name='test-coordinator' xmlns='uri:oozie:coordinator:0.4'>" +
                    "    <action>" +
                    "        <workflow>" +
                    "            <app-path>" + workflow.toString() + "</app-path>" +
                    "        </workflow>" +
                    "    </action>" +
                    "</coordinator-app>";

    Writer writer = new OutputStreamWriter(hdfsFs.create(workflow));
    writer.write(wfApp);
    writer.close();

    Writer coordWriter = new OutputStreamWriter(hdfsFs.create(coordinator));
    coordWriter.write(coordApp);
    coordWriter.close();

    //write job.properties
    Properties conf = oozie.createConfiguration();
    conf.setProperty(OozieClient.COORDINATOR_APP_PATH, coordinator.toString());
    conf.setProperty(OozieClient.USER_NAME, UserGroupInformation.getCurrentUser().getUserName());

    //submit and check
    final String jobId = oozie.submit(conf);
    CoordinatorJob coord  = oozie.getCoordJobInfo(jobId);
    assertNotNull(coord);
    assertEquals(Job.Status.PREP, coord.getStatus());

    LOG.info("OOZIE: Coordinator: {}", coord.toString());
    hdfsFs.close();
}
 
Example 19
Source Project: hadoop   File: FSOperations.java    License: Apache License 2.0
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return a JSON object with the user home directory.
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
@SuppressWarnings("unchecked")
public JSONObject execute(FileSystem fs) throws IOException {
  Path homeDir = fs.getHomeDirectory();
  JSONObject json = new JSONObject();
  json.put(HttpFSFileSystem.HOME_DIR_JSON, homeDir.toUri().getPath());
  return json;
}
 
Example 20
Source Project: big-c   File: FSOperations.java    License: Apache License 2.0
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return a JSON object with the user home directory.
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
@SuppressWarnings("unchecked")
public JSONObject execute(FileSystem fs) throws IOException {
  Path homeDir = fs.getHomeDirectory();
  JSONObject json = new JSONObject();
  json.put(HttpFSFileSystem.HOME_DIR_JSON, homeDir.toUri().getPath());
  return json;
}
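A note on the JSON shape produced by Examples 19 and 20: HttpFSFileSystem.HOME_DIR_JSON is the key under which the home directory path is returned, so the response looks roughly like {"Path": "/user/alice"}. The exact key and path here are assumptions based on the WebHDFS GETHOMEDIRECTORY response format, not taken from the examples themselves.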