Java Code Examples for org.apache.hadoop.fs.FileSystem#getWorkingDirectory()

The following examples show how to use org.apache.hadoop.fs.FileSystem#getWorkingDirectory(). Each example is taken from an open-source project; the originating source file, project, and license are noted above each snippet.
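Before the project examples, here is a minimal, self-contained sketch of the typical usage pattern: obtain a FileSystem from a Configuration, read its working directory, resolve a relative Path against it, and optionally change it with setWorkingDirectory(). This snippet is illustrative only and is not taken from any of the projects below; the file and directory names are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirectoryExample {
  public static void main(String[] args) throws Exception {
    // Uses the default file system from core-site.xml
    // (falls back to the local file system if none is configured).
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // getWorkingDirectory() returns an absolute path; relative Paths
    // are resolved against it by the FileSystem implementation.
    Path workingDir = fs.getWorkingDirectory();
    System.out.println("Working directory: " + workingDir);

    // Resolve a relative path (placeholder name) against the working directory.
    Path resolved = new Path(workingDir, "data/input.txt");
    System.out.println("Resolved: " + resolved);

    // The working directory can be changed for this FileSystem instance;
    // the change is client-side state and does not persist across clients.
    fs.setWorkingDirectory(new Path("/tmp"));
    System.out.println("New working directory: " + fs.getWorkingDirectory());

    fs.close();
  }
}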
Example 1
Source File: BaseTestHttpFSWith.java    From big-c with Apache License 2.0
private void testWorkingdirectory() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path workingDir = fs.getWorkingDirectory();
  fs.close();

  fs = getHttpFSFileSystem();
  if (isLocalFS()) {
    fs.setWorkingDirectory(workingDir);
  }
  Path httpFSWorkingDir = fs.getWorkingDirectory();
  fs.close();
  Assert.assertEquals(httpFSWorkingDir.toUri().getPath(),
                      workingDir.toUri().getPath());

  fs = getHttpFSFileSystem();
  fs.setWorkingDirectory(new Path("/tmp"));
  workingDir = fs.getWorkingDirectory();
  fs.close();
  Assert.assertEquals(workingDir.toUri().getPath(), new Path("/tmp").toUri().getPath());
}
 
Example 2
Source File: YarnFileStageTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Verifies that nested directories are properly copied with a <tt>hdfs://</tt> file
 * system (from a <tt>/absolute/path</tt> source path).
 */
@Test
public void testCopyFromLocalRecursiveWithoutScheme() throws Exception {
	final FileSystem targetFileSystem = hdfsRootPath.getFileSystem(hadoopConfig);
	final Path targetDir = targetFileSystem.getWorkingDirectory();

	testCopyFromLocalRecursive(targetFileSystem, targetDir, tempFolder, false);
}
 
Example 3
Source File: YarnFileStageTest.java    From flink with Apache License 2.0
/**
 * Verifies that nested directories are properly copied with a <tt>hdfs://</tt> file
 * system (from a <tt>file:///absolute/path</tt> source path).
 */
@Test
public void testCopyFromLocalRecursiveWithScheme() throws Exception {
	final FileSystem targetFileSystem = hdfsRootPath.getFileSystem(hadoopConfig);
	final Path targetDir = targetFileSystem.getWorkingDirectory();

	testCopyFromLocalRecursive(targetFileSystem, targetDir, tempFolder, true);
}
 
Example 4
Source File: YarnFileStageTest.java    From flink with Apache License 2.0
/**
 * Verifies that nested directories are properly copied with a <tt>hdfs://</tt> file
 * system (from a <tt>/absolute/path</tt> source path).
 */
@Test
public void testCopyFromLocalRecursiveWithoutScheme() throws Exception {
	final FileSystem targetFileSystem = hdfsRootPath.getFileSystem(hadoopConfig);
	final Path targetDir = targetFileSystem.getWorkingDirectory();

	testCopyFromLocalRecursive(targetFileSystem, targetDir, tempFolder, false);
}
 
Example 5
Source File: CopyTable.java    From hbase with Apache License 2.0
private Path generateUniqTempDir(boolean withDirCreated) throws IOException {
  FileSystem fs = CommonFSUtils.getCurrentFileSystem(getConf());
  Path dir = new Path(fs.getWorkingDirectory(), NAME);
  if (!fs.exists(dir)) {
    fs.mkdirs(dir);
  }
  Path newDir = new Path(dir, UUID.randomUUID().toString());
  if (withDirCreated) {
    fs.mkdirs(newDir);
  }
  return newDir;
}
 
Example 6
Source File: PersistedHDFSManager.java    From Knowage-Server with GNU Affero General Public License v3.0
public FSDataOutputStream openHdfsFile(String fileName, String folderName) {
	logger.debug("Begin file opening");
	FSDataOutputStream fsOS = null;
	Path filePath = null;
	try {
		FileSystem fs = hdfs.getFs();
		filePath = fs.getWorkingDirectory();
		if (folderName != null && folderName.length() > 0) {
			filePath = Path.mergePaths(filePath, new Path(Path.SEPARATOR, folderName));
			if (!fs.exists(filePath) || !fs.isDirectory(filePath)) {
				fs.mkdirs(filePath);
			}
		}
		filePath = Path.mergePaths(filePath, new Path(Path.SEPARATOR + fileName));
		boolean existsFile = fs.exists(filePath);
		if (existsFile) {
			logger.debug("File is already present in folder, it will be deleted and replaced with new file");
			fs.delete(filePath, true);
		}
		fsOS = fs.create(filePath, true);
	} catch (IOException e) {
		logger.error("Impossible to open file in File System");
		throw new SpagoBIRuntimeException("Impossible to open file in File System" + e);
	}
	logger.debug("File opened");
	return fsOS;
}
 
Example 7
Source File: YarnFileStageTest.java    From flink with Apache License 2.0
/**
 * Verifies that a single file is properly copied.
 */
@Test
public void testCopySingleFileFromLocal() throws IOException, URISyntaxException, InterruptedException {
	final FileSystem targetFileSystem = hdfsRootPath.getFileSystem(hadoopConfig);
	final Path targetDir = targetFileSystem.getWorkingDirectory();

	testCopySingleFileFromLocal(targetFileSystem, targetDir, LOCAL_RESOURCE_DIRECTORY, tempFolder);
}
 
Example 8
Source File: YarnFileStageTest.java    From flink with Apache License 2.0
@Test
public void testRegisterMultipleLocalResourcesWithRemoteFiles() throws Exception {
	final FileSystem targetFileSystem = hdfsRootPath.getFileSystem(hadoopConfig);
	final Path targetDir = targetFileSystem.getWorkingDirectory();

	testRegisterMultipleLocalResources(targetFileSystem, targetDir, LOCAL_RESOURCE_DIRECTORY, tempFolder, true, true);
}
 
Example 9
Source File: YarnFileStageTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Verifies that nested directories are properly copied with a <tt>hdfs://</tt> file
 * system (from a <tt>file:///absolute/path</tt> source path).
 */
@Test
public void testCopyFromLocalRecursiveWithScheme() throws Exception {
	final FileSystem targetFileSystem = hdfsRootPath.getFileSystem(hadoopConfig);
	final Path targetDir = targetFileSystem.getWorkingDirectory();

	testCopyFromLocalRecursive(targetFileSystem, targetDir, tempFolder, true);
}
 
Example 10
Source File: TestHostsFiles.java    From hadoop with Apache License 2.0
@Test
public void testHostsIncludeForDeadCount() throws Exception {
  Configuration conf = getConf();

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  StringBuilder includeHosts = new StringBuilder();
  includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
      .append("\n");
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    assertTrue(ns.getNumDeadDataNodes() == 2);
    assertTrue(ns.getNumLiveDataNodes() == 0);

    // Testing using MBeans
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 11
Source File: TestHostsFiles.java    From big-c with Apache License 2.0
@Test
public void testHostsIncludeForDeadCount() throws Exception {
  Configuration conf = getConf();

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  StringBuilder includeHosts = new StringBuilder();
  includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
      .append("\n");
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    final FSNamesystem ns = cluster.getNameNode().getNamesystem();
    assertTrue(ns.getNumDeadDataNodes() == 2);
    assertTrue(ns.getNumLiveDataNodes() == 0);

    // Testing using MBeans
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");
    String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
    assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 12
Source File: TestLocalDFS.java    From hadoop with Apache License 2.0
/**
 * Tests get/set working directory in DFS.
 */
@Test
public void testWorkingDirectory() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path orig_path = fileSys.getWorkingDirectory();
    assertTrue(orig_path.isAbsolute());
    Path file1 = new Path("somewhat/random.txt");
    writeFile(fileSys, file1);
    assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
    fileSys.delete(file1, true);
    Path subdir1 = new Path("/somewhere");
    fileSys.setWorkingDirectory(subdir1);
    writeFile(fileSys, file1);
    cleanupFile(fileSys, new Path(subdir1, file1.toString()));
    Path subdir2 = new Path("else");
    fileSys.setWorkingDirectory(subdir2);
    writeFile(fileSys, file1);
    readFile(fileSys, file1);
    cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
                                  file1.toString()));

    // test home directory
    Path home = 
      fileSys.makeQualified(
          new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
              + "/" + getUserName(fileSys))); 
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);

  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 13
Source File: YarnFileStageTest.java    From flink with Apache License 2.0
/**
 * Verifies that nested directories are properly copied with a <tt>hdfs://</tt> file
 * system (from a <tt>file:///absolute/path</tt> source path).
 */
@Test
public void testCopyFromLocalRecursiveWithScheme() throws Exception {
	final FileSystem targetFileSystem = hdfsRootPath.getFileSystem(hadoopConfig);
	final Path targetDir = targetFileSystem.getWorkingDirectory();

	testRegisterMultipleLocalResources(targetFileSystem, targetDir, LOCAL_RESOURCE_DIRECTORY, tempFolder, true, false);
}
 
Example 14
Source File: TestNativeAzureFileSystemOperationsMocked.java    From big-c with Apache License 2.0
@Override
public Path getAbsoluteTestRootPath(FileSystem fSys) {
  Path testRootPath = new Path(TEST_ROOT_DIR);
  if (testRootPath.isAbsolute()) {
    return testRootPath;
  } else {
    return new Path(fSys.getWorkingDirectory(), TEST_ROOT_DIR);
  }
}
 
Example 15
Source File: TestBlocksWithNotEnoughRacks.java    From big-c with Apache License 2.0
@Test
public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() 
    throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 5;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());

  // All hosts are on two racks, only one host on /rack2
  String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Lower the replication factor so the blocks are over replicated
    REPLICATION_FACTOR = 2;
    fs.setReplication(filePath, REPLICATION_FACTOR);

    // Decommission one of the hosts with the block that is not on
    // the lone host on rack2 (if we decommission that host it would
    // be impossible to respect the rack policy).
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    for (String top : locs[0].getTopologyPaths()) {
      if (!top.startsWith("/rack2")) {
        String name = top.substring("/rack1".length()+1);
        DFSTestUtil.writeFile(localFileSys, excludeFile, name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        break;
      }
    }

    // Check the block still has sufficient # replicas across racks,
    // ie we didn't remove the replica on the host on /rack1.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Example 16
Source File: TestBlocksWithNotEnoughRacks.java    From big-c with Apache License 2.0
@Test
public void testNodeDecomissionRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause 
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Example 17
Source File: TestDecommission.java    From hadoop-gpu with Apache License 2.0
/**
 * Tests Decommission in DFS.
 */
public void testDecommission() throws IOException {
  Configuration conf = new Configuration();
  conf.setBoolean("dfs.replication.considerLoad", false);

  // Set up the hosts/exclude files.
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  hostsFile = new Path(dir, "hosts");
  excludeFile = new Path(dir, "exclude");
  conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
  conf.setInt("heartbeat.recheck.interval", 2000);
  conf.setInt("dfs.heartbeat.interval", 1);
  conf.setInt("dfs.replication.pending.timeout.sec", 4);
  writeConfigFile(localFileSys, excludeFile, null);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
  cluster.waitActive();
  InetSocketAddress addr = new InetSocketAddress("localhost", 
                                                 cluster.getNameNodePort());
  DFSClient client = new DFSClient(addr, conf);
  DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
  assertEquals("Number of Datanodes ", numDatanodes, info.length);
  FileSystem fileSys = cluster.getFileSystem();

  try {
    for (int iteration = 0; iteration < numDatanodes - 1; iteration++) {
      int replicas = numDatanodes - iteration - 1;
      //
      // Decommission one node. Verify that node is decommissioned.
      // 
      Path file1 = new Path("decommission.dat");
      writeFile(fileSys, file1, replicas);
      System.out.println("Created file decommission.dat with " +
                         replicas + " replicas.");
      checkFile(fileSys, file1, replicas);
      printFileLocations(fileSys, file1);
      String downnode = decommissionNode(cluster.getNameNode(), conf,
                                         client, localFileSys);
      decommissionedNodes.add(downnode);
      waitNodeState(fileSys, downnode, NodeState.DECOMMISSIONED);
      checkFile(fileSys, file1, replicas, downnode);
      cleanupFile(fileSys, file1);
      cleanupFile(localFileSys, dir);
    }
  } catch (IOException e) {
    info = client.datanodeReport(DatanodeReportType.ALL);
    printDatanodeReport(info);
    throw e;
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
 
Example 18
Source File: TestBlocksWithNotEnoughRacks.java    From hadoop with Apache License 2.0
@Test
public void testNodeDecomissionWithOverreplicationRespectsRackPolicy() 
    throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 5;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());

  // All hosts are on two racks, only one host on /rack2
  String racks[] = {"/rack1", "/rack2", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Lower the replication factor so the blocks are over replicated
    REPLICATION_FACTOR = 2;
    fs.setReplication(filePath, REPLICATION_FACTOR);

    // Decommission one of the hosts with the block that is not on
    // the lone host on rack2 (if we decommission that host it would
    // be impossible to respect the rack policy).
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    for (String top : locs[0].getTopologyPaths()) {
      if (!top.startsWith("/rack2")) {
        String name = top.substring("/rack1".length()+1);
        DFSTestUtil.writeFile(localFileSys, excludeFile, name);
        ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
        DFSTestUtil.waitForDecommission(fs, name);
        break;
      }
    }

    // Check the block still has sufficient # replicas across racks,
    // ie we didn't remove the replica on the host on /rack1.
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Example 19
Source File: AbstractHadoopProcessor.java    From nifi with Apache License 2.0
HdfsResources resetHDFSResources(String configResources, ProcessContext context) throws IOException {
    Configuration config = new ExtendedConfiguration(getLogger());
    config.setClassLoader(Thread.currentThread().getContextClassLoader());

    getConfigurationFromResources(config, configResources);

    // give sub-classes a chance to process configuration
    preProcessConfiguration(config, context);

    // first check for timeout on HDFS connection, because FileSystem has a hard coded 15 minute timeout
    checkHdfsUriForTimeout(config);

    // disable caching of Configuration and FileSystem objects, else we cannot reconfigure the processor without a complete
    // restart
    String disableCacheName = String.format("fs.%s.impl.disable.cache", FileSystem.getDefaultUri(config).getScheme());
    config.set(disableCacheName, "true");

    // If kerberos is enabled, create the file system as the kerberos principal
    // -- use RESOURCES_LOCK to guarantee UserGroupInformation is accessed by only a single thread at a time
    FileSystem fs;
    UserGroupInformation ugi;
    KerberosUser kerberosUser;
    synchronized (RESOURCES_LOCK) {
        if (SecurityUtil.isSecurityEnabled(config)) {
            String principal = context.getProperty(kerberosProperties.getKerberosPrincipal()).evaluateAttributeExpressions().getValue();
            String keyTab = context.getProperty(kerberosProperties.getKerberosKeytab()).evaluateAttributeExpressions().getValue();
            String password = context.getProperty(kerberosProperties.getKerberosPassword()).getValue();

            // If the Kerberos Credentials Service is specified, we need to use its configuration, not the explicit properties for principal/keytab.
            // The customValidate method ensures that only one can be set, so we know that the principal & keytab above are null.
            final KerberosCredentialsService credentialsService = context.getProperty(KERBEROS_CREDENTIALS_SERVICE).asControllerService(KerberosCredentialsService.class);
            if (credentialsService != null) {
                principal = credentialsService.getPrincipal();
                keyTab = credentialsService.getKeytab();
            }

            if (keyTab != null) {
                kerberosUser = new KerberosKeytabUser(principal, keyTab);
            } else if (password != null) {
                kerberosUser = new KerberosPasswordUser(principal, password);
            } else {
                throw new IOException("Unable to authenticate with Kerberos, no keytab or password was provided");
            }
            ugi = SecurityUtil.getUgiForKerberosUser(config, kerberosUser);
        } else {
            config.set("ipc.client.fallback-to-simple-auth-allowed", "true");
            config.set("hadoop.security.authentication", "simple");
            ugi = SecurityUtil.loginSimple(config);
            kerberosUser = null;
        }
        fs = getFileSystemAsUser(config, ugi);
    }
    getLogger().debug("resetHDFSResources UGI [{}], KerberosUser [{}]", new Object[]{ugi, kerberosUser});

    final Path workingDir = fs.getWorkingDirectory();
    getLogger().info("Initialized a new HDFS File System with working dir: {} default block size: {} default replication: {} config: {}",
            new Object[]{workingDir, fs.getDefaultBlockSize(workingDir), fs.getDefaultReplication(workingDir), config.toString()});

    return new HdfsResources(config, fs, ugi, kerberosUser);
}
 
Example 20
Source File: TestHostsFiles.java    From hadoop with Apache License 2.0
@Test
public void testHostsExcludeInUI() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 2;
  final Path filePath = new Path("/testFile");

  // Configure an excludes file
  FileSystem localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, "build/test/data/temp/decommission");
  Path excludeFile = new Path(dir, "exclude");
  Path includeFile = new Path(dir, "include");
  assertTrue(localFileSys.mkdirs(dir));
  DFSTestUtil.writeFile(localFileSys, excludeFile, "");
  DFSTestUtil.writeFile(localFileSys, includeFile, "");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());

  // Two blocks and four racks
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decommission one of the hosts with the block, this should cause 
    // the block to get replicated to another host on the same rack,
    // otherwise the rack policy is violated.
    BlockLocation locs[] = fs.getFileBlockLocations(
        fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
    String name = locs[0].getNames()[0];
    String names = name + "\n" + "localhost:42\n";
    LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
    DFSTestUtil.writeFile(localFileSys, excludeFile, name);
    ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
    DFSTestUtil.waitForDecommission(fs, name);

    // Check the block still has sufficient # replicas across racks
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
    
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName mxbeanName =
        new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
    String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
    assertTrue("Live nodes should contain the decommissioned node",
        nodes.contains("Decommissioned"));
  } finally {
    cluster.shutdown();
  }
}