Java Code Examples for org.apache.hadoop.hdfs.HdfsConfiguration#set()

The following examples show how to use org.apache.hadoop.hdfs.HdfsConfiguration#set(). Each example is drawn from an open-source project; the source file and its license are noted above the snippet.
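
HdfsConfiguration extends org.apache.hadoop.conf.Configuration and, when constructed, layers hdfs-default.xml and hdfs-site.xml over the core defaults, so set(key, value) overrides a string property in memory for the lifetime of that configuration object; nothing is written back to the XML files. Below is a minimal, self-contained sketch of the call. The class name and the chosen key/value are illustrative only, not taken from the examples that follow.

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HdfsConfigurationSetSketch {
  public static void main(String[] args) {
    // Loads hdfs-default.xml and hdfs-site.xml on top of the core defaults.
    HdfsConfiguration conf = new HdfsConfiguration();

    // Overrides (or defines) the property in memory only.
    conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "2");

    // Reads back the value set above; prints "2".
    System.out.println(conf.get(DFSConfigKeys.DFS_REPLICATION_KEY));
  }
}
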
Example 1
Source File: TestStartup.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory());

  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      new File(hdfsDir, "data").getPath());
  config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "secondary")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      WILDCARD_HTTP_HOST + "0");
  
  FileSystem.setDefaultUri(config, "hdfs://" + NAME_NODE_HOST + "0");
}
 
Example 2
Source File: TestGetConf.java    From hadoop with Apache License 2.0
@Test
public void TestGetConfIncludeCommand() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, System.getProperty("test.build.data", "target/test/data") + "/Getconf/");
  Path hostsFile = new Path(dir, "hosts");
  Path excludeFile = new Path(dir, "exclude");
  
  // Setup conf
  conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  writeConfigFile(hostsFile, null);
  writeConfigFile(excludeFile, null);    
  String[] args = {"-includeFile"};
  String ret = runTool(conf, args, true);
  assertEquals(hostsFile.toUri().getPath(), ret.trim());
  cleanupFile(localFileSys, excludeFile.getParent());
}
 
Example 3
Source File: TestGetConf.java    From hadoop with Apache License 2.0
@Test
public void TestGetConfExcludeCommand() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  Path dir = new Path(workingDir, System.getProperty("test.build.data", "target/test/data") + "/Getconf/");
  Path hostsFile = new Path(dir, "hosts");
  Path excludeFile = new Path(dir, "exclude");
  
  // Setup conf
  conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  writeConfigFile(hostsFile, null);
  writeConfigFile(excludeFile, null);    
  String[] args = {"-excludeFile"};
  String ret = runTool(conf, args, true);
  assertEquals(excludeFile.toUri().getPath(), ret.trim());
  cleanupFile(localFileSys, excludeFile.getParent());
}
 
Example 4
Source File: TestSaslDataTransfer.java    From big-c with Apache License 2.0
@Test
public void testPrivacy() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
    "authentication,integrity,privacy");
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "privacy");
  doTest(clientConf);
}
 
Example 5
Source File: TestGetConf.java    From big-c with Apache License 2.0
/** Setup federation nameServiceIds in the configuration */
private void setupNameServices(HdfsConfiguration conf, int nameServiceIdCount) {
  StringBuilder nsList = new StringBuilder();
  for (int i = 0; i < nameServiceIdCount; i++) {
    if (nsList.length() > 0) {
      nsList.append(",");
    }
    nsList.append(getNameServiceId(i));
  }
  conf.set(DFS_NAMESERVICES, nsList.toString());
}
 
Example 6
Source File: TestDFSHAAdmin.java    From hadoop with Apache License 2.0
/**
 * Test that, if automatic HA is enabled, none of the mutative operations
 * will succeed unless the -forcemanual flag is specified.
 * @throws Exception
 */
@Test
public void testMutativeOperationsWithAutoHaEnabled() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  
  // Turn on auto-HA in the config
  HdfsConfiguration conf = getHAConf();
  conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);

  // Should fail without the forcemanual flag
  assertEquals(-1, runTool("-transitionToActive", "nn1"));
  assertTrue(errOutput.contains("Refusing to manually manage"));
  assertEquals(-1, runTool("-transitionToStandby", "nn1"));
  assertTrue(errOutput.contains("Refusing to manually manage"));

  Mockito.verify(mockProtocol, Mockito.never())
    .transitionToActive(anyReqInfo());
  Mockito.verify(mockProtocol, Mockito.never())
    .transitionToStandby(anyReqInfo());

  // Force flag should bypass the check and change the request source
  // for the RPC
  setupConfirmationOnSystemIn();
  assertEquals(0, runTool("-transitionToActive", "-forcemanual", "nn1"));
  setupConfirmationOnSystemIn();
  assertEquals(0, runTool("-transitionToStandby", "-forcemanual", "nn1"));

  Mockito.verify(mockProtocol, Mockito.times(1)).transitionToActive(
      reqInfoCaptor.capture());
  Mockito.verify(mockProtocol, Mockito.times(1)).transitionToStandby(
      reqInfoCaptor.capture());
  
  // All of the RPCs should have had the "force" source
  for (StateChangeRequestInfo ri : reqInfoCaptor.getAllValues()) {
    assertEquals(RequestSource.REQUEST_BY_USER_FORCED, ri.getSource());
  }
}
 
Example 7
Source File: TestGetConf.java    From big-c with Apache License 2.0
@Test(timeout=10000)
public void testGetSpecificKey() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set("mykey", " myval ");
  String[] args = {"-confKey", "mykey"};
  String toolResult = runTool(conf, args, true);
  assertEquals(String.format("myval%n"), toolResult);
}
 
Example 8
Source File: TestGetConf.java    From big-c with Apache License 2.0
/**
 * Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP},
 * {@link Command#SECONDARY} and {@link Command#NNRPCADDRESSES}
 */
@Test(timeout=10000)
public void testTool() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration(false);
  for (Command cmd : Command.values()) {
    CommandHandler handler = Command.getHandler(cmd.getName());
    if (handler.key != null && !"-confKey".equals(cmd.getName())) {
      // Add the key to the conf and ensure tool returns the right value
      String[] args = {cmd.getName()};
      conf.set(handler.key, "value");
      assertTrue(runTool(conf, args, true).contains("value"));
    }
  }
}
 
Example 9
Source File: TestDFSHAAdmin.java    From hadoop with Apache License 2.0
@Test
public void testFailoverWithAutoHa() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  // Turn on auto-HA in the config
  HdfsConfiguration conf = getHAConf();
  conf.setBoolean(DFSConfigKeys.DFS_HA_AUTO_FAILOVER_ENABLED_KEY, true);
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);

  assertEquals(0, runTool("-failover", "nn1", "nn2"));
  Mockito.verify(mockZkfcProtocol).gracefulFailover();
}
 
Example 10
Source File: TestSaslDataTransfer.java    From hadoop with Apache License 2.0
@Test
public void testDataNodeAbortsIfNotHttpsOnly() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig("authentication");
  clusterConf.set(DFS_HTTP_POLICY_KEY,
    HttpConfig.Policy.HTTP_AND_HTTPS.name());
  exception.expect(RuntimeException.class);
  exception.expectMessage("Cannot start secure DataNode");
  startCluster(clusterConf);
}
 
Example 11
Source File: TestCreateEditsLog.java    From big-c with Apache License 2.0
/**
 * Tests that an edits log created using CreateEditsLog is valid and can be
 * loaded successfully by a namenode.
 */
@Test(timeout=60000)
public void testCanLoadCreatedEditsLog() throws Exception {
  // Format namenode.
  HdfsConfiguration conf = new HdfsConfiguration();
  File nameDir = new File(HDFS_DIR, "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, Util.fileAsURI(nameDir).toString());
  DFSTestUtil.formatNameNode(conf);

  // Call CreateEditsLog and move the resulting edits to the name dir.
  CreateEditsLog.main(new String[] { "-f", "1000", "0", "1", "-d",
    TEST_DIR.getAbsolutePath() });
  Path editsWildcard = new Path(TEST_DIR.getAbsolutePath(), "*");
  FileContext localFc = FileContext.getLocalFSFileContext();
  for (FileStatus edits: localFc.util().globStatus(editsWildcard)) {
    Path src = edits.getPath();
    Path dst = new Path(new File(nameDir, "current").getAbsolutePath(),
      src.getName());
    localFc.rename(src, dst);
  }

  // Start a namenode to try to load the edits.
  cluster = new MiniDFSCluster.Builder(conf)
    .format(false)
    .manageNameDfsDirs(false)
    .waitSafeMode(false)
    .build();
  cluster.waitClusterUp();

  // Test is successful if no exception is thrown.
}
 
Example 12
Source File: TestSaslDataTransfer.java    From big-c with Apache License 2.0
@Test
public void testServerSaslNoClientSasl() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
    "authentication,integrity,privacy");
  // Set short retry timeouts so this test runs faster
  clusterConf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");

  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataNode.class));
  try {
    doTest(clientConf);
    Assert.fail("Should fail if SASL data transfer protection is not " +
        "configured or not supported in client");
  } catch (IOException e) {
    GenericTestUtils.assertMatches(e.getMessage(), 
        "could only be replicated to 0 nodes");
  } finally {
    logs.stopCapturing();
  }

  GenericTestUtils.assertMatches(logs.getOutput(),
      "Failed to read expected SASL data transfer protection " +
      "handshake from client at");
}
 
Example 13
Source File: TestDecommissioningStatus.java    From big-c with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);

  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
 
Example 14
Source File: TestDFSHAAdmin.java    From big-c with Apache License 2.0
@Test
public void testFailoverWithFencerAndNameservice() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-ns", "ns1", "-failover", "nn1", "nn2"));
}
 
Example 15
Source File: TestSaslDataTransfer.java    From big-c with Apache License 2.0
@Test
public void testAuthentication() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
    "authentication,integrity,privacy");
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
  doTest(clientConf);
}
 
Example 16
Source File: TestDFSHAAdmin.java    From big-c with Apache License 2.0
@Test
public void testFailoverWithFenceAndBadFencer() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, "foobar!");
  tool.setConf(conf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
 
Example 17
Source File: TestDFSHAAdmin.java    From hadoop with Apache License 2.0
/**
 * Test that the fencing configuration can be overridden per-nameservice
 * or per-namenode.
 */
@Test
public void testFencingConfigPerNameNode() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();

  final String nsSpecificKey = DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY + "." + NSID;
  final String nnSpecificKey = nsSpecificKey + ".nn1";
  
  HdfsConfiguration conf = getHAConf();
  // Set the default fencer to succeed
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
  
  // Set the NN-specific fencer to fail. Should fail to fence.
  conf.set(nnSpecificKey, getFencerFalseCommand());
  tool.setConf(conf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
  conf.unset(nnSpecificKey);

  // Set an NS-specific fencer to fail. Should fail.
  conf.set(nsSpecificKey, getFencerFalseCommand());
  tool.setConf(conf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "--forcefence"));
  
  // Set the NS-specific fencer to succeed. Should succeed
  conf.set(nsSpecificKey, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence"));
}
 
Example 18
Source File: TestEnhancedByteBufferAccess.java    From hadoop with Apache License 2.0
@Test
public void test2GBMmapLimit() throws Exception {
  Assume.assumeTrue(BlockReaderTestUtil.shouldTestLargeFiles());
  HdfsConfiguration conf = initZeroCopyTest();
  final long TEST_FILE_LENGTH = 2469605888L;
  conf.set(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY, "NULL");
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, TEST_FILE_LENGTH);
  MiniDFSCluster cluster = null;
  final Path TEST_PATH = new Path("/a");
  final String CONTEXT = "test2GBMmapLimit";
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);

  FSDataInputStream fsIn = null, fsIn2 = null;
  ByteBuffer buf1 = null, buf2 = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LENGTH, (short)1, 0xB);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    
    fsIn = fs.open(TEST_PATH);
    buf1 = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(1, buf1.remaining());
    fsIn.releaseBuffer(buf1);
    buf1 = null;
    fsIn.seek(2147483640L);
    buf1 = fsIn.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(7, buf1.remaining());
    Assert.assertEquals(Integer.MAX_VALUE, buf1.limit());
    fsIn.releaseBuffer(buf1);
    buf1 = null;
    Assert.assertEquals(2147483647L, fsIn.getPos());
    try {
      buf1 = fsIn.read(null, 1024,
          EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.fail("expected UnsupportedOperationException");
    } catch (UnsupportedOperationException e) {
      // expected; can't read past 2GB boundary.
    }
    fsIn.close();
    fsIn = null;

    // Now create another file with normal-sized blocks, and verify we
    // can read past 2GB
    final Path TEST_PATH2 = new Path("/b");
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 268435456L);
    DFSTestUtil.createFile(fs, TEST_PATH2, 1024 * 1024, TEST_FILE_LENGTH,
        268435456L, (short)1, 0xA);
    
    fsIn2 = fs.open(TEST_PATH2);
    fsIn2.seek(2147483640L);
    buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(8, buf2.remaining());
    Assert.assertEquals(2147483648L, fsIn2.getPos());
    fsIn2.releaseBuffer(buf2);
    buf2 = null;
    buf2 = fsIn2.read(null, 1024, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(1024, buf2.remaining());
    Assert.assertEquals(2147484672L, fsIn2.getPos());
    fsIn2.releaseBuffer(buf2);
    buf2 = null;
  } finally {
    if (buf1 != null) {
      fsIn.releaseBuffer(buf1);
    }
    if (buf2 != null) {
      fsIn2.releaseBuffer(buf2);
    }
    IOUtils.cleanup(null, fsIn, fsIn2);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}