Java Code Examples for org.apache.hadoop.hdfs.HdfsConfiguration#setInt()

The following examples show how to use org.apache.hadoop.hdfs.HdfsConfiguration#setInt(). The originating project and source file are noted above each example.
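Before the project examples, here is a minimal, self-contained sketch of the call itself, assuming hadoop-hdfs and its dependencies are on the classpath. setInt(key, value) is inherited from org.apache.hadoop.conf.Configuration; HdfsConfiguration additionally loads hdfs-default.xml and hdfs-site.xml as default resources. The class name and the dfs.replication value below are illustrative choices, not taken from the examples that follow.

import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HdfsSetIntDemo {
  public static void main(String[] args) {
    // HdfsConfiguration layers hdfs-default.xml and hdfs-site.xml on top of the core defaults.
    HdfsConfiguration conf = new HdfsConfiguration();
    // setInt() stores the value as a string; getInt() parses it back, falling back
    // to the supplied default when the key is unset.
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
    System.out.println(conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3)); // prints 2
  }
}
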
Example 1
Source File: TestEnhancedByteBufferAccess.java    From hadoop with Apache License 2.0
public static HdfsConfiguration initZeroCopyTest() {
  Assume.assumeTrue(NativeIO.isAvailable());
  Assume.assumeTrue(SystemUtils.IS_OS_UNIX);
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_SIZE, 3);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS, 100);
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
      new File(sockDir.getDir(),
        "TestRequestMmapAccess._PORT.sock").getAbsolutePath());
  conf.setBoolean(DFSConfigKeys.
      DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, true);
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
  return conf;
}
 
Example 2
Source File: TestDataNodeVolumeFailureReporting.java    From big-c with Apache License 2.0
/**
 * Initializes the cluster.
 *
 * @param numDataNodes number of datanodes
 * @param storagesPerDatanode number of storage locations on each datanode
 * @param failedVolumesTolerated number of acceptable volume failures
 * @throws Exception if there is any failure
 */
private void initCluster(int numDataNodes, int storagesPerDatanode,
    int failedVolumesTolerated) throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      failedVolumesTolerated);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
      .storagesPerDatanode(storagesPerDatanode).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(
      cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
  volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
 
Example 3
Source File: SaslDataTransferTestCase.java    From hadoop with Apache License 2.0
/**
 * Creates configuration for starting a secure cluster.
 *
 * @param dataTransferProtection supported QOPs
 * @return configuration for starting a secure cluster
 * @throws Exception if there is any failure
 */
protected HdfsConfiguration createSecureConfig(
    String dataTransferProtection) throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
  conf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  conf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  conf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  conf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, dataTransferProtection);
  conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(this.getClass());
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
  return conf;
}
 
Example 4
Source File: TestFileTruncate.java    From hadoop with Apache License 2.0
@BeforeClass
public static void startUp() throws IOException {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
  conf.setLong(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf)
      .format(true)
      .numDataNodes(DATANODE_NUM)
      .nameNodePort(NameNode.DEFAULT_PORT)
      .waitSafeMode(true)
      .build();
  fs = cluster.getFileSystem();
}
 
Example 5
Source File: TestNameNodeRetryCacheMetrics.java    From hadoop with Apache License 2.0
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
 
Example 6
Source File: TestNameNodeRetryCacheMetrics.java    From big-c with Apache License 2.0
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(3)
      .build();
  cluster.waitActive();
  cluster.transitionToActive(namenodeId);
  HATestUtil.setFailoverConfigurations(cluster, conf);
  filesystem = (DistributedFileSystem) HATestUtil.configureFailoverFs(cluster, conf);
  namesystem = cluster.getNamesystem(namenodeId);
  metrics = namesystem.getRetryCache().getMetricsForTests();
}
 
Example 7
Source File: TestCacheDirectives.java    From hadoop with Apache License 2.0
private static HdfsConfiguration createCachingConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, CACHE_CAPACITY);
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
  // set low limits here for testing purposes
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      2);

  return conf;
}
 
Example 8
Source File: TestDataNodeExit.java    From big-c with Apache License 2.0
@Before
public void setUp() throws IOException {
  conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 100);
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(3))
    .build();
  for (int i = 0; i < 3; i++) {
    cluster.waitActive(i);
  }
}
 
Example 9
Source File: TestDataNodeRollingUpgrade.java    From hadoop with Apache License 2.0
private void startCluster() throws IOException {
  conf = new HdfsConfiguration();
  conf.setInt("dfs.blocksize", 1024*1024);
  cluster = new Builder(conf).numDataNodes(REPL_FACTOR).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode(0);
  assertNotNull(nn);
  dn0 = cluster.getDataNodes().get(0);
  assertNotNull(dn0);
  blockPoolId = cluster.getNameNode(0).getNamesystem().getBlockPoolId();
}
 
Example 10
Source File: TestCacheDirectives.java    From big-c with Apache License 2.0
private static HdfsConfiguration createCachingConf() {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setLong(DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  conf.setLong(DFS_DATANODE_MAX_LOCKED_MEMORY_KEY, CACHE_CAPACITY);
  conf.setLong(DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setLong(DFS_CACHEREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setLong(DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS, 1000);
  // set low limits here for testing purposes
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
      2);

  return conf;
}
 
Example 11
Source File: TestDataNodeVolumeFailure.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  // bring up a cluster of 2
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
  // Allow a single volume failure (there are two volumes)
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dn_num).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = new File(cluster.getDataDirectory());
}
 
Example 12
Source File: TestSaslDataTransfer.java    From big-c with Apache License 2.0
@Test
public void testServerSaslNoClientSasl() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
    "authentication,integrity,privacy");
  // Set short retry timeouts so this test runs faster
  clusterConf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");

  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataNode.class));
  try {
    doTest(clientConf);
    Assert.fail("Should fail if SASL data transfer protection is not " +
        "configured or not supported in client");
  } catch (IOException e) {
    GenericTestUtils.assertMatches(e.getMessage(), 
        "could only be replicated to 0 nodes");
  } finally {
    logs.stopCapturing();
  }

  GenericTestUtils.assertMatches(logs.getOutput(),
      "Failed to read expected SASL data transfer protection " +
      "handshake from client at");
}
 
Example 13
Source File: TestDecommissioningStatus.java    From hadoop with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);

  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
 
Example 14
Source File: TestDecommissioningStatus.java    From big-c with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
      false);

  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, "build/test/data/work-dir/decommission");
  assertTrue(localFileSys.mkdirs(dir));
  excludeFile = new Path(dir, "exclude");
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  Path includeFile = new Path(dir, "include");
  conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);

  writeConfigFile(localFileSys, excludeFile, null);
  writeConfigFile(localFileSys, includeFile, null);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  cluster.getNamesystem().getBlockManager().getDatanodeManager()
      .setHeartbeatExpireInterval(3000);
  Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
 
Example 15
Source File: HDFSContract.java    From big-c with Apache License 2.0
public static void createCluster() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.addResource(CONTRACT_HDFS_XML);
  // hack in a 256-byte block size
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

  cluster =
    new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitClusterUp();
}
 
Example 16
Source File: TestSaslDataTransfer.java    From hadoop with Apache License 2.0
@Test
public void testServerSaslNoClientSasl() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
    "authentication,integrity,privacy");
  // Set short retry timeouts so this test runs faster
  clusterConf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");

  LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
      LogFactory.getLog(DataNode.class));
  try {
    doTest(clientConf);
    Assert.fail("Should fail if SASL data transfer protection is not " +
        "configured or not supported in client");
  } catch (IOException e) {
    GenericTestUtils.assertMatches(e.getMessage(), 
        "could only be replicated to 0 nodes");
  } finally {
    logs.stopCapturing();
  }

  GenericTestUtils.assertMatches(logs.getOutput(),
      "Failed to read expected SASL data transfer protection " +
      "handshake from client at");
}
 
Example 17
Source File: SecureClusterTest.java    From knox with Apache License 2.0
private static void initKdc() throws Exception {
  Properties kdcConf = MiniKdc.createConf();
  kdc = new MiniKdc(kdcConf, baseDir);
  kdc.start();

  configuration = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.KERBEROS, configuration);
  UserGroupInformation.setConfiguration(configuration);
  assertTrue("Expected configuration to enable security", UserGroupInformation.isSecurityEnabled());
  userName = UserGroupInformation.createUserForTesting("guest", new String[]{"users"}).getUserName();
  File keytabFile = new File(baseDir, userName + ".keytab");
  String keytab = keytabFile.getAbsolutePath();
  // Windows will not reverse name lookup "127.0.0.1" to "localhost".
  String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost";
  kdc.createPrincipal(keytabFile, userName + "/" + krbInstance, "HTTP/" + krbInstance);
  String hdfsPrincipal = userName + "/" + krbInstance + "@" + kdc.getRealm();
  String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm();

  configuration.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  configuration.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  configuration.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  configuration.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  configuration.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  configuration.set(DFS_JOURNALNODE_KEYTAB_FILE_KEY, keytab);
  configuration.set(DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  configuration.set(DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY, spnegoPrincipal);
  configuration.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  configuration.set(DFS_DATA_ENCRYPTION_ALGORITHM_KEY, "authentication");
  configuration.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTP_AND_HTTPS.name());
  configuration.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  configuration.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  configuration.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
  configuration.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 10);
  configuration.set("hadoop.proxyuser." + userName + ".hosts", "*");
  configuration.set("hadoop.proxyuser." + userName + ".groups", "*");
  configuration.setBoolean("dfs.permissions", true);

  String keystoresDir = baseDir.getAbsolutePath();
  File sslClientConfFile = new File(keystoresDir + "/ssl-client.xml");
  File sslServerConfFile = new File(keystoresDir + "/ssl-server.xml");
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, keystoresDir, configuration, false);
  configuration.set(DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY,
      sslClientConfFile.getName());
  configuration.set(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
      sslServerConfFile.getName());

  setupKnox(keytab, hdfsPrincipal);
}
 
Example 18
Source File: TestPendingCorruptDnMessages.java    From big-c with Apache License 2.0
@Test
public void testChangedStorageId() throws IOException, URISyntaxException,
    InterruptedException {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .build();
  
  try {
    cluster.transitionToActive(0);
    
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    OutputStream out = fs.create(filePath);
    out.write("foo bar baz".getBytes());
    out.close();
    
    HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
        cluster.getNameNode(1));
    
    // Change the gen stamp of the block on datanode to go back in time (gen
    // stamps start at 1000)
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
    assertTrue(cluster.changeGenStampOfBlock(0, block, 900));
    
    // Stop the DN so the replica with the changed gen stamp will be reported
    // when this DN starts up.
    DataNodeProperties dnProps = cluster.stopDataNode(0);
    
    // Restart the namenode so that when the DN comes up it will see an initial
    // block report.
    cluster.restartNameNode(1, false);
    assertTrue(cluster.restartDataNode(dnProps, true));
    
    // Wait until the standby NN queues up the corrupt block in the pending DN
    // message queue.
    while (cluster.getNamesystem(1).getBlockManager()
        .getPendingDataNodeMessageCount() < 1) {
      ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
    }
    
    assertEquals(1, cluster.getNamesystem(1).getBlockManager()
        .getPendingDataNodeMessageCount());
    String oldStorageId = getRegisteredDatanodeUid(cluster, 1);
    
    // Reformat/restart the DN.
    assertTrue(wipeAndRestartDn(cluster, 0));
    
    // Give the DN time to start up and register, which will cause the
    // DatanodeManager to dissociate the old storage ID from the DN xfer addr.
    String newStorageId = "";
    do {
      ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
      newStorageId = getRegisteredDatanodeUid(cluster, 1);
      System.out.println("====> oldStorageId: " + oldStorageId +
          " newStorageId: " + newStorageId);
    } while (newStorageId.equals(oldStorageId));
    
    assertEquals(0, cluster.getNamesystem(1).getBlockManager()
        .getPendingDataNodeMessageCount());
    
    // Now try to fail over.
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
  } finally {
    cluster.shutdown();
  }
}
 
Example 19
Source File: TestEnhancedByteBufferAccess.java    From hadoop with Apache License 2.0
@Test
public void testClientMmapDisable() throws Exception {
  HdfsConfiguration conf = initZeroCopyTest();
  conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, false);
  MiniDFSCluster cluster = null;
  final Path TEST_PATH = new Path("/a");
  final int TEST_FILE_LENGTH = 16385;
  final int RANDOM_SEED = 23453;
  final String CONTEXT = "testClientMmapDisable";
  FSDataInputStream fsIn = null;
  DistributedFileSystem fs = null;
  conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT);

  try {
    // With DFS_CLIENT_MMAP_ENABLED set to false, we should not do memory
    // mapped reads.
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    try {
      fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
      Assert.fail("expected zero-copy read to fail when client mmaps " +
          "were disabled.");
    } catch (UnsupportedOperationException e) {
      // expected: the zero-copy read path is unavailable when client mmaps are disabled
    }
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }

  fsIn = null;
  fs = null;
  cluster = null;
  try {
    // Now try again with DFS_CLIENT_MMAP_CACHE_SIZE == 0.  It should work.
    conf.setBoolean(DFS_CLIENT_MMAP_ENABLED, true);
    conf.setInt(DFS_CLIENT_MMAP_CACHE_SIZE, 0);
    conf.set(DFSConfigKeys.DFS_CLIENT_CONTEXT, CONTEXT + ".1");
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, TEST_PATH,
        TEST_FILE_LENGTH, (short)1, RANDOM_SEED);
    DFSTestUtil.waitReplication(fs, TEST_PATH, (short)1);
    fsIn = fs.open(TEST_PATH);
    ByteBuffer buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    fsIn.releaseBuffer(buf);
    // Test EOF behavior
    IOUtils.skipFully(fsIn, TEST_FILE_LENGTH - 1);
    buf = fsIn.read(null, 1, EnumSet.of(ReadOption.SKIP_CHECKSUMS));
    Assert.assertEquals(null, buf);
  } finally {
    if (fsIn != null) fsIn.close();
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
  }
}
 
Example 20
Source File: TestSecureNNWithQJM.java    From hadoop with Apache License 2.0
@BeforeClass
public static void init() throws Exception {
  baseDir = new File(System.getProperty("test.build.dir", "target/test-dir"),
    TestSecureNNWithQJM.class.getSimpleName());
  FileUtil.fullyDelete(baseDir);
  assertTrue(baseDir.mkdirs());

  Properties kdcConf = MiniKdc.createConf();
  kdc = new MiniKdc(kdcConf, baseDir);
  kdc.start();

  baseConf = new HdfsConfiguration();
  SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS,
    baseConf);
  UserGroupInformation.setConfiguration(baseConf);
  assertTrue("Expected configuration to enable security",
    UserGroupInformation.isSecurityEnabled());

  String userName = UserGroupInformation.getLoginUser().getShortUserName();
  File keytabFile = new File(baseDir, userName + ".keytab");
  String keytab = keytabFile.getAbsolutePath();
  // Windows will not reverse name lookup "127.0.0.1" to "localhost".
  String krbInstance = Path.WINDOWS ? "127.0.0.1" : "localhost";
  kdc.createPrincipal(keytabFile,
    userName + "/" + krbInstance,
    "HTTP/" + krbInstance);
  String hdfsPrincipal = userName + "/" + krbInstance + "@" + kdc.getRealm();
  String spnegoPrincipal = "HTTP/" + krbInstance + "@" + kdc.getRealm();

  baseConf.set(DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  baseConf.set(DFS_NAMENODE_KEYTAB_FILE_KEY, keytab);
  baseConf.set(DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  baseConf.set(DFS_DATANODE_KEYTAB_FILE_KEY, keytab);
  baseConf.set(DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, spnegoPrincipal);
  baseConf.set(DFS_JOURNALNODE_KEYTAB_FILE_KEY, keytab);
  baseConf.set(DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY, hdfsPrincipal);
  baseConf.set(DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
    spnegoPrincipal);
  baseConf.setBoolean(DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  baseConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
  baseConf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  baseConf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  baseConf.set(DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
  baseConf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
  baseConf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SASL_KEY, 10);

  String keystoresDir = baseDir.getAbsolutePath();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(
    TestSecureNNWithQJM.class);
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, baseConf, false);
}