org.apache.hadoop.hdfs.DFSConfigKeys Java Examples

The following examples show how to use org.apache.hadoop.hdfs.DFSConfigKeys. Each example is drawn from an open-source project; the originating project, source file, and license are noted above the code.
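
Most of the examples below follow the same two habits: write a setting through its *_KEY constant rather than the raw property string, and read it back with the paired *_DEFAULT constant as the fallback. A minimal, self-contained sketch of that pattern (the class name DFSConfigKeysDemo is made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class DFSConfigKeysDemo {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Set a value through the key constant instead of the literal "dfs.blocksize".
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 64L * 1024 * 1024);
    // Read it back, using the paired default as the fallback.
    long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
    System.out.println("dfs.blocksize = " + blockSize);
  }
}
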
Example #1
Source File: TestBlocksWithNotEnoughRacks.java    From hadoop with Apache License 2.0
private Configuration getConf() {
  Configuration conf = new HdfsConfiguration();

  // Lower the heart beat interval so the NN quickly learns of dead
  // or decommissioned DNs and the NN issues replication and invalidation
  // commands quickly (as replies to heartbeats)
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);

  // Have the NN ReplicationMonitor compute the replication and
  // invalidation commands to send DNs every second.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);

  // Have the NN check for pending replications every second so it
  // quickly schedules additional replicas as they are identified.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);

  // The DNs report blocks every second.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);

  // Indicates we have multiple racks
  conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
  return conf;
}
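
A hypothetical consumer of getConf(), mirroring the MiniDFSCluster builder pattern used throughout this page (the node count is illustrative):

// Hypothetical usage of the tuned configuration above.
MiniDFSCluster cluster = new MiniDFSCluster.Builder(getConf())
    .numDataNodes(3).build();
cluster.waitActive();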
 
Example #2
Source File: TestDiskError.java    From hadoop with Apache License 2.0
/**
 * Check that the permissions of the local DN directories are as expected.
 */
@Test
public void testLocalDirs() throws Exception {
  Configuration conf = new Configuration();
  final String permStr = conf.get(
    DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY);
  FsPermission expected = new FsPermission(permStr);

  // Check permissions on directories in 'dfs.datanode.data.dir'
  FileSystem localFS = FileSystem.getLocal(conf);
  for (DataNode dn : cluster.getDataNodes()) {
    for (FsVolumeSpi v : dn.getFSDataset().getVolumes()) {
      String dir = v.getBasePath();
      Path dataDir = new Path(dir);
      FsPermission actual = localFS.getFileStatus(dataDir).getPermission();
        assertEquals("Permission for dir: " + dataDir + ", is " + actual +
            ", while expected is " + expected, expected, actual);
    }
  }
}
 
Example #3
Source File: TestNfs3HttpServer.java    From big-c with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY,
      HttpConfig.Policy.HTTP_AND_HTTPS.name());
  conf.set(NfsConfigKeys.NFS_HTTP_ADDRESS_KEY, "localhost:0");
  conf.set(NfsConfigKeys.NFS_HTTPS_ADDRESS_KEY, "localhost:0");
  // Use an ephemeral port in case tests are running in parallel
  conf.setInt(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY, 0);
  conf.setInt(NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY, 0);
  
  File base = new File(BASEDIR);
  FileUtil.fullyDelete(base);
  base.mkdirs();
  keystoresDir = new File(BASEDIR).getAbsolutePath();
  sslConfDir = KeyStoreTestUtil.getClasspathDir(TestNfs3HttpServer.class);
  KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
}
 
Example #4
Source File: TestDelegationTokenRenewer.java    From big-c with Apache License 2.0
/**
 * Auxiliary method to create a delegation token
 * @param renewer the designated renewer of the token
 * @return a new MyToken for user1
 * @throws IOException
 */
static MyToken createTokens(Text renewer) 
  throws IOException {
  Text user1 = new Text("user1");
  
  MyDelegationTokenSecretManager sm = new MyDelegationTokenSecretManager(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT,
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT,
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT,
      3600000, null);
  sm.startThreads();
  
  DelegationTokenIdentifier dtId1 = 
    new DelegationTokenIdentifier(user1, renewer, user1);
  
  MyToken token1 = new MyToken(dtId1, sm);
 
  token1.setService(new Text("localhost:0"));
  return token1;
}
 
Example #5
Source File: TestBootstrapStandbyWithBKJM.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
      .createJournalURI("/bootstrapStandby").toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      SlowCodec.class.getCanonicalName());
  CompressionCodecFactory.setCodecClasses(conf,
      ImmutableList.<Class> of(SlowCodec.class));
  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1").addNN(
          new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)).addNN(
          new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
  cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
      .numDataNodes(1).manageNameDfsSharedDirs(false).build();
  cluster.waitActive();
}
 
Example #6
Source File: TestGenericJournalConf.java    From hadoop with Apache License 2.0
/**
 * Test that an implementation of JournalManager without a
 * (Configuration, URI) constructor throws an exception
 */
@Test
public void testBadConstructor() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
           BadConstructorJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
           "dummy://test");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    fail("Should have failed before this point");
  } catch (IllegalArgumentException iae) {
    if (!iae.getMessage().contains("Unable to construct journal")) {
      fail("Should have failed with unable to construct exception");
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #7
Source File: FSEditLogOp.java    From hadoop with Apache License 2.0
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param limiter The limiter restricting how much may be read from the stream.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = DataChecksum.newCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layoutversion
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layout will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
 
Example #8
Source File: TestFsck.java    From big-c with Apache License 2.0
@Test
public void testFsckNonExistent() throws Exception {
  DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
      setNumFiles(20).build();
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
    fs = cluster.getFileSystem();
    util.createFiles(fs, "/srcdat");
    util.waitReplication(fs, "/srcdat", (short)3);
    String outStr = runFsck(conf, 0, true, "/non-existent");
    assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
    System.out.println(outStr);
    util.cleanup(fs, "/srcdat");
  } finally {
    if (fs != null) {try{fs.close();} catch(Exception e){}}
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #9
Source File: TestStandbyCheckpoints.java    From big-c with Apache License 2.0
/**
 * Test for the case when the SBN is configured to checkpoint based
 * on a time period, but no transactions are happening on the
 * active. Thus, it would want to save a second checkpoint at the
 * same txid, which is a no-op. This test makes sure this doesn't
 * cause any problem.
 */
@Test(timeout = 300000)
public void testCheckpointWhenNoNewTransactionsHappened()
    throws Exception {
  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nn1 = cluster.getNameNode(1);
 
  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);
  
  // We shouldn't save any checkpoints at txid=0
  Thread.sleep(1000);
  Mockito.verify(spyImage1, Mockito.never())
    .saveNamespace((FSNamesystem) Mockito.anyObject());
 
  // Roll the primary and wait for the standby to catch up
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  Thread.sleep(2000);
  
  // We should make exactly one checkpoint at this new txid. 
  Mockito.verify(spyImage1, Mockito.times(1)).saveNamespace(
      (FSNamesystem) Mockito.anyObject(), Mockito.eq(NameNodeFile.IMAGE),
      (Canceler) Mockito.anyObject());
}
 
Example #10
Source File: TestBalancer.java    From hadoop with Apache License 2.0
private void runBalancer(Configuration conf,
   long totalUsedSpace, long totalCapacity, Balancer.Parameters p,
   int excludedNodes) throws Exception {
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);

  // start rebalancing
  Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
  final int r = runBalancer(namenodes, p, conf);
  if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY,
      DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) == 0) {
    assertEquals(ExitStatus.NO_MOVE_PROGRESS.getExitCode(), r);
    return;
  } else {
    assertEquals(ExitStatus.SUCCESS.getExitCode(), r);
  }
  waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
  LOG.info("  .");
  waitForBalancer(totalUsedSpace, totalCapacity, client, cluster, p, excludedNodes);
}
 
Example #11
Source File: TestDataNodeVolumeFailureReporting.java    From hadoop with Apache License 2.0
/**
 * Reconfigure a DataNode by setting a new list of volumes.
 *
 * @param dn DataNode to reconfigure
 * @param newVols new volumes to configure
 * @throws Exception if there is any failure
 */
private static void reconfigureDataNode(DataNode dn, File... newVols)
    throws Exception {
  StringBuilder dnNewDataDirs = new StringBuilder();
  for (File newVol: newVols) {
    if (dnNewDataDirs.length() > 0) {
      dnNewDataDirs.append(',');
    }
    dnNewDataDirs.append(newVol.getAbsolutePath());
  }
  try {
    dn.reconfigurePropertyImpl(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
        dnNewDataDirs.toString());
  } catch (ReconfigurationException e) {
    // This can be thrown if reconfiguration tries to use a failed volume.
    // We need to swallow the exception, because some of our tests want to
    // cover this case.
    LOG.warn("Could not reconfigure DataNode.", e);
  }
}
 
Example #12
Source File: HATestUtil.java    From hadoop with Apache License 2.0
/**
 * Sets the required configurations for performing failover
 */
public static void setFailoverConfigurations(Configuration conf,
    String logicalName, InetSocketAddress nnAddr1,
    InetSocketAddress nnAddr2) {
  String nameNodeId1 = "nn1";
  String nameNodeId2 = "nn2";
  String address1 = "hdfs://" + nnAddr1.getHostName() + ":" + nnAddr1.getPort();
  String address2 = "hdfs://" + nnAddr2.getHostName() + ":" + nnAddr2.getPort();
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      logicalName, nameNodeId1), address1);
  conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY,
      logicalName, nameNodeId2), address2);
  
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, logicalName);
  conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, logicalName),
      nameNodeId1 + "," + nameNodeId2);
  conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      ConfiguredFailoverProxyProvider.class.getName());
  conf.set("fs.defaultFS", "hdfs://" + logicalName);
}
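
Once these keys are set, a client resolves the logical name through the configured failover proxy provider. A hypothetical caller of the helper above (the logical name "mycluster" and the host names are illustrative, not from the source):

// Hypothetical usage; substitute real NameNode addresses.
Configuration conf = new Configuration();
HATestUtil.setFailoverConfigurations(conf, "mycluster",
    new InetSocketAddress("nn1.example.com", 8020),
    new InetSocketAddress("nn2.example.com", 8020));
// fs.defaultFS is now hdfs://mycluster; ConfiguredFailoverProxyProvider
// locates the active NameNode on each call.
FileSystem fs = FileSystem.get(conf);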
 
Example #13
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxies.createFailoverProxyProvider(conf, 
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);
  
  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
 
Example #14
Source File: TestStartup.java    From big-c with Apache License 2.0
/**
 * secnn-6
 * checkpoint when edits and image are stored in the same directory
 * @throws IOException
 */
@Test
public void testChkpointStartup2() throws IOException{
  LOG.info("--starting checkpointStartup2 - same directory for checkpoint");
  // different name dirs
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      fileAsURI(new File(hdfsDir, "edits")).toString());
  // same checkpoint dirs
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
      fileAsURI(new File(hdfsDir, "chkpt")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "chkpt")).toString());

  createCheckPoint(1);

  corruptNameNodeFiles();
  checkNameNodeFiles();

}
 
Example #15
Source File: WebHdfsTestUtil.java    From hadoop with Apache License 2.0
public static WebHdfsFileSystem getWebHdfsFileSystem(
    final Configuration conf, String scheme) throws IOException,
    URISyntaxException {
  final String uri;

  if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
    uri = WebHdfsFileSystem.SCHEME + "://"
        + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
    uri = SWebHdfsFileSystem.SCHEME + "://"
        + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
  } else {
    throw new IllegalArgumentException("unknown scheme:" + scheme);
  }
  return (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
}
 
Example #16
Source File: TestWebHdfsWithMultipleNameNodes.java    From big-c with Apache License 2.0
private static void setupCluster(final int nNameNodes, final int nDataNodes)
    throws Exception {
  LOG.info("nNameNodes=" + nNameNodes + ", nDataNodes=" + nDataNodes);

  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(nNameNodes))
      .numDataNodes(nDataNodes)
      .build();
  cluster.waitActive();
  
  webhdfs = new WebHdfsFileSystem[nNameNodes];
  for(int i = 0; i < webhdfs.length; i++) {
    final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress();
    final String uri = WebHdfsFileSystem.SCHEME + "://"
        + addr.getHostName() + ":" + addr.getPort() + "/";
    webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf);
  }
}
 
Example #17
Source File: TestInitializeSharedEdits.java    From big-c with Apache License 2.0
@Before
public void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);
  
  MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
  
  cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(topology)
    .numDataNodes(0)
    .build();
  cluster.waitActive();

  shutdownClusterAndRemoveSharedEditsDir();
}
 
Example #18
Source File: TestGenericJournalConf.java    From big-c with Apache License 2.0
/** 
 * Test that an exception is thrown if a journal class doesn't exist
 * in the configuration 
 */
@Test(expected=IllegalArgumentException.class)
public void testNotConfigured() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();

  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
           "dummy://test");
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #19
Source File: FsVolumeImpl.java    From hadoop with Apache License 2.0
FsVolumeImpl(FsDatasetImpl dataset, String storageID, File currentDir,
    Configuration conf, StorageType storageType) throws IOException {
  this.dataset = dataset;
  this.storageID = storageID;
  this.reserved = conf.getLong(
      DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY,
      DFSConfigKeys.DFS_DATANODE_DU_RESERVED_DEFAULT);
  this.reservedForRbw = new AtomicLong(0L);
  this.currentDir = currentDir; 
  File parent = currentDir.getParentFile();
  this.usage = new DF(parent, conf);
  this.storageType = storageType;
  this.configuredCapacity = -1;
  cacheExecutor = initializeCacheExecutor(parent);
}
 
Example #20
Source File: TestDelegationToken.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  config.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, 10000);
  config.setLong(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, 5000);
  config.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  config.set("hadoop.security.auth_to_local",
      "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
  FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
  cluster = new MiniDFSCluster.Builder(config).numDataNodes(0).build();
  cluster.waitActive();
  dtSecretManager = NameNodeAdapter.getDtSecretManager(
      cluster.getNamesystem());
}
 
Example #21
Source File: TestFsLimits.java    From hadoop with Apache License 2.0
@Test
public void testMaxComponentLengthRename() throws Exception {
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 2);

  mkdirs("/5", null);
  rename("/5", "/555", PathComponentTooLongException.class);
  rename("/5", "/55", null);

  mkdirs("/6", null);
  deprecatedRename("/6", "/666", PathComponentTooLongException.class);
  deprecatedRename("/6", "/66", null);
}
 
Example #22
Source File: TestDataNodeHotSwapVolumes.java    From big-c with Apache License 2.0
@Test(timeout=60000)
public void testRemoveOneVolume()
    throws ReconfigurationException, InterruptedException, TimeoutException,
    IOException {
  startDFSCluster(1, 1);
  final short replFactor = 1;
  Path testFile = new Path("/test");
  createFile(testFile, 10, replFactor);

  DataNode dn = cluster.getDataNodes().get(0);
  Collection<String> oldDirs = getDataDirs(dn);
  String newDirs = oldDirs.iterator().next();  // Keep the first volume.
  dn.reconfigurePropertyImpl(
      DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, newDirs);
  assertFileLocksReleased(
    new ArrayList<String>(oldDirs).subList(1, oldDirs.size()));
  dn.scheduleAllBlockReport(0);

  try {
    DFSTestUtil.readFile(cluster.getFileSystem(), testFile);
    fail("Expect to throw BlockMissingException.");
  } catch (BlockMissingException e) {
    GenericTestUtils.assertExceptionContains("Could not obtain block", e);
  }

  Path newFile = new Path("/newFile");
  createFile(newFile, 6);

  String bpid = cluster.getNamesystem().getBlockPoolId();
  List<Map<DatanodeStorage, BlockListAsLongs>> blockReports =
      cluster.getAllBlockReports(bpid);
  assertEquals((int)replFactor, blockReports.size());

  BlockListAsLongs blocksForVolume1 =
      blockReports.get(0).values().iterator().next();
  // The first volume has half of the testFile and full of newFile.
  assertEquals(10 / 2 + 6, blocksForVolume1.getNumberOfBlocks());
}
 
Example #23
Source File: HBaseKerberosUtils.java    From hbase with Apache License 2.0
/**
 * Set up SSL configuration for HDFS NameNode and DataNode.
 * @param utility an HBaseCommonTestingUtility object.
 * @param clazz the caller test class.
 * @throws Exception if unable to set up SSL configuration
 */
public static void setSSLConfiguration(HBaseCommonTestingUtility utility, Class<?> clazz)
  throws Exception {
  Configuration conf = utility.getConfiguration();
  conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
  conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");

  File keystoresDir = new File(utility.getDataTestDir("keystore").toUri().getPath());
  keystoresDir.mkdirs();
  String sslConfDir = KeyStoreTestUtil.getClasspathDir(clazz);
  KeyStoreTestUtil.setupSSLConfig(keystoresDir.getAbsolutePath(), sslConfDir, conf, false);
}
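
A hypothetical caller, typically from a test's own setup method (in a real test the caller would pass its own class literal as clazz):

// Hypothetical setup; pass the calling test class in place of
// HBaseKerberosUtils.class to locate its classpath SSL config dir.
HBaseCommonTestingUtility utility = new HBaseCommonTestingUtility();
HBaseKerberosUtils.setSSLConfiguration(utility, HBaseKerberosUtils.class);
// utility.getConfiguration() now carries the HTTPS_ONLY policy plus the
// generated keystore and truststore locations.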
 
Example #24
Source File: TestDeleteRace.java    From hadoop with Apache License 2.0
@Test
public void testRenameRace() throws Exception {
  try {
    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    Path dirPath1 = new Path("/testRenameRace1");
    Path dirPath2 = new Path("/testRenameRace2");
    Path filePath = new Path("/testRenameRace1/file1");
    

    fs.mkdirs(dirPath1);
    FSDataOutputStream out = fs.create(filePath);
    Thread renameThread = new RenameThread(fs, dirPath1, dirPath2);
    renameThread.start();

    // write data and close to make sure a block is allocated.
    out.write(new byte[32], 0, 32);
    out.close();

    // Restart name node so that it replays edit. If old path was
    // logged in edit, it will fail to come up.
    cluster.restartNameNode(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #25
Source File: TestDelegationToken.java    From big-c with Apache License 2.0
@SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWebHdfsApi() throws Exception {
  ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL);
  final String uri = WebHdfsFileSystem.SCHEME + "://"
      + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
  //get file system as JobTracker
  final UserGroupInformation ugi = UserGroupInformation.createUserForTesting(
      "JobTracker", new String[]{"user"});
  final WebHdfsFileSystem webhdfs = ugi.doAs(
      new PrivilegedExceptionAction<WebHdfsFileSystem>() {
    @Override
    public WebHdfsFileSystem run() throws Exception {
      return (WebHdfsFileSystem)FileSystem.get(new URI(uri), config);
    }
  });

  { //test addDelegationTokens(..)
    Credentials creds = new Credentials();
    final Token<?> tokens[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(1, tokens.length);
    Assert.assertEquals(1, creds.numberOfTokens());
    Assert.assertSame(tokens[0], creds.getAllTokens().iterator().next());
    checkTokenIdentifier(ugi, tokens[0]);
    final Token<?> tokens2[] = webhdfs.addDelegationTokens("JobTracker", creds);
    Assert.assertEquals(0, tokens2.length);
  }
}
 
Example #26
Source File: TestAddBlock.java    From hadoop with Apache License 2.0
@Before
public void setup() throws IOException {
  conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .build();
  cluster.waitActive();
}
 
Example #27
Source File: DatanodeManager.java    From hadoop with Apache License 2.0
/**
 * Parse a DatanodeID from a hosts file entry
 * @param hostLine of the form [hostname|ip][:port]?
 * @return DatanodeID constructed from the given string
 */
private DatanodeID parseDNFromHostsEntry(String hostLine) {
  DatanodeID dnId;
  String hostStr;
  int port;
  int idx = hostLine.indexOf(':');

  if (-1 == idx) {
    hostStr = hostLine;
    port = DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT;
  } else {
    hostStr = hostLine.substring(0, idx);
    port = Integer.parseInt(hostLine.substring(idx+1));
  }

  if (InetAddresses.isInetAddress(hostStr)) {
    // The IP:port is sufficient for listing in a report
    dnId = new DatanodeID(hostStr, "", "", port,
        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
  } else {
    String ipAddr = "";
    try {
      ipAddr = InetAddress.getByName(hostStr).getHostAddress();
    } catch (UnknownHostException e) {
      LOG.warn("Invalid hostname " + hostStr + " in hosts file");
    }
    dnId = new DatanodeID(ipAddr, hostStr, "", port,
        DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
        DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT);
  }
  return dnId;
}
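
The host/port split above reduces to a few lines of standalone logic. A minimal sketch (HostsEntryDemo and parseHostPort are hypothetical names restating the private method's split, not part of DatanodeManager):

import org.apache.hadoop.hdfs.DFSConfigKeys;

public class HostsEntryDemo {
  // Hypothetical restatement of the split logic, for illustration only.
  static String[] parseHostPort(String hostLine) {
    int idx = hostLine.indexOf(':');
    if (idx == -1) {
      // No explicit port: fall back to the default DataNode port.
      return new String[] { hostLine,
          String.valueOf(DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT) };
    }
    return new String[] { hostLine.substring(0, idx),
        hostLine.substring(idx + 1) };
  }

  public static void main(String[] args) {
    for (String line : new String[] { "192.168.0.10", "dn1.example.com:50010" }) {
      String[] hp = parseHostPort(line);
      System.out.println(hp[0] + " -> port " + hp[1]);
    }
  }
}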
 
Example #28
Source File: TestStickyBit.java    From big-c with Apache License 2.0
@BeforeClass
public static void init() throws Exception {
  conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  initCluster(true);
}
 
Example #29
Source File: TestViewFileSystemAtHdfsRoot.java    From big-c with Apache License 2.0
@BeforeClass
public static void clusterSetupAtBegining() throws IOException,
    LoginException, URISyntaxException {
  SupportsBlocks = true;
  CONF.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  
  cluster = new MiniDFSCluster.Builder(CONF)
    .numDataNodes(2)
    .build();
  cluster.waitClusterUp();
  
  fHdfs = cluster.getFileSystem();
}
 
Example #30
Source File: TestPermissionSymlinks.java    From big-c with Apache License 2.0
@BeforeClass
public static void beforeClassSetUp() throws Exception {
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  conf.set(FsPermission.UMASK_LABEL, "000");
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  wrapper = new FileSystemTestWrapper(fs);
}