org.apache.hadoop.hdfs.HdfsConfiguration Java Examples

The following examples show how to use org.apache.hadoop.hdfs.HdfsConfiguration. Each example notes its source file, the project it comes from, and its license.
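Before the examples, here is a minimal sketch of typical usage. HdfsConfiguration extends Configuration and registers hdfs-default.xml and hdfs-site.xml as default resources, so HDFS keys resolve without extra setup. The class name below is illustrative, and the sketch assumes the hadoop-hdfs jar and its default configuration files are on the classpath.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HdfsConfigurationSketch {
  public static void main(String[] args) {
    // Loads the core defaults plus hdfs-default.xml and hdfs-site.xml.
    Configuration conf = new HdfsConfiguration();

    // Read an HDFS key with a fallback default.
    long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);

    // Override a key programmatically, as the test examples below do.
    conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);

    // Wrap an existing Configuration, copying its properties (see Examples #2 and #11).
    Configuration copy = new HdfsConfiguration(conf);

    // Skip loading default resources entirely (see Example #8).
    Configuration empty = new HdfsConfiguration(false);

    System.out.println("dfs.blocksize = " + blockSize);
    System.out.println("copied heartbeat interval = "
        + copy.getInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 3));
    System.out.println("empty conf has block size key? "
        + (empty.get(DFSConfigKeys.DFS_BLOCK_SIZE_KEY) != null));
  }
}
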
Example #1
Source File: TestAuditLogger.java    From big-c with Apache License 2.0
/**
 * Tests that TopAuditLogger can be disabled
 */
@Test
public void testDisableTopAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(NNTOP_ENABLED_KEY, false);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    List<AuditLogger> auditLoggers =
        cluster.getNameNode().getNamesystem().getAuditLoggers();
    for (AuditLogger auditLogger : auditLoggers) {
      assertFalse(
          "top audit logger is still hooked in after it is disabled",
          auditLogger instanceof TopAuditLogger);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example #2
Source File: DfsServlet.java    From big-c with Apache License 2.0
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext context = getServletContext();
  // if we are running in the Name Node, use it directly rather than via 
  // rpc
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  if (nn != null) {
    return nn.getRpcServer();
  }
  InetSocketAddress nnAddr =
    NameNodeHttpServer.getNameNodeAddressFromContext(context);
  Configuration conf = new HdfsConfiguration(
      NameNodeHttpServer.getConfFromContext(context));
  return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
      ClientProtocol.class).getProxy();
}
 
Example #3
Source File: TestLossyRetryInvocationHandler.java    From hadoop with Apache License 2.0
@Test
public void testStartNNWithTrashEmptier() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();
  
  // enable both trash emptier and dropping response
  conf.setLong("fs.trash.interval", 360);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
        .build();
    cluster.waitActive();
    cluster.transitionToActive(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example #4
Source File: FileChecksumServlets.java    From big-c with Apache License 2.0
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final PrintWriter out = response.getWriter();
  final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final ServletContext context = getServletContext();
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  final Configuration conf = 
    new HdfsConfiguration(datanode.getConf());
  
  try {
    final DFSClient dfs = DatanodeJspHelper.getDFSClient(request, 
        datanode, conf, getUGI(request, conf));
    final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    writeXml(ioe, path, xml);
  } catch (InterruptedException e) {
    writeXml(e, path, xml);
  }
  xml.endDocument();
}
 
Example #5
Source File: DFSZKFailoverController.java    From big-c with Apache License 2.0
public static void main(String args[])
    throws Exception {
  if (DFSUtil.parseHelpArgument(args, 
      ZKFailoverController.USAGE, System.out, true)) {
    System.exit(0);
  }
  
  GenericOptionsParser parser = new GenericOptionsParser(
      new HdfsConfiguration(), args);
  DFSZKFailoverController zkfc = DFSZKFailoverController.create(
      parser.getConfiguration());
  int retCode = 0;
  try {
    retCode = zkfc.run(parser.getRemainingArgs());
  } catch (Throwable t) {
    LOG.fatal("Got a fatal error, exiting now", t);
  }
  System.exit(retCode);
}
 
Example #6
Source File: TestAuditLogger.java    From big-c with Apache License 2.0
/**
 * Tests that a broken audit logger causes requests to fail.
 */
@Test
public void testBrokenLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      BrokenAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    fail("Expected exception due to broken audit logger.");
  } catch (RemoteException re) {
    // Expected.
  } finally {
    cluster.shutdown();
  }
}
 
Example #7
Source File: TestGetConf.java    From hadoop with Apache License 2.0
private String runTool(HdfsConfiguration conf, String[] args, boolean success)
    throws Exception {
  ByteArrayOutputStream o = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(o, true);
  try {
    int ret = ToolRunner.run(new GetConf(conf, out, out), args);
    out.flush();
    System.err.println("Output: " + o.toString());
    assertEquals("Expected " + (success?"success":"failure") +
        " for args: " + Joiner.on(" ").join(args) + "\n" +
        "Output: " + o.toString(),
        success, ret == 0);
    return o.toString();
  } finally {
    o.close();
    out.close();
  }
}
 
Example #8
Source File: TestGetConf.java    From big-c with Apache License 2.0
/**
 * Test empty configuration
 */
@Test(timeout=10000)
public void testEmptyConf() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration(false);
  // Verify getting addresses fails
  getAddressListFromTool(TestType.NAMENODE, conf, false);
  System.out.println(getAddressListFromTool(TestType.BACKUP, conf, false));
  getAddressListFromTool(TestType.SECONDARY, conf, false);
  getAddressListFromTool(TestType.NNRPCADDRESSES, conf, false);
  for (Command cmd : Command.values()) {
    String arg = cmd.getName();
    CommandHandler handler = Command.getHandler(arg);
    assertNotNull("missing handler: " + cmd, handler);
    if (handler.key != null) {
      // First test with configuration missing the required key
      String[] args = {handler.key};
      runTool(conf, args, false);
    }
  }
}
 
Example #9
Source File: TestDataNodeVolumeFailureReporting.java    From big-c with Apache License 2.0
/**
 * Initializes the cluster.
 *
 * @param numDataNodes number of datanodes
 * @param storagesPerDatanode number of storage locations on each datanode
 * @param failedVolumesTolerated number of acceptable volume failures
 * @throws Exception if there is any failure
 */
private void initCluster(int numDataNodes, int storagesPerDatanode,
    int failedVolumesTolerated) throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      failedVolumesTolerated);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
      .storagesPerDatanode(storagesPerDatanode).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(
      cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
  volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
 
Example #10
Source File: TestGetConf.java    From hadoop with Apache License 2.0
/**
 * Get address list for a given type of address. Command expected to
 * fail if {@code success} is false.
 * @return returns the success or error output from the tool.
 */
private String getAddressListFromTool(TestType type, HdfsConfiguration conf,
    boolean success)
    throws Exception {
  String[] args = new String[1];
  switch (type) {
  case NAMENODE:
    args[0] = Command.NAMENODE.getName();
    break;
  case BACKUP:
    args[0] = Command.BACKUP.getName();
    break;
  case SECONDARY:
    args[0] = Command.SECONDARY.getName();
    break;
  case NNRPCADDRESSES:
    args[0] = Command.NNRPCADDRESSES.getName();
    break;
  }
  return runTool(conf, args, success);
}
 
Example #11
Source File: TestBackupNode.java    From big-c with Apache License 2.0
BackupNode startBackupNode(Configuration conf,
                           StartupOption startupOpt,
                           int idx) throws IOException {
  Configuration c = new HdfsConfiguration(conf);
  String dirs = getBackupNodeDir(startupOpt, idx);
  c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
  c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
      "127.0.0.1:0");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
          "127.0.0.1:0");

  BackupNode bn = (BackupNode)NameNode.createNameNode(
      new String[]{startupOpt.getName()}, c);
  assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
  assertTrue(bn.getRole() + " must be in StandbyState",
             bn.getNamesystem().getHAState()
               .equalsIgnoreCase(HAServiceState.STANDBY.name()));
  return bn;
}
 
Example #12
Source File: TestHostsFiles.java    From hadoop with Apache License 2.0
private Configuration getConf() {
  Configuration conf = new HdfsConfiguration();

  // Lower the heart beat interval so the NN quickly learns of dead
  // or decommissioned DNs and the NN issues replication and invalidation
  // commands quickly (as replies to heartbeats)
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);

  // Have the NN ReplicationMonitor compute the replication and
  // invalidation commands to send DNs every second.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);

  // Have the NN check for pending replications every second so it
  // quickly schedules additional replicas as they are identified.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);

  // The DNs report blocks every second.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);

  // Indicates we have multiple racks
  conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
  return conf;
}
 
Example #13
Source File: TestDataNodeVolumeFailureToleration.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  // Allow a single volume failure (there are two volumes)
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
}
 
Example #14
Source File: TestAuditLogger.java    From big-c with Apache License 2.0
/**
 * Minor test related to HADOOP-9155. Verify that during a
 * FileSystem.setPermission() operation, the stat passed in during the
 * logAuditEvent() call returns the new permission rather than the old
 * permission.
 */
@Test
public void testAuditLoggerWithSetPermission() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    final Path p = new Path("/");
    fs.setTimes(p, time, time);
    fs.setPermission(p, new FsPermission(TEST_PERMISSION));
    assertEquals(TEST_PERMISSION, DummyAuditLogger.foundPermission);
    assertEquals(2, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
 
Example #15
Source File: TestDefaultBlockPlacementPolicy.java    From hadoop with Apache License 2.0
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };

  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
 
Example #16
Source File: TestWriteToReplica.java    From hadoop with Apache License 2.0
@Test
public void testClose() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);

    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    
    ExtendedBlock[] blocks = setup(bpid, dataSet);

    // test close
    testClose(dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
 
Example #17
Source File: TestHistoryFileManager.java    From big-c with Apache License 2.0
@BeforeClass
public static void setUpClass() throws Exception {
  coreSitePath = "." + File.separator + "target" + File.separator +
          "test-classes" + File.separator + "core-site.xml";
  Configuration conf = new HdfsConfiguration();
  Configuration conf2 = new HdfsConfiguration();
  dfsCluster = new MiniDFSCluster.Builder(conf).build();
  conf2.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
          conf.get(MiniDFSCluster.HDFS_MINIDFS_BASEDIR) + "_2");
  dfsCluster2 = new MiniDFSCluster.Builder(conf2).build();
}
 
Example #18
Source File: TestGetConf.java    From big-c with Apache License 2.0 5 votes vote down vote up
@Test
public void testIncludeInternalNameServices() throws Exception {
  final int nsCount = 10;
  final int remoteNsCount = 4;
  HdfsConfiguration conf = new HdfsConfiguration();
  setupNameServices(conf, nsCount);
  setupAddress(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsCount, 1000);
  setupAddress(conf, DFS_NAMENODE_RPC_ADDRESS_KEY, nsCount, 1500);
  conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "ns1");
  setupStaticHostResolution(nsCount);

  String[] includedNN = new String[] {"nn1:1001"};
  verifyAddresses(conf, TestType.NAMENODE, false, includedNN);
  verifyAddresses(conf, TestType.NNRPCADDRESSES, true, includedNN);
}
 
Example #19
Source File: TestGetConf.java    From hadoop with Apache License 2.0
@Test(timeout=10000)
public void testGetSpecificKey() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set("mykey", " myval ");
  String[] args = {"-confKey", "mykey"};
  String toolResult = runTool(conf, args, true);
  assertEquals(String.format("myval%n"), toolResult);
}
 
Example #20
Source File: DfsTask.java    From big-c with Apache License 2.0
/**
 * Invoke {@link org.apache.hadoop.fs.FsShell#main} after a
 * few cursory checks of the configuration.
 */
public void execute() throws BuildException {
  if (null == cmd)
    throw new BuildException("Missing command (cmd) argument");
  argv.add(0, cmd);

  if (null == confloader) {
    setConf(getProject().getProperty("hadoop.conf.dir"));
  }

  int exit_code = 0;
  try {
    pushContext();

    Configuration conf = new HdfsConfiguration();
    conf.setClassLoader(confloader);
    exit_code = ToolRunner.run(conf, shell,
        argv.toArray(new String[argv.size()]));
    exit_code = postCmd(exit_code);

    if (0 > exit_code) {
      StringBuilder msg = new StringBuilder();
      for (String s : argv)
        msg.append(s + " ");
      msg.append("failed: " + exit_code);
      throw new Exception(msg.toString());
    }
  } catch (Exception e) {
    if (failonerror)
        throw new BuildException(e);
  } finally {
    popContext();
  }
}
 
Example #21
Source File: TestBalancer.java    From hadoop with Apache License 2.0
/**
 * Test a cluster with even distribution,
 * then three nodes are added to the cluster,
 * runs balancer with two of the nodes in the exclude list
 */
@Test(timeout=100000)
public void testBalancerCliWithExcludeListWithPorts() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
      CAPACITY, RACK2, new PortNumberBasedNodes(3, 2, 0), true, false);
}
 
Example #22
Source File: TestDFSHAAdmin.java    From hadoop with Apache License 2.0
@Test
public void testFailoverWithInvalidFenceArg() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(-1, runTool("-failover", "nn1", "nn2", "notforcefence"));
}
 
Example #23
Source File: TestSaslDataTransfer.java    From hadoop with Apache License 2.0
@Test
public void testAuthentication() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig(
    "authentication,integrity,privacy");
  startCluster(clusterConf);
  HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
  clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "authentication");
  doTest(clientConf);
}
 
Example #24
Source File: TestTransferFsImage.java    From big-c with Apache License 2.0
/**
 * Similar to the above test, except that there are multiple local files
 * and one of them can be saved.
 */
@Test
public void testClientSideExceptionOnJustOneDir() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(0).build();
  NNStorage mockStorage = Mockito.mock(NNStorage.class);
  List<File> localPaths = ImmutableList.of(
      new File("/xxxxx-does-not-exist/blah"),
      new File(TEST_DIR, "testfile")    
      );
     
  try {
    URL fsName = DFSUtil.getInfoServer(
        cluster.getNameNode().getServiceRpcAddress(), conf,
        DFSUtil.getHttpClientScheme(conf)).toURL();

    String id = "getimage=1&txid=0";

    TransferFsImage.getFileClient(fsName, id, localPaths, mockStorage, false);      
    Mockito.verify(mockStorage).reportErrorOnFile(localPaths.get(0));
    assertTrue("The valid local file should get saved properly",
        localPaths.get(1).length() > 0);
  } finally {
    cluster.shutdown();      
  }
}
 
Example #25
Source File: TestINodeAttributeProvider.java    From big-c with Apache License 2.0
@Before
public void setUp() throws IOException {
  CALLED.clear();
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_INODE_ATTRIBUTES_PROVIDER_KEY,
      MyAuthorizationProvider.class.getName());
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  miniDFS = new MiniDFSCluster.Builder(conf).build();
}
 
Example #26
Source File: TestBalancer.java    From big-c with Apache License 2.0
/**
 * Test a cluster with even distribution,
 * then three nodes are added to the cluster,
 * runs balancer with two of the nodes in the include list
 */
@Test(timeout=100000)
public void testBalancerWithIncludeListWithPorts() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
      CAPACITY, RACK2, new PortNumberBasedNodes(3, 0, 1), false, false);
}
 
Example #27
Source File: TestDFSHAAdmin.java    From big-c with Apache License 2.0
@Test
public void testFailoverWithForceActive() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "nn1", "nn2", "--forceactive"));
}
 
Example #28
Source File: TestAuditLogs.java    From big-c with Apache License 2.0
@Before
public void setupCluster() throws Exception {
  // must configure prior to instantiating the namesystem because it
  // will reconfigure the logger if async is enabled
  configureAuditLogs();
  conf = new HdfsConfiguration();
  final long precision = 1L;
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
  conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, useAsyncLog);
  util = new DFSTestUtil.Builder().setName("TestAuditAllowed").
      setNumFiles(20).build();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
  fs = cluster.getFileSystem();
  util.createFiles(fs, fileName);

  // make sure the appender is what it's supposed to be
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  @SuppressWarnings("unchecked")
  List<Appender> appenders = Collections.list(logger.getAllAppenders());
  assertEquals(1, appenders.size());
  assertEquals(useAsyncLog, appenders.get(0) instanceof AsyncAppender);

  fnames = util.getFileNames(fileName);
  util.waitReplication(fs, fileName, (short)3);
  userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
}
 
Example #29
Source File: TestBlockUnderConstruction.java    From big-c with Apache License 2.0 5 votes vote down vote up
@BeforeClass
public static void setUp() throws Exception {
  Configuration conf = new HdfsConfiguration();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  cluster.waitActive();
  hdfs = cluster.getFileSystem();
}
 
Example #30
Source File: TestBalancer.java    From hadoop with Apache License 2.0 5 votes vote down vote up
/**
 * Test a cluster with even distribution,
 * then three nodes are added to the cluster,
 * runs balancer with two of the nodes in the exclude list
 */
@Test(timeout=100000)
public void testBalancerWithExcludeList() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  Set<String> excludeHosts = new HashSet<String>();
  excludeHosts.add( "datanodeY");
  excludeHosts.add( "datanodeZ");
  doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
      new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
      excludeHosts, Parameters.DEFAULT.nodesToBeIncluded), false, false);
}