Java Code Examples for org.apache.hadoop.hdfs.HdfsConfiguration

The following examples show how to use org.apache.hadoop.hdfs.HdfsConfiguration. They are extracted from open source projects; the originating project and source file are noted above each example.
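Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: construct an HdfsConfiguration (which, unlike a plain Configuration, also loads hdfs-default.xml and hdfs-site.xml), override a few keys from DFSConfigKeys, and hand the result to a FileSystem or MiniDFSCluster. The specific values and the fs.defaultFS URI below are illustrative assumptions, not taken from any of the projects listed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HdfsConfigurationSketch {
  public static void main(String[] args) throws Exception {
    // HdfsConfiguration adds hdfs-default.xml and hdfs-site.xml to the
    // resources that a plain Configuration already loads.
    Configuration conf = new HdfsConfiguration();

    // Override individual keys programmatically; DFSConfigKeys holds the
    // canonical key names used throughout the examples below.
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 1024L); // illustrative value
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);            // illustrative value

    // Values read back like any other Configuration key.
    long blockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 0L);
    System.out.println("dfs.blocksize = " + blockSize);

    // A FileSystem built from this configuration talks to whatever
    // fs.defaultFS points at (hypothetical local URI shown here).
    conf.set("fs.defaultFS", "hdfs://localhost:8020");
    try (FileSystem fs = FileSystem.get(conf)) {
      System.out.println("exists(/) = " + fs.exists(new Path("/")));
    }
  }
}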
Example 1
Source Project: hadoop    Source File: TestDefaultBlockPlacementPolicy.java    License: Apache License 2.0
@Before
public void setup() throws IOException {
  StaticMapping.resetMap();
  Configuration conf = new HdfsConfiguration();
  final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
  final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };

  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
      .hosts(hosts).build();
  cluster.waitActive();
  nameNodeRpc = cluster.getNameNodeRpc();
  namesystem = cluster.getNamesystem();
  perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
      FsPermission.getDefault());
}
 
Example 2
Source Project: big-c    Source File: TestBackupNode.java    License: Apache License 2.0
BackupNode startBackupNode(Configuration conf,
                           StartupOption startupOpt,
                           int idx) throws IOException {
  Configuration c = new HdfsConfiguration(conf);
  String dirs = getBackupNodeDir(startupOpt, idx);
  c.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);
  c.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      "${" + DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + "}");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY,
      "127.0.0.1:0");
  c.set(DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
          "127.0.0.1:0");

  BackupNode bn = (BackupNode)NameNode.createNameNode(
      new String[]{startupOpt.getName()}, c);
  assertTrue(bn.getRole() + " must be in SafeMode.", bn.isInSafeMode());
  assertTrue(bn.getRole() + " must be in StandbyState",
             bn.getNamesystem().getHAState()
               .equalsIgnoreCase(HAServiceState.STANDBY.name()));
  return bn;
}
 
Example 3
Source Project: big-c    Source File: TestDataNodeVolumeFailureReporting.java    License: Apache License 2.0
/**
 * Initializes the cluster.
 *
 * @param numDataNodes number of datanodes
 * @param storagesPerDatanode number of storage locations on each datanode
 * @param failedVolumesTolerated number of acceptable volume failures
 * @throws Exception if there is any failure
 */
private void initCluster(int numDataNodes, int storagesPerDatanode,
    int failedVolumesTolerated) throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
      failedVolumesTolerated);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes)
      .storagesPerDatanode(storagesPerDatanode).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
  long dnCapacity = DFSTestUtil.getDatanodeCapacity(
      cluster.getNamesystem().getBlockManager().getDatanodeManager(), 0);
  volumeCapacity = dnCapacity / cluster.getStoragesPerDatanode();
}
 
Example 4
Source Project: hadoop    Source File: TestGetConf.java    License: Apache License 2.0
private String runTool(HdfsConfiguration conf, String[] args, boolean success)
    throws Exception {
  ByteArrayOutputStream o = new ByteArrayOutputStream();
  PrintStream out = new PrintStream(o, true);
  try {
    int ret = ToolRunner.run(new GetConf(conf, out, out), args);
    out.flush();
    System.err.println("Output: " + o.toString());
    assertEquals("Expected " + (success?"success":"failure") +
        " for args: " + Joiner.on(" ").join(args) + "\n" +
        "Output: " + o.toString(),
        success, ret == 0);
    return o.toString();
  } finally {
    o.close();
    out.close();
  }
}
 
Example 5
Source Project: big-c    Source File: TestGetConf.java    License: Apache License 2.0
/**
 * Test empty configuration
 */
@Test(timeout=10000)
public void testEmptyConf() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration(false);
  // Verify getting addresses fails
  getAddressListFromTool(TestType.NAMENODE, conf, false);
  System.out.println(getAddressListFromTool(TestType.BACKUP, conf, false));
  getAddressListFromTool(TestType.SECONDARY, conf, false);
  getAddressListFromTool(TestType.NNRPCADDRESSES, conf, false);
  for (Command cmd : Command.values()) {
    String arg = cmd.getName();
    CommandHandler handler = Command.getHandler(arg);
    assertNotNull("missing handler: " + cmd, handler);
    if (handler.key != null) {
      // First test with configuration missing the required key
      String[] args = {handler.key};
      runTool(conf, args, false);
    }
  }
}
 
Example 6
Source Project: big-c    Source File: TestAuditLogger.java    License: Apache License 2.0
/**
 * Tests that TopAuditLogger can be disabled
 */
@Test
public void testDisableTopAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(NNTOP_ENABLED_KEY, false);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    List<AuditLogger> auditLoggers =
        cluster.getNameNode().getNamesystem().getAuditLoggers();
    for (AuditLogger auditLogger : auditLoggers) {
      assertFalse(
          "top audit logger is still hooked in after it is disabled",
          auditLogger instanceof TopAuditLogger);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 7
Source Project: big-c    Source File: FileChecksumServlets.java    License: Apache License 2.0
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response
    ) throws ServletException, IOException {
  final PrintWriter out = response.getWriter();
  final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
  final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
  xml.declaration();

  final ServletContext context = getServletContext();
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  final Configuration conf = 
    new HdfsConfiguration(datanode.getConf());
  
  try {
    final DFSClient dfs = DatanodeJspHelper.getDFSClient(request, 
        datanode, conf, getUGI(request, conf));
    final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
    MD5MD5CRC32FileChecksum.write(xml, checksum);
  } catch(IOException ioe) {
    writeXml(ioe, path, xml);
  } catch (InterruptedException e) {
    writeXml(e, path, xml);
  }
  xml.endDocument();
}
 
Example 8
Source Project: big-c    Source File: DFSZKFailoverController.java    License: Apache License 2.0
public static void main(String args[])
    throws Exception {
  if (DFSUtil.parseHelpArgument(args, 
      ZKFailoverController.USAGE, System.out, true)) {
    System.exit(0);
  }
  
  GenericOptionsParser parser = new GenericOptionsParser(
      new HdfsConfiguration(), args);
  DFSZKFailoverController zkfc = DFSZKFailoverController.create(
      parser.getConfiguration());
  int retCode = 0;
  try {
    retCode = zkfc.run(parser.getRemainingArgs());
  } catch (Throwable t) {
    LOG.fatal("Got a fatal error, exiting now", t);
  }
  System.exit(retCode);
}
 
Example 9
Source Project: big-c    Source File: TestAuditLogger.java    License: Apache License 2.0
/**
 * Tests that a broken audit logger causes requests to fail.
 */
@Test
public void testBrokenLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      BrokenAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    fail("Expected exception due to broken audit logger.");
  } catch (RemoteException re) {
    // Expected.
  } finally {
    cluster.shutdown();
  }
}
 
Example 10
Source Project: hadoop    Source File: TestGetConf.java    License: Apache License 2.0
/**
 * Get the address list for a given type of address. The command is
 * expected to fail if {@code success} is false.
 * @return the success or error output from the tool.
 */
private String getAddressListFromTool(TestType type, HdfsConfiguration conf,
    boolean success)
    throws Exception {
  String[] args = new String[1];
  switch (type) {
  case NAMENODE:
    args[0] = Command.NAMENODE.getName();
    break;
  case BACKUP:
    args[0] = Command.BACKUP.getName();
    break;
  case SECONDARY:
    args[0] = Command.SECONDARY.getName();
    break;
  case NNRPCADDRESSES:
    args[0] = Command.NNRPCADDRESSES.getName();
    break;
  }
  return runTool(conf, args, success);
}
 
Example 11
Source Project: hadoop    Source File: TestWriteToReplica.java    License: Apache License 2.0
@Test
public void testClose() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetImpl dataSet = (FsDatasetImpl)DataNodeTestUtils.getFSDataset(dn);

    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    
    ExtendedBlock[] blocks = setup(bpid, dataSet);

    // test close
    testClose(dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
 
Example 12
Source Project: big-c    Source File: TestAuditLogger.java    License: Apache License 2.0
/**
 * Minor test related to HADOOP-9155. Verify that during a
 * FileSystem.setPermission() operation, the stat passed in during the
 * logAuditEvent() call returns the new permission rather than the old
 * permission.
 */
@Test
public void testAuditLoggerWithSetPermission() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    final Path p = new Path("/");
    fs.setTimes(p, time, time);
    fs.setPermission(p, new FsPermission(TEST_PERMISSION));
    assertEquals(TEST_PERMISSION, DummyAuditLogger.foundPermission);
    assertEquals(2, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
 
Example 13
@Before
public void setUp() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512L);
  /*
   * Lower the DN heartbeat, DF rate, and recheck interval to one second
   * so state about failures and datanode death propagates faster.
   */
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_DF_INTERVAL_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
  // Allow a single volume failure (there are two volumes)
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  dataDir = cluster.getDataDirectory();
}
 
Example 14
Source Project: big-c    Source File: DfsServlet.java    License: Apache License 2.0
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext context = getServletContext();
  // if we are running in the Name Node, use it directly rather than via 
  // rpc
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  if (nn != null) {
    return nn.getRpcServer();
  }
  InetSocketAddress nnAddr =
    NameNodeHttpServer.getNameNodeAddressFromContext(context);
  Configuration conf = new HdfsConfiguration(
      NameNodeHttpServer.getConfFromContext(context));
  return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
      ClientProtocol.class).getProxy();
}
 
Example 15
Source Project: hadoop    Source File: TestLossyRetryInvocationHandler.java    License: Apache License 2.0
@Test
public void testStartNNWithTrashEmptier() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new HdfsConfiguration();
  
  // enable both trash emptier and dropping response
  conf.setLong("fs.trash.interval", 360);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2);
  
  try {
    cluster = new MiniDFSCluster.Builder(conf)
        .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0)
        .build();
    cluster.waitActive();
    cluster.transitionToActive(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 16
Source Project: hadoop    Source File: TestHostsFiles.java    License: Apache License 2.0
private Configuration getConf() {
  Configuration conf = new HdfsConfiguration();

  // Lower the heart beat interval so the NN quickly learns of dead
  // or decommissioned DNs and the NN issues replication and invalidation
  // commands quickly (as replies to heartbeats)
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);

  // Have the NN ReplicationMonitor compute the replication and
  // invalidation commands to send DNs every second.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);

  // Have the NN check for pending replications every second so it
  // quickly schedules additional replicas as they are identified.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);

  // The DNs report blocks every second.
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);

  // Indicates we have multiple racks
  conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
  return conf;
}
 
Example 17
Source Project: big-c    Source File: TestNameEditsConfigs.java    License: Apache License 2.0
/**
 * Test that dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir
 * tolerate white space between values.
 */
@Test
public void testCheckPointDirsAreTrimmed() throws Exception {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  File checkpointNameDir1 = new File(base_dir, "chkptName1");
  File checkpointEditsDir1 = new File(base_dir, "chkptEdits1");
  File checkpointNameDir2 = new File(base_dir, "chkptName2");
  File checkpointEditsDir2 = new File(base_dir, "chkptEdits2");
  File nameDir = new File(base_dir, "name1");
  String whiteSpace = "  \n   \n  ";
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getPath());
  conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, whiteSpace
      + checkpointNameDir1.getPath() + whiteSpace, whiteSpace
      + checkpointNameDir2.getPath() + whiteSpace);
  conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
      whiteSpace + checkpointEditsDir1.getPath() + whiteSpace, whiteSpace
          + checkpointEditsDir2.getPath() + whiteSpace);
  cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
      .numDataNodes(3).build();
  try {
    cluster.waitActive();
    secondary = startSecondaryNameNode(conf);
    secondary.doCheckpoint();
    assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",
        checkpointNameDir1.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + " must be trimmed ",
        checkpointNameDir2.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
        + " must be trimmed ", checkpointEditsDir1.exists());
    assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
        + " must be trimmed ", checkpointEditsDir2.exists());
  } finally {
    secondary.shutdown();
    cluster.shutdown();
  }
}
 
Example 18
Source Project: hadoop    Source File: TestBalancer.java    License: Apache License 2.0
/**
 * Test a cluster with even distribution; then add three nodes to the
 * cluster and run the balancer with two of the nodes in the exclude
 * list supplied in a file.
 */
@Test(timeout=100000)
public void testBalancerCliWithExcludeListInAFile() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  initConf(conf);
  Set<String> excludeHosts = new HashSet<String>();
  excludeHosts.add( "datanodeY");
  excludeHosts.add( "datanodeZ");
  doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1}, CAPACITY, RACK2,
      new HostNameBasedNodes(new String[] {"datanodeX", "datanodeY", "datanodeZ"},
      excludeHosts, Parameters.DEFAULT.nodesToBeIncluded), true, true);
}
 
Example 19
Source Project: hadoop    Source File: TestDFSHAAdmin.java    License: Apache License 2.0
@Test
public void testFailoverWithFencerConfigured() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "nn1", "nn2"));
}
 
Example 20
Source Project: big-c    Source File: TestSaslDataTransfer.java    License: Apache License 2.0
@Test
public void testDataNodeAbortsIfNotHttpsOnly() throws Exception {
  HdfsConfiguration clusterConf = createSecureConfig("authentication");
  clusterConf.set(DFS_HTTP_POLICY_KEY,
    HttpConfig.Policy.HTTP_AND_HTTPS.name());
  exception.expect(RuntimeException.class);
  exception.expectMessage("Cannot start secure DataNode");
  startCluster(clusterConf);
}
 
Example 21
Source Project: big-c    Source File: TestFsck.java    License: Apache License 2.0
/**
 * Test that fsck returns -1 in case of failure.
 *
 * @throws Exception
 */
@Test
public void testFsckError() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    // bring up a one-node cluster
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    String fileName = "/test.txt";
    Path filePath = new Path(fileName);
    FileSystem fs = cluster.getFileSystem();
    
    // create a one-block file
    DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
    DFSTestUtil.waitReplication(fs, filePath, (short)1);
    
    // intentionally corrupt NN data structure
    INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
        (fileName, true);
    final BlockInfoContiguous[] blocks = node.getBlocks();
    assertEquals(blocks.length, 1);
    blocks[0].setNumBytes(-1L);  // set the block length to be negative
    
    // run fsck and expect a failure with -1 as the error code
    String outStr = runFsck(conf, -1, true, fileName);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
    
    // clean up file system
    fs.delete(filePath, true);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 22
Source Project: hadoop    Source File: TransferFsImage.java    License: Apache License 2.0
private static void setTimeout(HttpURLConnection connection) {
  if (timeout <= 0) {
    Configuration conf = new HdfsConfiguration();
    timeout = conf.getInt(DFSConfigKeys.DFS_IMAGE_TRANSFER_TIMEOUT_KEY,
        DFSConfigKeys.DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT);
    LOG.info("Image Transfer timeout configured to " + timeout
        + " milliseconds");
  }

  if (timeout > 0) {
    connection.setConnectTimeout(timeout);
    connection.setReadTimeout(timeout);
  }
}
 
Example 23
Source Project: big-c    Source File: HDFSContract.java    License: Apache License 2.0
public static void createCluster() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.addResource(CONTRACT_HDFS_XML);
  //hack in a 256 byte block size
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);

  cluster =
    new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitClusterUp();
}
 
Example 24
Source Project: big-c    Source File: TestNamenodeRetryCache.java    License: Apache License 2.0
/** Start a cluster */
@Before
public void setup() throws Exception {
  conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  nnRpc = cluster.getNameNode().getRpcServer();
  filesystem = cluster.getFileSystem();
}
 
Example 25
Source Project: hadoop    Source File: SecureDataNodeStarter.java    License: Apache License 2.0
@Override
public void init(DaemonContext context) throws Exception {
  System.err.println("Initializing secure datanode resources");
  // Create a new HdfsConfiguration object to ensure that the configuration in
  // hdfs-site.xml is picked up.
  Configuration conf = new HdfsConfiguration();
  
  // Stash command-line arguments for regular datanode
  args = context.getArguments();
  resources = getSecureResources(conf);
}
 
Example 26
Source Project: hadoop    Source File: TestGetConf.java    License: Apache License 2.0
/**
 * Using DFSUtil methods, get the list of addresses of the given {@code type}.
 */
private Map<String, Map<String, InetSocketAddress>> getAddressListFromConf(
    TestType type, HdfsConfiguration conf) throws IOException {
  switch (type) {
  case NAMENODE:
    return DFSUtil.getNNServiceRpcAddressesForCluster(conf);
  case BACKUP:
    return DFSUtil.getBackupNodeAddresses(conf);
  case SECONDARY:
    return DFSUtil.getSecondaryNameNodeAddresses(conf);
  case NNRPCADDRESSES:
    return DFSUtil.getNNServiceRpcAddressesForCluster(conf);
  }
  return null;
}
 
Example 27
Source Project: hadoop    Source File: TestGetConf.java    License: Apache License 2.0
private void verifyAddresses(HdfsConfiguration conf, TestType type,
    boolean checkPort, String... expected) throws Exception {
  // Ensure DFSUtil returned the right set of addresses
  Map<String, Map<String, InetSocketAddress>> map =
    getAddressListFromConf(type, conf);
  List<ConfiguredNNAddress> list = DFSUtil.flattenAddressMap(map);
  String[] actual = toStringArray(list);
  Arrays.sort(actual);
  Arrays.sort(expected);
  assertArrayEquals(expected, actual);

  // Test GetConf returned addresses
  getAddressListFromTool(type, conf, checkPort, list);
}
 
Example 28
Source Project: big-c    Source File: TestSequentialBlockId.java    License: Apache License 2.0
/**
 * Test that block IDs are generated sequentially.
 *
 * @throws IOException
 */
@Test
public void testBlockIdGeneration() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    // Create a file that is 10 blocks long.
    Path path = new Path("testBlockIdGeneration.dat");
    DFSTestUtil.createFile(
        fs, path, IO_SIZE, BLOCK_SIZE * 10, BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
    LOG.info("Block0 id is " + blocks.get(0).getBlock().getBlockId());
    long nextBlockExpectedId = blocks.get(0).getBlock().getBlockId() + 1;

    // Ensure that the block IDs are sequentially increasing.
    for (int i = 1; i < blocks.size(); ++i) {
      long nextBlockId = blocks.get(i).getBlock().getBlockId();
      LOG.info("Block" + i + " id is " + nextBlockId);
      assertThat(nextBlockId, is(nextBlockExpectedId));
      ++nextBlockExpectedId;
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 29
Source Project: big-c    Source File: TestGetConf.java    License: Apache License 2.0
@Test(timeout=10000)
public void testGetSpecificKey() throws Exception {
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set("mykey", " myval ");
  String[] args = {"-confKey", "mykey"};
  String toolResult = runTool(conf, args, true);
  assertEquals(String.format("myval%n"), toolResult);
}
 
Example 30
Source Project: big-c    Source File: TestDFSHAAdmin.java    License: Apache License 2.0
@Test
public void testFailoverWithFencerConfigured() throws Exception {
  Mockito.doReturn(STANDBY_READY_RESULT).when(mockProtocol).getServiceStatus();
  HdfsConfiguration conf = getHAConf();
  conf.set(DFSConfigKeys.DFS_HA_FENCE_METHODS_KEY, getFencerTrueCommand());
  tool.setConf(conf);
  assertEquals(0, runTool("-failover", "nn1", "nn2"));
}