org.apache.hadoop.hdfs.server.namenode.FSNamesystem Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.FSNamesystem. Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
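Most of the examples below follow the same pattern: start a MiniDFSCluster and obtain its FSNamesystem for assertions. A minimal sketch of that shared setup, assuming the standard Hadoop test APIs (HdfsConfiguration, MiniDFSCluster); the cluster size and the metric queried are arbitrary:

Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
try {
  cluster.waitActive();
  // FSNamesystem is the namenode's central bookkeeping object; tests read
  // aggregate namespace metrics from it, such as the total file count.
  FSNamesystem fsn = cluster.getNamesystem();
  System.out.println("Files total: " + fsn.getFilesTotal());
} finally {
  cluster.shutdown();
}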
Example #1
Source File: TestDeadDatanode.java    From RDFS with Apache License 2.0
/**
 * Wait for the datanode to reach the alive or dead state, within waitTime
 * milliseconds.
 */
private void waitForDatanodeState(DatanodeID nodeID, boolean alive, int waitTime)
    throws TimeoutException, InterruptedException, IOException {
  long stopTime = System.currentTimeMillis() + waitTime;
  FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
  String state = alive ? "alive" : "dead";
  while (System.currentTimeMillis() < stopTime) {
    if (namesystem.getDatanode(nodeID).isAlive == alive) {
      LOG.info("datanode " + nodeID + " is " + state);
      return;
    }
    LOG.info("Waiting for datanode " + nodeID + " to become " + state);
    Thread.sleep(1000);
  }
  throw new TimeoutException("Timedout waiting for datanode reach state "
      + state);
}
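A hypothetical call site for the helper above; how the DatanodeID is obtained here is illustrative, not taken from the original test:

// Stop the first datanode, then wait up to 20 seconds for the namenode to
// mark it dead. The dnId accessor is a placeholder (hypothetical) for however
// the test retrieves the node's registration.
DatanodeID dnId = cluster.getDataNodes().get(0).getDatanodeId(); // hypothetical accessor
cluster.stopDataNode(0);
waitForDatanodeState(dnId, false, 20000);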
 
Example #2
Source File: TestHASafeMode.java    From hadoop with Apache License 2.0
/**
 * Test case for entering safemode in the active namenode when it is already
 * in startup safemode. This is a regression test for HDFS-2747.
 */
@Test
public void testEnterSafeModeInANNShouldNotThrowNPE() throws Exception {
  banner("Restarting active");
  DFSTestUtil
    .createFile(fs, new Path("/test"), 3 * BLOCK_SIZE, (short) 3, 1L);
  restartActive();
  nn0.getRpcServer().transitionToActive(
      new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));

  FSNamesystem namesystem = nn0.getNamesystem();
  String status = namesystem.getSafemode();
  assertTrue("Bad safemode status: '" + status + "'", status
      .startsWith("Safe mode is ON."));
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
  NameNodeAdapter.enterSafeMode(nn0, false);
  assertTrue("Failed to enter into safemode in active", namesystem
      .isInSafeMode());
}
 
Example #3
Source File: TestFailureOfSharedDir.java    From hadoop with Apache License 2.0
/**
 * Test that the shared edits dir is automatically added to the list of edits
 * dirs that are marked required.
 */
@Test
public void testSharedDirIsAutomaticallyMarkedRequired()
    throws URISyntaxException {
  URI foo = new URI("file:/foo");
  URI bar = new URI("file:/bar");
  Configuration conf = new Configuration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, Joiner.on(",").join(foo, bar));
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY, foo.toString());
  assertFalse(FSNamesystem.getRequiredNamespaceEditsDirs(conf).contains(
      bar));
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, bar.toString());
  Collection<URI> requiredEditsDirs = FSNamesystem
      .getRequiredNamespaceEditsDirs(conf); 
  assertTrue(Joiner.on(",").join(requiredEditsDirs) + " does not contain " + bar,
      requiredEditsDirs.contains(bar));
}
 
Example #4
Source File: TestOverReplicatedBlocks.java    From big-c with Apache License 2.0
/**
 * Test that an over-replicated block gets invalidated when the replication
 * factor is decreased for a file with a partial block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Example #5
Source File: TestDecommission.java    From big-c with Apache License 2.0
private void verifyStats(NameNode namenode, FSNamesystem fsn,
    DatanodeInfo info, DataNode node, boolean decommissioning)
    throws InterruptedException, IOException {
  // Do the stats check over 10 heartbeats
  for (int i = 0; i < 10; i++) {
    long[] newStats = namenode.getRpcServer().getStats();

    // For decommissioning nodes, ensure the capacity of the DN is no longer
    // counted; only the used space of the DN is counted in cluster capacity
    assertEquals(newStats[0],
        decommissioning ? info.getDfsUsed() : info.getCapacity());

    // Ensure cluster used capacity is counted for both normal and
    // decommissioning nodes
    assertEquals(newStats[1], info.getDfsUsed());

    // For decommissioning nodes, remaining space from the DN is not counted
    assertEquals(newStats[2], decommissioning ? 0 : info.getRemaining());

    // Ensure the transceiver count matches that of the DN
    assertEquals(fsn.getTotalLoad(), info.getXceiverCount());
    DataNodeTestUtils.triggerHeartbeat(node);
  }
}
 
Example #6
Source File: TestStandbySafeMode.java    From RDFS with Apache License 2.0
@Test
public void testDeadDatanodeFailover() throws Exception {
  setUp(false);
  h.setIgnoreDatanodes(false);
  // Create test files.
  createTestFiles("/testDeadDatanodeFailover");
  cluster.shutDownDataNode(0);
  FSNamesystem ns = cluster.getStandbyAvatar(0).avatar.namesystem;
  StandbySafeMode safeMode = cluster.getStandbyAvatar(0).avatar.getStandbySafeMode();
  new ExitSafeMode(safeMode, ns).start();
  cluster.failOver();
  // One datanode should be removed after failover
  assertEquals(2,
      cluster.getPrimaryAvatar(0).avatar.namesystem
          .datanodeReport(DatanodeReportType.LIVE).length);
  assertTrue(pass);
}
 
Example #7
Source File: TestWebHdfsUrl.java    From hadoop with Apache License 2.0
private WebHdfsFileSystem getWebHdfsFileSystem(UserGroupInformation ugi,
    Configuration conf) throws IOException {
  if (UserGroupInformation.isSecurityEnabled()) {
    DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
        ugi.getUserName()), null, null);
    FSNamesystem namesystem = mock(FSNamesystem.class);
    DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
        86400000, 86400000, 86400000, 86400000, namesystem);
    dtSecretManager.startThreads();
    Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
        dtId, dtSecretManager);
    SecurityUtil.setTokenService(
        token, NetUtils.createSocketAddr(uri.getAuthority()));
    token.setKind(WebHdfsFileSystem.TOKEN_KIND);
    ugi.addToken(token);
  }
  return (WebHdfsFileSystem) FileSystem.get(uri, conf);
}
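An illustrative invocation of the helper above, assuming the test's uri and conf fields are already initialized; the user name is made up:

// Build a WebHDFS client for a fabricated remote user. With security enabled,
// the helper also attaches a delegation token to the UGI before connecting.
UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test-user");
WebHdfsFileSystem webhdfs = getWebHdfsFileSystem(ugi, conf);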
 
Example #8
Source File: TestBlocksWithNotEnoughRacks.java    From hadoop with Apache License 2.0
@Test
public void testSufficientlySingleReplBlockUsesNewRack() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 1;
  final Path filePath = new Path("/testFile");

  String racks[] = {"/rack1", "/rack1", "/rack1", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block with a replication factor of 1
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);

    REPLICATION_FACTOR = 2;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Example #9
Source File: TestDecommission.java    From RDFS with Apache License 2.0
public void testClusterStats(int numNameNodes, boolean federation) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf, federation);
  
  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);
    
    NameNode namenode = cluster.getNameNode(i);
    FSNamesystem fsn = namenode.namesystem;
    DatanodeInfo downnode = decommissionNode(i, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, downnode, true);
    
    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    fsn.refreshNodes(conf);
    DatanodeInfo ret = fsn.getDatanode(downnode);
    waitNodeState(ret, AdminStates.NORMAL);
    verifyStats(namenode, fsn, ret, false);
  }
}
 
Example #10
Source File: TestFailureOfSharedDir.java    From big-c with Apache License 2.0
/**
 * Make sure that the shared edits dirs are listed before non-shared dirs
 * when the configuration is parsed. This ensures that the shared journals
 * are synced before the local ones.
 */
@Test
public void testSharedDirsComeFirstInEditsList() throws Exception {
  Configuration conf = new Configuration();
  URI sharedA = new URI("file:///shared-A");
  URI localA = new URI("file:///local-A");
  URI localB = new URI("file:///local-B");
  URI localC = new URI("file:///local-C");
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
      sharedA.toString());
  // List them in reverse order, to make sure they show up in
  // the order listed, regardless of lexical sort order.
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      Joiner.on(",").join(localC, localB, localA));
  List<URI> dirs = FSNamesystem.getNamespaceEditsDirs(conf);
  assertEquals(
      "Shared dirs should come first, then local dirs, in the order " +
      "they were listed in the configuration.",
      Joiner.on(",").join(sharedA, localC, localB, localA),
      Joiner.on(",").join(dirs));
}
 
Example #11
Source File: TestTotalFiles.java    From RDFS with Apache License 2.0
@Test
public void testRestartWithSaveNamespace() throws Exception {
  String topDir = "/testRestartWithSaveNamespace";
  FSNamesystem namesystem = null;
  int totalFiles = 0;
  for (int i = 0; i < 10; i++) {
    DFSTestUtil util = new DFSTestUtil(topDir, 5, 10, MAX_FILE_SIZE);
    util.createFiles(fs, topDir);
    DFSTestUtil util1 = new DFSTestUtil(topDir, 5, 1, MAX_FILE_SIZE);
    util1.createFiles(fs, topDir);
    totalFiles += 10;
    totalFiles -= deleteFiles(util, topDir);
    totalFiles -= concatFiles(util1, topDir);
    if (random.nextBoolean()) {
      cluster.getNameNode().saveNamespace(true, false);
    }
    namesystem = cluster.getNameNode().namesystem;
    assertEquals(totalFiles, namesystem.getFilesTotal());
    cluster.restartNameNodes();
    namesystem = cluster.getNameNode().namesystem;
    assertEquals(totalFiles, namesystem.getFilesTotal());
  }

  assertTrue(namesystem.getFilesAndDirectoriesTotal() > namesystem
      .getFilesTotal());
}
 
Example #12
Source File: TestOverReplicatedBlocks.java    From hadoop with Apache License 2.0
/**
 * Test that an over-replicated block gets invalidated when the replication
 * factor is decreased for a file with a partial block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Example #13
Source File: TestTotalFiles.java    From RDFS with Apache License 2.0
@Test
public void testConcat() throws Exception {
  String topDir = "/testConcat";
  DFSTestUtil util = new DFSTestUtil(topDir, 100, 1, MAX_FILE_SIZE);
  util.createFiles(fs, topDir);
  FSNamesystem namesystem = cluster.getNameNode().namesystem;
  assertEquals(100, namesystem.getFilesTotal());
  assertTrue(namesystem.getFilesAndDirectoriesTotal() > namesystem
      .getFilesTotal());
  String[] files = util.getFileNames(topDir);
  for (int i = 0; i < files.length; i += 10) {
    String target = files[i];
    String[] srcs = Arrays.copyOfRange(files, i + 1, i + 10);
    cluster.getNameNode().concat(target, srcs, false);
  }
  assertEquals(10, namesystem.getFilesTotal());
}
 
Example #14
Source File: TestTotalFiles.java    From RDFS with Apache License 2.0
@Test
public void testRestart() throws Exception {
  String topDir = "testRestart";
  DFSTestUtil util = new DFSTestUtil(topDir, 100, 10, MAX_FILE_SIZE);
  util.createFiles(fs, topDir);
  FSNamesystem namesystem = cluster.getNameNode().namesystem;
  assertEquals(100, namesystem.getFilesTotal());
  assertTrue(namesystem.getFilesAndDirectoriesTotal() > namesystem
      .getFilesTotal());

  cluster.restartNameNodes();
  namesystem = cluster.getNameNode().namesystem;
  assertEquals(100, namesystem.getFilesTotal());
  assertTrue(namesystem.getFilesAndDirectoriesTotal() > namesystem
      .getFilesTotal());
}
 
Example #15
Source File: FSImageFormatPBSnapshot.java    From big-c with Apache License 2.0
public Saver(FSImageFormatProtobuf.Saver parent,
    FileSummary.Builder headers, SaveNamespaceContext context,
    FSNamesystem fsn) {
  this.parent = parent;
  this.headers = headers;
  this.context = context;
  this.fsn = fsn;
}
 
Example #16
Source File: TestRbwReportSafeMode.java    From RDFS with Apache License 2.0
private void waitForBlocks() throws Exception {
  FSNamesystem namesystem = cluster.getNameNode().namesystem;
  long totalBlocks = namesystem.getBlocksTotal();
  long safeBlocks = namesystem.getSafeBlocks();
  while (totalBlocks > safeBlocks) {
    System.out.println("Waiting for blocks, total : " + totalBlocks
        + " safe : " + safeBlocks);
    Thread.sleep(1000);
    totalBlocks = namesystem.getBlocksTotal();
    safeBlocks = namesystem.getSafeBlocks();
  }
}
 
Example #17
Source File: TestFailoverWithBlockTokensEnabled.java    From hadoop with Apache License 2.0
private static void lowerKeyUpdateIntervalAndClearKeys(FSNamesystem namesystem) {
  BlockTokenSecretManager btsm = namesystem.getBlockManager()
      .getBlockTokenSecretManager();
  btsm.setKeyUpdateIntervalForTesting(2 * 1000);
  btsm.setTokenLifetime(2 * 1000);
  btsm.clearAllKeysForTesting();
}
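A sketch of how such a helper might be invoked in the HA tests, assuming an HA MiniDFSCluster with two namenodes; the index-based getNamesystem(int) accessor is the standard MiniDFSCluster API:

// Shorten block-token key lifetimes on both namenodes so a failover test
// can observe key rollover within a few seconds instead of hours.
lowerKeyUpdateIntervalAndClearKeys(cluster.getNamesystem(0));
lowerKeyUpdateIntervalAndClearKeys(cluster.getNamesystem(1));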
 
Example #18
Source File: TestHAConfiguration.java    From hadoop with Apache License 2.0
/**
 * Tests that the namenode edits dirs and shared edits dirs are returned
 * with duplicates removed.
 */
@Test
public void testHAUniqueEditDirs() throws IOException {
  Configuration conf = new Configuration();

  conf.set(DFS_NAMENODE_EDITS_DIR_KEY, "file://edits/dir, "
      + "file://edits/shared/dir"); // overlapping
  conf.set(DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "file://edits/shared/dir");

  // getNamespaceEditsDirs removes duplicates across edits and shared.edits
  Collection<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf);
  assertEquals(2, editsDirs.size());
}
 
Example #19
Source File: FSNamesystemMetrics.java    From RDFS with Apache License 2.0
public FSNamesystemMetrics(Configuration conf, FSNamesystem ns) {
  fsNameSystem = ns;
  String sessionId = conf.get("session.id");
   
  // Create a record for FSNamesystem metrics
  MetricsContext metricsContext = MetricsUtil.getContext("dfs");
  metricsRecord = MetricsUtil.createRecord(metricsContext, "FSNamesystem");
  metricsRecord.setTag("sessionId", sessionId);
  metricsContext.registerUpdater(this);
  log.info("Initializing FSNamesystemMetrics using context object:" +
            metricsContext.getClass().getName());
}
 
Example #20
Source File: SnapshotTestHelper.java    From hadoop with Apache License 2.0
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
  final String[] lognames = {
      "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
      "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
  };
  for(String n : lognames) {
    GenericTestUtils.disableLog(LogFactory.getLog(n));
  }
  
  GenericTestUtils.disableLog(LogFactory.getLog(UserGroupInformation.class));
  GenericTestUtils.disableLog(LogFactory.getLog(BlockManager.class));
  GenericTestUtils.disableLog(LogFactory.getLog(FSNamesystem.class));
  GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
  GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));
  
  GenericTestUtils.disableLog(BlockScanner.LOG);
  GenericTestUtils.disableLog(HttpServer2.LOG);
  GenericTestUtils.disableLog(DataNode.LOG);
  GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
  GenericTestUtils.disableLog(LeaseManager.LOG);
  GenericTestUtils.disableLog(NameNode.stateChangeLog);
  GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
  GenericTestUtils.disableLog(DFSClient.LOG);
  GenericTestUtils.disableLog(Server.LOG);
}
 
Example #21
Source File: TestBlocksWithNotEnoughRacks.java    From big-c with Apache License 2.0
@Test
public void testReduceReplFactorRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decrease the replication factor; make sure the deleted replica
    // was not the one that lived on the rack with only one replica,
    // i.e. we should still have 2 racks after reducing the repl factor.
    REPLICATION_FACTOR = 2;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR); 

    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Example #22
Source File: Ingest.java    From RDFS with Apache License 2.0
Ingest(Standby standby, FSNamesystem ns, Configuration conf, File edits) 
throws IOException {
  this.fsNamesys = ns;
  this.standby = standby;
  this.confg = conf;
  this.ingestFile = edits;
  catchUpLag = conf.getLong("avatar.catchup.lag", 2 * 1024 * 1024L);
}
 
Example #23
Source File: FSNamesystemMetrics.java    From hadoop-gpu with Apache License 2.0
/**
 * Since this object is a registered updater, this method will be called
 * periodically, e.g. every 5 seconds.
 * We set the metrics values within this function before pushing them out.
 * FSNamesystem updates its own local variables, which are
 * lightweight compared to Metrics counters.
 *
 * Some of the metrics are explicitly cast to int, because a few metrics
 * collectors do not handle long values. It is safe to cast to int for now,
 * as all these values fit within an int.
 * Metrics related to DFS capacity are stored in bytes, which do not fit in
 * an int, so they are rounded to GB.
 */
public void doUpdates(MetricsContext unused) {
  /**
   * ToFix
   * If the metrics counters were instead stored in the metrics objects
   * themselves, we could avoid copying the values on each update.
   */
  synchronized (this) {
    FSNamesystem fsNameSystem = FSNamesystem.getFSNamesystem();
    filesTotal.set((int)fsNameSystem.getFilesTotal());
    blocksTotal.set((int)fsNameSystem.getBlocksTotal());
    capacityTotalGB.set(roundBytesToGBytes(fsNameSystem.getCapacityTotal()));
    capacityUsedGB.set(roundBytesToGBytes(fsNameSystem.getCapacityUsed()));
    capacityRemainingGB.set(roundBytesToGBytes(fsNameSystem.
                                             getCapacityRemaining()));
    totalLoad.set(fsNameSystem.getTotalLoad());
    corruptBlocks.set((int)fsNameSystem.getCorruptReplicaBlocks());
    excessBlocks.set((int)fsNameSystem.getExcessBlocks());
    pendingDeletionBlocks.set((int)fsNameSystem.getPendingDeletionBlocks());
    pendingReplicationBlocks.set((int)fsNameSystem.
                                 getPendingReplicationBlocks());
    underReplicatedBlocks.set((int)fsNameSystem.getUnderReplicatedBlocks());
    scheduledReplicationBlocks.set((int)fsNameSystem.
                                    getScheduledReplicationBlocks());
    missingBlocks.set((int)fsNameSystem.getMissingBlocksCount());
    blockCapacity.set(fsNameSystem.getBlockCapacity());

    for (MetricsBase m : registry.getMetricsList()) {
      m.pushMetric(metricsRecord);
    }
  }
  metricsRecord.update();
}
 
Example #24
Source File: DFSTestUtil.java    From big-c with Apache License 2.0
/**
 * Wait for the datanode to reach the alive or dead state, within waitTime
 * milliseconds.
 */
public static void waitForDatanodeState(
    final MiniDFSCluster cluster, final String nodeID,
    final boolean alive, int waitTime)
    throws TimeoutException, InterruptedException {
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      FSNamesystem namesystem = cluster.getNamesystem();
      final DatanodeDescriptor dd = BlockManagerTestUtil.getDatanode(
          namesystem, nodeID);
      return (dd.isAlive == alive);
    }
  }, 100, waitTime);
}
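An illustrative call to the utility above; the UUID accessor and the timeout are assumptions, not part of the original source:

// Stop a datanode, then wait up to 20 seconds for the namenode to mark it dead.
String dnUuid = cluster.getDataNodes().get(0).getDatanodeUuid();
cluster.stopDataNode(0);
DFSTestUtil.waitForDatanodeState(cluster, dnUuid, false, 20000);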
 
Example #25
Source File: TestDecommission.java    From big-c with Apache License 2.0
public void testClusterStats(int numNameNodes) throws IOException,
    InterruptedException {
  LOG.info("Starting test testClusterStats");
  int numDatanodes = 1;
  startCluster(numNameNodes, numDatanodes, conf);
  
  for (int i = 0; i < numNameNodes; i++) {
    FileSystem fileSys = cluster.getFileSystem(i);
    Path file = new Path("testClusterStats.dat");
    writeFile(fileSys, file, 1);
    
    FSNamesystem fsn = cluster.getNamesystem(i);
    NameNode namenode = cluster.getNameNode(i);
    
    DatanodeInfo decomInfo = decommissionNode(i, null, null,
        AdminStates.DECOMMISSION_INPROGRESS);
    DataNode decomNode = getDataNode(decomInfo);
    // Check namenode stats for multiple datanode heartbeats
    verifyStats(namenode, fsn, decomInfo, decomNode, true);
    
    // Stop decommissioning and verify stats
    writeConfigFile(excludeFile, null);
    refreshNodes(fsn, conf);
    DatanodeInfo retInfo = NameNodeAdapter.getDatanode(fsn, decomInfo);
    DataNode retNode = getDataNode(decomInfo);
    waitNodeState(retInfo, AdminStates.NORMAL);
    verifyStats(namenode, fsn, retInfo, retNode, false);
  }
}
 
Example #26
Source File: TestDatanodeManager.java    From big-c with Apache License 2.0
@Test (timeout = 100000)
public void testRejectUnresolvedDatanodes() throws IOException {
  //Create the DatanodeManager which will be tested
  FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
  Mockito.when(fsn.hasWriteLock()).thenReturn(true);
  
  Configuration conf = new Configuration();
  
  //Set configuration property for rejecting unresolved topology mapping
  conf.setBoolean(
      DFSConfigKeys.DFS_REJECT_UNRESOLVED_DN_TOPOLOGY_MAPPING_KEY, true);
  
  //set TestDatanodeManager.MyResolver to be used for topology resolving
  conf.setClass(
      CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
      TestDatanodeManager.MyResolver.class, DNSToSwitchMapping.class);
  
  //create DatanodeManager
  DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class),
      fsn, conf);
  
  //storageID to register.
  String storageID = "someStorageID-123";
  
  DatanodeRegistration dr = Mockito.mock(DatanodeRegistration.class);
  Mockito.when(dr.getDatanodeUuid()).thenReturn(storageID);
  
  try {
    //Register this node
    dm.registerDatanode(dr);
    Assert.fail("Expected an UnresolvedTopologyException");
  } catch (UnresolvedTopologyException ute) {
    LOG.info("Expected - topology is not resolved and " +
        "registration is rejected.");
  } catch (Exception e) {
    Assert.fail("Expected an UnresolvedTopologyException");
  }
}
 
Example #27
Source File: TestNodeCount.java    From hadoop with Apache License 2.0
NumberReplicas countNodes(Block block, FSNamesystem namesystem) {
  namesystem.readLock();
  try {
    lastBlock = block;
    lastNum = namesystem.getBlockManager().countNodes(block);
    return lastNum;
  }
  finally {
    namesystem.readUnlock();
  }
}
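Illustrative usage of the helper above, assuming a running cluster, a FileSystem handle fs, and a test file at filePath (all assumptions; the expected replica count is made up):

// Count live replicas of the file's first block; the helper acquires and
// releases the namesystem read lock around the count.
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
NumberReplicas num = countNodes(b.getLocalBlock(), cluster.getNamesystem());
assertEquals(3, num.liveReplicas()); // expected count is illustrative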
 
Example #28
Source File: TestBlocksWithNotEnoughRacks.java    From big-c with Apache License 2.0
@Test
public void testUnderReplicatedUsesNewRacks() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  // All datanodes are on the same rack
  String racks[] = {"/rack1", "/rack1", "/rack1", "/rack1", "/rack1"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 1, REPLICATION_FACTOR, 0);
    
    // Add new datanodes on a different rack and increase the
    // replication factor so the block is underreplicated and make
    // sure at least one of the hosts on the new rack is used. 
    String newRacks[] = {"/rack2", "/rack2"};
    cluster.startDataNodes(conf, 2, true, null, newRacks);
    REPLICATION_FACTOR = 5;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR);

    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Example #29
Source File: TestDataNodeVolumeFailure.java    From hadoop with Apache License 2.0
/**
 * Verifies two things:
 *  1. the number of locations of each block in the name node
 *     matches the number of actual block files
 *  2. block files + pending blocks add up to the total number of blocks
 *     that a file has, including replication (an HDFS file with 30 blocks
 *     at repl=2 gives 60 in total)
 * @param fn - file name
 * @param fs - file size
 * @throws IOException
 */
private void verify(String fn, int fs) throws IOException{
  // now count how many physical blocks are there
  int totalReal = countRealBlocks(block_map);
  System.out.println("countRealBlocks counted " + totalReal + " blocks");

  // count how many blocks store in NN structures.
  int totalNN = countNNBlocks(block_map, fn, fs);
  System.out.println("countNNBlocks counted " + totalNN + " blocks");

  for(String bid : block_map.keySet()) {
    BlockLocs bl = block_map.get(bid);
    // System.out.println(bid + "->" + bl.num_files + "vs." + bl.num_locs);
    // number of physical files (1 or 2) should be same as number of datanodes
    // in the list of the block locations
    assertEquals("Num files should match num locations",
        bl.num_files, bl.num_locs);
  }
  assertEquals("Num physical blocks should match num stored in the NN",
      totalReal, totalNN);

  // now check the number of under-replicated blocks
  FSNamesystem fsn = cluster.getNamesystem();
  // force update of all the metric counts by calling computeDatanodeWork
  BlockManagerTestUtil.getComputedDatanodeWork(fsn.getBlockManager());
  // get all the counts 
  long underRepl = fsn.getUnderReplicatedBlocks();
  long pendRepl = fsn.getPendingReplicationBlocks();
  long totalRepl = underRepl + pendRepl;
  System.out.println("underreplicated after = "+ underRepl + 
      " and pending repl ="  + pendRepl + "; total underRepl = " + totalRepl);

  System.out.println("total blocks (real and replicating):" + 
      (totalReal + totalRepl) + " vs. all files blocks " + blocks_num*2);

  // together all the blocks should be equal to all real + all underreplicated
  assertEquals("Incorrect total block count",
      totalReal + totalRepl, blocks_num * repl);
}
 
Example #30
Source File: NNThroughputBenchmark.java    From RDFS with Apache License 2.0
static void turnOffNameNodeLogging() {
	// change log level to ERROR: NameNode.LOG & NameNode.stateChangeLog
	((Log4JLogger) NameNode.LOG).getLogger().setLevel(Level.ERROR);
	((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(
			Level.ERROR);
	((Log4JLogger) NetworkTopology.LOG).getLogger().setLevel(Level.ERROR);
	((Log4JLogger) FSNamesystem.LOG).getLogger().setLevel(Level.ERROR);
	((Log4JLogger) FSNamesystem.auditLog).getLogger().setLevel(Level.ERROR);
	((Log4JLogger) LeaseManager.LOG).getLogger().setLevel(Level.ERROR);
}