Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#shutdown()

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#shutdown(). Each example is drawn from an open-source project; the source file and license are listed above each snippet.
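
All of the examples below share the same basic shape: build the cluster, exercise HDFS inside a try block, and call shutdown() in a finally block so the NameNode and DataNode resources are released even when an assertion fails. The following minimal sketch illustrates that pattern; the test name and the /test path are placeholders rather than code taken from any of the projects below.

@Test
public void testWithMiniDfsCluster() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    // start a one-DataNode cluster and wait until it is fully up
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    fs.mkdirs(new Path("/test"));
    // ... assertions against the running cluster go here ...
  } finally {
    // release NameNode and DataNode resources even if the test body throws
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}

The older examples that use the deprecated multi-argument MiniDFSCluster constructors (hadoop-gpu, RDFS) follow the same try/finally discipline around shutdown().
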
Example 1
Source File: TestOverReplicatedBlocks.java    From big-c with Apache License 2.0
/**
 * Test over replicated block should get invalidated when decreasing the
 * replication for a partial block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Example 2
Source File: TestDataNodeMetrics.java    From hadoop-gpu with Apache License 2.0
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new Configuration();
  conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  try {
    FileSystem fs = cluster.getFileSystem();
    final long LONG_FILE_LEN = Integer.MAX_VALUE+1L; 
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short)1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
    DataNodeMetrics metrics = datanode.getMetrics();
    assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 3
Source File: TestDataJoin.java    From hadoop-gpu with Apache License 2.0
public static Test suite() {
  TestSetup setup = new TestSetup(new TestSuite(TestDataJoin.class)) {
    protected void setUp() throws Exception {
      Configuration conf = new Configuration();
      cluster = new MiniDFSCluster(conf, 2, true, null);
    }
    protected void tearDown() throws Exception {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  };
  return setup;
}
 
Example 4
Source File: TestGenericJournalConf.java    From hadoop with Apache License 2.0
/**
 * Test that a dummy implementation of JournalManager can
 * be initialized on startup
 */
@Test
public void testDummyJournalManager() throws Exception {
  MiniDFSCluster cluster = null;
  Configuration conf = new Configuration();

  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX + ".dummy",
           DummyJournalManager.class.getName());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, DUMMY_URI);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 0);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    
    assertTrue(DummyJournalManager.shouldPromptCalled);
    assertTrue(DummyJournalManager.formatCalled);
    assertNotNull(DummyJournalManager.conf);
    assertEquals(new URI(DUMMY_URI), DummyJournalManager.uri);
    assertNotNull(DummyJournalManager.nsInfo);
    assertEquals(DummyJournalManager.nsInfo.getClusterID(),
        cluster.getNameNode().getNamesystem().getClusterId());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 5
Source File: TestBalancer.java    From RDFS with Apache License 2.0
private void testBalancerDefaultConstructor(Configuration conf, long[] capacities,
    String[] racks, long newCapacity, String newRack)
    throws Exception {
  int numOfDatanodes = capacities.length;
  assertEquals(numOfDatanodes, racks.length);
  cluster = new MiniDFSCluster(0, conf, capacities.length, true, true, null, 
      racks, capacities);
  try {
    cluster.waitActive();
    client = DFSClient.createNamenode(conf);

    long totalCapacity = 0L;
    for (long capacity : capacities) {
      totalCapacity += capacity;
    }
    // fill up the cluster to be 30% full
    long totalUsedSpace = totalCapacity * 3 / 10;
    createFile(totalUsedSpace / numOfDatanodes, (short) numOfDatanodes);
    // start up an empty node with the same capacity and on the same rack
    cluster.startDataNodes(conf, 1, true, null, new String[] { newRack },
        new long[] { newCapacity });
    totalCapacity += newCapacity;
    // run balancer and validate results
    runBalancer(conf, totalUsedSpace, totalCapacity);
  } finally {
    cluster.shutdown();
  }
}
 
Example 6
Source File: TestMover.java    From hadoop with Apache License 2.0
/**
 * Test Mover Cli by specifying a list of files/directories using option "-p".
 * There is only one namenode (and hence name service) specified in the conf.
 */
@Test
public void testMoverCli() throws Exception {
  final MiniDFSCluster cluster = new MiniDFSCluster
      .Builder(new HdfsConfiguration()).numDataNodes(0).build();
  try {
    final Configuration conf = cluster.getConfiguration(0);
    try {
      Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "bar");
      Assert.fail("Expected exception for illegal path bar");
    } catch (IllegalArgumentException e) {
      GenericTestUtils.assertExceptionContains("bar is not absolute", e);
    }

    Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf);
    Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, namenodes.size());
    Assert.assertEquals(1, movePaths.size());
    URI nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    Assert.assertNull(movePaths.get(nn));

    movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar");
    namenodes = DFSUtil.getNsServiceRpcUris(conf);
    Assert.assertEquals(1, movePaths.size());
    nn = namenodes.iterator().next();
    Assert.assertTrue(movePaths.containsKey(nn));
    checkMovePaths(movePaths.get(nn), new Path("/foo"), new Path("/bar"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 7
Source File: TestShortCircuitLocalRead.java    From big-c with Apache License 2.0
@Test(timeout=10000)
public void testDeprecatedGetBlockLocalPathInfoRpc() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(true).build();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  try {
    DFSTestUtil.createFile(fs, new Path("/tmp/x"), 16, (short) 1, 23);
    LocatedBlocks lb = cluster.getNameNode().getRpcServer()
        .getBlockLocations("/tmp/x", 0, 16);
    // Create a new block object, because the block inside LocatedBlock at
    // namenode is of type BlockInfo.
    ExtendedBlock blk = new ExtendedBlock(lb.get(0).getBlock());
    Token<BlockTokenIdentifier> token = lb.get(0).getBlockToken();
    final DatanodeInfo dnInfo = lb.get(0).getLocations()[0];
    ClientDatanodeProtocol proxy = 
        DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, false);
    try {
      proxy.getBlockLocalPathInfo(blk, token);
      Assert.fail("The call should have failed as this user "
          + " is not allowed to call getBlockLocalPathInfo");
    } catch (IOException ex) {
      Assert.assertTrue(ex.getMessage().contains(
          "not allowed to call getBlockLocalPathInfo"));
    }
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
 
Example 8
Source File: TestWebHDFSForHA.java    From big-c with Apache License 2.0
@Test
public void testSecureHAToken() throws IOException, InterruptedException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.setBoolean(DFSConfigKeys
          .DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);

  MiniDFSCluster cluster = null;
  WebHdfsFileSystem fs = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(0).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = spy((WebHdfsFileSystem) FileSystem.get(WEBHDFS_URI, conf));
    FileSystemTestHelper.addFileSystemForTesting(WEBHDFS_URI, conf, fs);

    cluster.transitionToActive(0);
    Token<?> token = fs.getDelegationToken(null);

    cluster.shutdownNameNode(0);
    cluster.transitionToActive(1);
    token.renew(conf);
    token.cancel(conf);
    verify(fs).renewDelegationToken(token);
    verify(fs).cancelDelegationToken(token);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 9
Source File: TestMover.java    From hadoop with Apache License 2.0
@Test
public void testMoverFailedRetry() throws Exception {
  // HDFS-8147
  final Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_MOVER_RETRY_MAX_ATTEMPTS_KEY, "2");
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(3)
      .storageTypes(
          new StorageType[][] {{StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE},
              {StorageType.DISK, StorageType.ARCHIVE}}).build();
  try {
    cluster.waitActive();
    final DistributedFileSystem dfs = cluster.getFileSystem();
    final String file = "/testMoverFailedRetry";
    // write to DISK
    final FSDataOutputStream out = dfs.create(new Path(file), (short) 2);
    out.writeChars("testMoverFailedRetry");
    out.close();

    // Delete the block file so the block move will fail with FileNotFoundException
    LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
    cluster.corruptBlockOnDataNodesByDeletingBlockFile(lb.getBlock());
    // move to ARCHIVE
    dfs.setStoragePolicy(new Path(file), "COLD");
    int rc = ToolRunner.run(conf, new Mover.Cli(),
        new String[] {"-p", file.toString()});
    Assert.assertEquals("Movement should fail after some retry",
        ExitStatus.IO_EXCEPTION.getExitCode(), rc);
  } finally {
    cluster.shutdown();
  }
}
 
Example 10
Source File: TestOverReplicatedBlocks.java    From big-c with Apache License 2.0
/** Test that processOverReplicatedBlock can handle corrupt replicas fine.
 * It makes sure that corrupt replicas are not treated as valid ones,
 * which would otherwise let the NN delete valid replicas while keeping
 * corrupt ones.
 */
@Test
public void testProcesOverReplicateBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, 100L);
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
      Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short)3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short)3);
    
    // corrupt the block on datanode 0
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    assertTrue(cluster.corruptReplica(0, block));
    DataNodeProperties dnProps = cluster.stopDataNode(0);
    // remove block scanner log to trigger block scanning
    File scanCursor = new File(new File(MiniDFSCluster.getFinalizedDir(
        cluster.getInstanceStorageDir(0, 0),
        cluster.getNamesystem().getBlockPoolId()).getParent()).getParent(),
        "scanner.cursor");
    //wait for one minute for deletion to succeed;
    for(int i = 0; !scanCursor.delete(); i++) {
      assertTrue("Could not delete " + scanCursor.getAbsolutePath() +
          " in one minute", i < 60);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }
    
    // restart the datanode so the corrupt replica will be detected
    cluster.restartDataNode(dnProps);
    DFSTestUtil.waitReplication(fs, fileName, (short)2);
    
    String blockPoolId = cluster.getNamesystem().getBlockPoolId();
    final DatanodeID corruptDataNode = 
      DataNodeTestUtils.getDNRegistrationForBP(
          cluster.getDataNodes().get(2), blockPoolId);
       
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
    try {
      namesystem.writeLock();
      synchronized(hm) {
        // set live datanode's remaining space to be 0 
        // so they will be chosen to be deleted when over-replication occurs
        String corruptMachineName = corruptDataNode.getXferAddr();
        for (DatanodeDescriptor datanode : hm.getDatanodes()) {
          if (!corruptMachineName.equals(datanode.getXferAddr())) {
            datanode.getStorageInfos()[0].setUtilizationForTesting(100L, 100L, 0, 100L);
            datanode.updateHeartbeat(
                BlockManagerTestUtil.getStorageReportsForDatanode(datanode),
                0L, 0L, 0, 0, null);
          }
        }

        // decrease the replication factor to 1; 
        NameNodeAdapter.setReplication(namesystem, fileName.toString(), (short)1);

        // corrupt one won't be chosen to be excess one
        // without 4910 the number of live replicas would be 0: block gets lost
        assertEquals(1, bm.countNodes(block.getLocalBlock()).liveReplicas());
      }
    } finally {
      namesystem.writeUnlock();
    }
    
  } finally {
    cluster.shutdown();
  }
}
 
Example 11
Source File: TestRefreshNamenodes.java    From big-c with Apache License 2.0
@Test
public void testRefreshNamenodes() throws IOException {
  // Start cluster with a single NN and DN
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new NSConf("ns1").addNN(
          new NNConf(null).setIpcPort(nnPort1)))
      .setFederation(true);
    cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .build();

    DataNode dn = cluster.getDataNodes().get(0);
    assertEquals(1, dn.getAllBpOs().length);

    cluster.addNameNode(conf, nnPort2);
    assertEquals(2, dn.getAllBpOs().length);

    cluster.addNameNode(conf, nnPort3);
    assertEquals(3, dn.getAllBpOs().length);

    cluster.addNameNode(conf, nnPort4);

    // Ensure a BPOfferService in the datanodes corresponds to
    // a namenode in the cluster
    Set<InetSocketAddress> nnAddrsFromCluster = Sets.newHashSet();
    for (int i = 0; i < 4; i++) {
      assertTrue(nnAddrsFromCluster.add(
          cluster.getNameNode(i).getNameNodeAddress()));
    }
    
    Set<InetSocketAddress> nnAddrsFromDN = Sets.newHashSet();
    for (BPOfferService bpos : dn.getAllBpOs()) {
      for (BPServiceActor bpsa : bpos.getBPServiceActors()) {
        assertTrue(nnAddrsFromDN.add(bpsa.getNNSocketAddress()));
      }
    }
    
    assertEquals("",
        Joiner.on(",").join(
          Sets.symmetricDifference(nnAddrsFromCluster, nnAddrsFromDN)));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 12
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
/**
 * Test case where the name node is reformatted while the secondary namenode
 * is running. The secondary should shut itself down if it talks to a NN
 * with the wrong namespace.
 */
@Test
public void testReformatNNBetweenCheckpoints() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  
  Configuration conf = new HdfsConfiguration();
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      1);

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    int origPort = cluster.getNameNodePort();
    int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
    Configuration snnConf = new Configuration(conf);
    File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
      "namesecondary");
    snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      checkpointDir.getAbsolutePath());
    secondary = startSecondaryNameNode(snnConf);

    // secondary checkpoints once
    secondary.doCheckpoint();

    // we reformat primary NN
    cluster.shutdown();
    cluster = null;

    // Brief sleep to make sure that the 2NN's IPC connection to the NN
    // is dropped.
    try {
      Thread.sleep(100);
    } catch (InterruptedException ie) {
    }
    
    // Start a new NN with the same host/port.
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .nameNodePort(origPort)
        .nameNodeHttpPort(origHttpPort)
        .format(true).build();

    try {
      secondary.doCheckpoint();
      fail("Should have failed checkpoint against a different namespace");
    } catch (IOException ioe) {
      LOG.info("Got expected failure", ioe);
      assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }  
}
 
Example 13
Source File: TestBookKeeperAsHASharedDir.java    From big-c with Apache License 2.0
/**
 * Test that two namenodes can't continue as primary
 */
@Test
public void testMultiplePrimariesStarted() throws Exception {
  Path p1 = new Path("/testBKJMMultiplePrimary");

  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY,
             BKJMUtil.createJournalURI("/hotfailoverMultiple").toString());
    BKJMUtil.addJournalManagerDefinition(conf);

    cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(0)
      .manageNameDfsSharedDirs(false)
      .checkExitOnShutdown(false)
      .build();
    NameNode nn1 = cluster.getNameNode(0);
    NameNode nn2 = cluster.getNameNode(1);
    cluster.waitActive();
    cluster.transitionToActive(0);

    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    fs.mkdirs(p1);
    nn1.getRpcServer().rollEditLog();
    cluster.transitionToActive(1);
    fs = cluster.getFileSystem(0); // get the older active server.

    try {
      fs.delete(p1, true);
      fail("Log update on older active should cause it to exit");
    } catch (RemoteException re) {
      assertTrue(re.getClassName().contains("ExitException"));
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 14
Source File: TestDataNodeMXBean.java    From big-c with Apache License 2.0
@Test
public void testDataNodeMXBean() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    List<DataNode> datanodes = cluster.getDataNodes();
    Assert.assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); 
    ObjectName mxbeanName = new ObjectName(
        "Hadoop:service=DataNode,name=DataNodeInfo");
    // get attribute "ClusterId"
    String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
    Assert.assertEquals(datanode.getClusterId(), clusterId);
    // get attribute "Version"
    String version = (String)mbs.getAttribute(mxbeanName, "Version");
    Assert.assertEquals(datanode.getVersion(),version);
    // get attribute "RpcPort"
    String rpcPort = (String)mbs.getAttribute(mxbeanName, "RpcPort");
    Assert.assertEquals(datanode.getRpcPort(),rpcPort);
    // get attribute "HttpPort"
    String httpPort = (String)mbs.getAttribute(mxbeanName, "HttpPort");
    Assert.assertEquals(datanode.getHttpPort(),httpPort);
    // get attribute "NamenodeAddresses"
    String namenodeAddresses = (String)mbs.getAttribute(mxbeanName, 
        "NamenodeAddresses");
    Assert.assertEquals(datanode.getNamenodeAddresses(),namenodeAddresses);
    // get attribute "getVolumeInfo"
    String volumeInfo = (String)mbs.getAttribute(mxbeanName, "VolumeInfo");
    Assert.assertEquals(replaceDigits(datanode.getVolumeInfo()),
        replaceDigits(volumeInfo));
    // Ensure mxbean's XceiverCount is same as the DataNode's
    // live value.
    int xceiverCount = (Integer)mbs.getAttribute(mxbeanName,
        "XceiverCount");
    Assert.assertEquals(datanode.getXceiverCount(), xceiverCount);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 15
Source File: TestBlockFixer.java    From RDFS with Apache License 2.0
@Test
public void testFilterUnfixableFiles() throws IOException {
  conf = new Configuration();
  dfsCluster = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
  dfsCluster.waitActive();
  FileSystem fs = dfsCluster.getFileSystem();

  Utils.loadTestCodecs(conf);
  try {
    Configuration testConf = fs.getConf();
    BlockIntegrityMonitor blockFixer = new LocalBlockIntegrityMonitor(testConf);

    String p1 = "/user/foo/f1";
    String p2 = "/user/foo/f2";
    String p3 = "/user/foo/bar/f1";
    String p4 = "/raid/user/foo";
    String p5 = "/raidrs/user/foo/bar";
    fs.mkdirs(new Path(p4));

    List<String> fileList = new ArrayList<String>();
    fileList.add(p1);
    fileList.add(p2);
    fileList.add(p3);

    blockFixer.filterUnreconstructableSourceFiles(fs, fileList.iterator());
    // p3 should be filtered out.
    assertEquals(2, fileList.size());

    Set<String> filtered = new HashSet<String>();
    for (String p: fileList) filtered.add(p);
    assertFalse("File not filtered", filtered.contains(p3));

    fileList.add(p3);
    fs.mkdirs(new Path(p5));
    blockFixer.filterUnreconstructableSourceFiles(fs, fileList.iterator());
    // Nothing is filtered.
    assertEquals(3, fileList.size());
  } finally {
    dfsCluster.shutdown();
  }
}
 
Example 16
Source File: TestStartup.java    From hadoop with Apache License 2.0
private void testImageChecksum(boolean compress) throws Exception {
  MiniDFSCluster cluster = null;
  if (compress) {
    config.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  }

  try {
      LOG.info("\n===========================================\n" +
               "Starting empty cluster");
      
      cluster = new MiniDFSCluster.Builder(config)
        .numDataNodes(0)
        .format(true)
        .build();
      cluster.waitActive();
      
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/test"));
      
      LOG.info("Shutting down cluster #1");
      cluster.shutdown();
      cluster = null;

      // Corrupt the md5 files in all the namedirs
      corruptFSImageMD5(true);

      // Attach our own log appender so we can verify output
      final LogVerificationAppender appender = new LogVerificationAppender();
      final Logger logger = Logger.getRootLogger();
      logger.addAppender(appender);

      // Try to start a new cluster
      LOG.info("\n===========================================\n" +
      "Starting same cluster after simulated crash");
      try {
        cluster = new MiniDFSCluster.Builder(config)
          .numDataNodes(0)
          .format(false)
          .build();
        fail("Should not have successfully started with corrupt image");
      } catch (IOException ioe) {
        GenericTestUtils.assertExceptionContains(
            "Failed to load an FSImage file!", ioe);
        int md5failures = appender.countExceptionsWithMessage(
            " is corrupt with MD5 checksum of ");
        // Two namedirs, so should have seen two failures
        assertEquals(2, md5failures);
      }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 17
Source File: TestNameNodeResourceChecker.java    From hadoop with Apache License 2.0
/**
 * Tests that NameNode resource monitor causes the NN to enter safe mode when
 * resources are low.
 */
@Test
public void testCheckThatNameNodeResourceMonitorIsRunning()
    throws IOException, InterruptedException {
  MiniDFSCluster cluster = null;
  try {
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, 1);
    
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();

    NameNodeResourceChecker mockResourceChecker = Mockito.mock(NameNodeResourceChecker.class);
    Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(true);
    cluster.getNameNode().getNamesystem().nnResourceChecker = mockResourceChecker;

    cluster.waitActive();

    String name = NameNodeResourceMonitor.class.getName();

    boolean isNameNodeMonitorRunning = false;
    Set<Thread> runningThreads = Thread.getAllStackTraces().keySet();
    for (Thread runningThread : runningThreads) {
      if (runningThread.toString().startsWith("Thread[" + name)) {
        isNameNodeMonitorRunning = true;
        break;
      }
    }
    assertTrue("NN resource monitor should be running",
        isNameNodeMonitorRunning);
    assertFalse("NN should not presently be in safe mode",
        cluster.getNameNode().isInSafeMode());
    
    Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(false);

    // Make sure the NNRM thread has a chance to run.
    long startMillis = Time.now();
    while (!cluster.getNameNode().isInSafeMode() &&
        Time.now() < startMillis + (60 * 1000)) {
      Thread.sleep(1000);
    }

    assertTrue("NN should be in safe mode after resources crossed threshold",
        cluster.getNameNode().isInSafeMode());
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
 
Example 18
Source File: TestDelegatingInputFormat.java    From big-c with Apache License 2.0
public void testSplitting() throws Exception {
  JobConf conf = new JobConf();
  MiniDFSCluster dfs = null;
  try {
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(4)
        .racks(new String[] { "/rack0", "/rack0", "/rack1", "/rack1" })
        .hosts(new String[] { "host0", "host1", "host2", "host3" })
        .build();
    FileSystem fs = dfs.getFileSystem();

    Path path = getPath("/foo/bar", fs);
    Path path2 = getPath("/foo/baz", fs);
    Path path3 = getPath("/bar/bar", fs);
    Path path4 = getPath("/bar/baz", fs);

    final int numSplits = 100;

    MultipleInputs.addInputPath(conf, path, TextInputFormat.class,
       MapClass.class);
    MultipleInputs.addInputPath(conf, path2, TextInputFormat.class,
       MapClass2.class);
    MultipleInputs.addInputPath(conf, path3, KeyValueTextInputFormat.class,
       MapClass.class);
    MultipleInputs.addInputPath(conf, path4, TextInputFormat.class,
       MapClass2.class);
    DelegatingInputFormat inFormat = new DelegatingInputFormat();
    InputSplit[] splits = inFormat.getSplits(conf, numSplits);

    int[] bins = new int[3];
    for (InputSplit split : splits) {
     assertTrue(split instanceof TaggedInputSplit);
     final TaggedInputSplit tis = (TaggedInputSplit) split;
     int index = -1;

     if (tis.getInputFormatClass().equals(KeyValueTextInputFormat.class)) {
       // path3
       index = 0;
     } else if (tis.getMapperClass().equals(MapClass.class)) {
       // path
       index = 1;
     } else {
       // path2 and path4
       index = 2;
     }

     bins[index]++;
    }

    // Each bin is a unique combination of a Mapper and InputFormat, and
    // DelegatingInputFormat should split each bin into numSplits splits,
    // regardless of the number of paths that use that Mapper/InputFormat
    for (int count : bins) {
     assertEquals(numSplits, count);
    }

    assertTrue(true);
  } finally {
    if (dfs != null) {
     dfs.shutdown();
    }
  }
}
 
Example 19
Source File: TestEditLog.java    From hadoop with Apache License 2.0
@Test
public void testEditChecksum() throws Exception {
  // start a cluster 
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  FileSystem fileSys = null;
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
  cluster.waitActive();
  fileSys = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  FSImage fsimage = namesystem.getFSImage();
  final FSEditLog editLog = fsimage.getEditLog();
  fileSys.mkdirs(new Path("/tmp"));

  Iterator<StorageDirectory> iter = fsimage.getStorage().
    dirIterator(NameNodeDirType.EDITS);
  LinkedList<StorageDirectory> sds = new LinkedList<StorageDirectory>();
  while (iter.hasNext()) {
    sds.add(iter.next());
  }
  editLog.close();
  cluster.shutdown();

  for (StorageDirectory sd : sds) {
    File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
    assertTrue(editFile.exists());

    long fileLen = editFile.length();
    LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
    RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
    rwf.seek(fileLen-4); // seek to checksum bytes
    int b = rwf.readInt();
    rwf.seek(fileLen-4);
    rwf.writeInt(b+1);
    rwf.close();
  }
  
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
    fail("should not be able to start");
  } catch (IOException e) {
    // expected
    assertNotNull("Cause of exception should be ChecksumException", e.getCause());
    assertEquals("Cause of exception should be ChecksumException",
        ChecksumException.class, e.getCause().getClass());
  }
}
 
Example 20
Source File: TestDelegatingInputFormat.java    From big-c with Apache License 2.0
@SuppressWarnings("unchecked")
public void testSplitting() throws Exception {
  Job job = Job.getInstance();
  MiniDFSCluster dfs = null;
  try {
    dfs = new MiniDFSCluster.Builder(job.getConfiguration()).numDataNodes(4)
        .racks(new String[] { "/rack0", "/rack0", "/rack1", "/rack1" })
        .hosts(new String[] { "host0", "host1", "host2", "host3" })
        .build();
    FileSystem fs = dfs.getFileSystem();

    Path path = getPath("/foo/bar", fs);
    Path path2 = getPath("/foo/baz", fs);
    Path path3 = getPath("/bar/bar", fs);
    Path path4 = getPath("/bar/baz", fs);

    final int numSplits = 100;

    FileInputFormat.setMaxInputSplitSize(job, 
            fs.getFileStatus(path).getLen() / numSplits);
    MultipleInputs.addInputPath(job, path, TextInputFormat.class,
       MapClass.class);
    MultipleInputs.addInputPath(job, path2, TextInputFormat.class,
       MapClass2.class);
    MultipleInputs.addInputPath(job, path3, KeyValueTextInputFormat.class,
       MapClass.class);
    MultipleInputs.addInputPath(job, path4, TextInputFormat.class,
       MapClass2.class);
    DelegatingInputFormat inFormat = new DelegatingInputFormat();

    int[] bins = new int[3];
    for (InputSplit split : (List<InputSplit>)inFormat.getSplits(job)) {
     assertTrue(split instanceof TaggedInputSplit);
     final TaggedInputSplit tis = (TaggedInputSplit) split;
     int index = -1;

     if (tis.getInputFormatClass().equals(KeyValueTextInputFormat.class)) {
       // path3
       index = 0;
     } else if (tis.getMapperClass().equals(MapClass.class)) {
       // path
       index = 1;
     } else {
       // path2 and path4
       index = 2;
     }

     bins[index]++;
    }

    assertEquals("count is not equal to num splits", numSplits, bins[0]);
    assertEquals("count is not equal to num splits", numSplits, bins[1]);
    assertEquals("count is not equal to 2 * num splits",
      numSplits * 2, bins[2]);
  } finally {
    if (dfs != null) {
     dfs.shutdown();
    }
  }
}