Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#getBaseDirectory()

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#getBaseDirectory(). Each example is taken from an open source project; the originating source file and license are noted above each snippet.
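As the examples below show, the typical pattern is to treat MiniDFSCluster.getBaseDirectory() as the root of a per-test scratch area: derive a subdirectory from it, delete any leftovers from earlier runs, and point the relevant HDFS configuration keys at it before formatting or starting a node. The following minimal sketch illustrates that pattern; the class name, helper name, and directory name are illustrative only, and the Hadoop HDFS test artifacts are assumed to be on the classpath.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class GetBaseDirectorySketch {

  // Hypothetical helper: derive a clean scratch directory under the
  // MiniDFSCluster base directory and wire it into the NameNode config.
  static Configuration newTestConf(String subDir) throws IOException {
    // getBaseDirectory() returns the base path as a String.
    File dir = new File(MiniDFSCluster.getBaseDirectory(), subDir);
    if (dir.exists() && !FileUtil.fullyDelete(dir)) {
      throw new IOException("Could not delete test directory " + dir);
    }
    Configuration conf = new HdfsConfiguration();
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dir.getAbsolutePath());
    return conf;
  }
}

A test would then typically hand this Configuration to DFSTestUtil.formatNameNode(conf) or to a MiniDFSCluster.Builder, as several of the examples below do.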
Example 1
Source File: MiniJournalCluster.java    From big-c with Apache License 2.0
private MiniJournalCluster(Builder b) throws IOException {
  LOG.info("Starting MiniJournalCluster with " +
      b.numJournalNodes + " journal nodes");
  
  if (b.baseDir != null) {
    this.baseDir = new File(b.baseDir);
  } else {
    this.baseDir = new File(MiniDFSCluster.getBaseDirectory());
  }

  nodes = new JNInfo[b.numJournalNodes];

  for (int i = 0; i < b.numJournalNodes; i++) {
    if (b.format) {
      File dir = getStorageDir(i);
      LOG.debug("Fully deleting JN directory " + dir);
      FileUtil.fullyDelete(dir);
    }
    JournalNode jn = new JournalNode();
    jn.setConf(createConfForNode(b, i));
    jn.start();
    nodes[i] = new JNInfo(jn);
  }
}
 
Example 2
Source File: TestFSNamesystem.java    From hadoop with Apache License 2.0
/**
 * Test that FSNamesystem#clear clears all leases.
 */
@Test
public void testFSNamespaceClearLeases() throws Exception {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
  LeaseManager leaseMan = fsn.getLeaseManager();
  leaseMan.addLease("client1", "importantFile");
  assertEquals(1, leaseMan.countLease());
  fsn.clear();
  leaseMan = fsn.getLeaseManager();
  assertEquals(0, leaseMan.countLease());
}
 
Example 3
Source File: TestJournalNode.java    From hadoop with Apache License 2.0
@Before
public void setup() throws Exception {
  File editsDir = new File(MiniDFSCluster.getBaseDirectory() +
      File.separator + "TestJournalNode");
  FileUtil.fullyDelete(editsDir);
  
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
      editsDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_JOURNALNODE_RPC_ADDRESS_KEY,
      "0.0.0.0:0");
  jn = new JournalNode();
  jn.setConf(conf);
  jn.start();
  journalId = "test-journalid-" + GenericTestUtils.uniqueSequenceId();
  journal = jn.getOrCreateJournal(journalId);
  journal.format(FAKE_NSINFO);
  
  ch = new IPCLoggerChannel(conf, FAKE_NSINFO, journalId, jn.getBoundIpcAddress());
}
 
Example 4
Source File: TestOverReplicatedBlocks.java    From big-c with Apache License 2.0
/**
 * Test that an over-replicated block gets invalidated when decreasing the
 * replication factor of a partially written block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Example 5
Source File: MiniJournalCluster.java    From hadoop with Apache License 2.0
private MiniJournalCluster(Builder b) throws IOException {
  LOG.info("Starting MiniJournalCluster with " +
      b.numJournalNodes + " journal nodes");
  
  if (b.baseDir != null) {
    this.baseDir = new File(b.baseDir);
  } else {
    this.baseDir = new File(MiniDFSCluster.getBaseDirectory());
  }

  nodes = new JNInfo[b.numJournalNodes];

  for (int i = 0; i < b.numJournalNodes; i++) {
    if (b.format) {
      File dir = getStorageDir(i);
      LOG.debug("Fully deleting JN directory " + dir);
      FileUtil.fullyDelete(dir);
    }
    JournalNode jn = new JournalNode();
    jn.setConf(createConfForNode(b, i));
    jn.start();
    nodes[i] = new JNInfo(jn);
  }
}
 
Example 6
Source File: TestOverReplicatedBlocks.java    From hadoop with Apache License 2.0
/**
 * Test that an over-replicated block gets invalidated when decreasing the
 * replication factor of a partially written block.
 */
@Test
public void testInvalidateOverReplicatedBlock() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3)
      .build();
  try {
    final FSNamesystem namesystem = cluster.getNamesystem();
    final BlockManager bm = namesystem.getBlockManager();
    FileSystem fs = cluster.getFileSystem();
    Path p = new Path(MiniDFSCluster.getBaseDirectory(), "/foo1");
    FSDataOutputStream out = fs.create(p, (short) 2);
    out.writeBytes("HDFS-3119: " + p);
    out.hsync();
    fs.setReplication(p, (short) 1);
    out.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, p);
    assertEquals("Expected only one live replica for the block", 1, bm
        .countNodes(block.getLocalBlock()).liveReplicas());
  } finally {
    cluster.shutdown();
  }
}
 
Example 7
Source File: TestStartup.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  config = new HdfsConfiguration();
  hdfsDir = new File(MiniDFSCluster.getBaseDirectory());

  if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
    throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
  }
  LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
  config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      fileAsURI(new File(hdfsDir, "name")).toString());
  config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      new File(hdfsDir, "data").getPath());
  config.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "0.0.0.0:0");
  config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      fileAsURI(new File(hdfsDir, "secondary")).toString());
  config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
      WILDCARD_HTTP_HOST + "0");
  
  FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
 
Example 8
Source File: TestValidateConfigurationSettings.java    From hadoop with Apache License 2.0
/**
 * Tests that setting the RPC port to the same value as the HTTP port
 * causes an exception when the NameNode tries to re-use the port.
 */
@Test(expected = BindException.class, timeout = 300000)
public void testThatMatchingRPCandHttpPortsThrowException() 
    throws IOException {

  NameNode nameNode = null;
  try {
    Configuration conf = new HdfsConfiguration();
    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        nameDir.getAbsolutePath());

    Random rand = new Random();
    final int port = 30000 + rand.nextInt(30000);

    // set both of these to the same port. It should fail.
    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port);
    DFSTestUtil.formatNameNode(conf);
    nameNode = new NameNode(conf);
  } finally {
    if (nameNode != null) {
      nameNode.stop();
    }
  }
}
 
Example 9
Source File: TestStartupDefaultRack.java    From RDFS with Apache License 2.0
@Test
public void testStartup() throws Exception {
  conf = new Configuration();
  conf.setClass("dfs.block.replicator.classname",
      BlockPlacementPolicyConfigurable.class, BlockPlacementPolicy.class);
  File baseDir = MiniDFSCluster.getBaseDirectory(conf);
  baseDir.mkdirs();
  File hostsFile = new File(baseDir, "hosts");
  FileOutputStream out = new FileOutputStream(hostsFile);
  out.write("h1\n".getBytes());
  out.write("h2\n".getBytes());
  out.write("h3\n".getBytes());
  out.close();
  conf.set("dfs.hosts", hostsFile.getAbsolutePath());
  StaticMapping.addNodeToRack("h1", "/r1");
  StaticMapping.addNodeToRack("h2", "/r2");
  StaticMapping.addNodeToRack("h3", NetworkTopology.DEFAULT_RACK);
  cluster = new MiniDFSCluster(conf, 3, new String[] { "/r1", "/r2",
      NetworkTopology.DEFAULT_RACK }, new String[] { "h1", "h2", "h3" },
      true, false);
  DFSTestUtil util = new DFSTestUtil("/testStartup", 10, 10, 1024);
  util.createFiles(cluster.getFileSystem(), "/");
  util.checkFiles(cluster.getFileSystem(), "/");
  assertEquals(2,
      cluster.getNameNode().getDatanodeReport(DatanodeReportType.LIVE).length);
  cluster.shutdown();
}
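Note that this example comes from the RDFS fork, where getBaseDirectory takes a Configuration argument and returns a File directly; in the Apache Hadoop examples elsewhere on this page the method is a no-argument static that returns the base path as a String, so callers wrap it in a File themselves. A side-by-side sketch of the two call forms (conf stands in for any Configuration):

// RDFS fork, as in this example: Configuration-aware and File-returning.
File rdfsBaseDir = MiniDFSCluster.getBaseDirectory(conf);

// Apache Hadoop, as in the other examples: no arguments, returns a String.
File hadoopBaseDir = new File(MiniDFSCluster.getBaseDirectory());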
 
Example 10
Source File: TestValidateConfigurationSettings.java    From big-c with Apache License 2.0
/**
 * HDFS-3013: NameNode format command doesn't pick up
 * dfs.namenode.name.dir.NameServiceId configuration.
 */
@Test(timeout = 300000)
public void testGenericKeysForNameNodeFormat()
    throws IOException {
  Configuration conf = new HdfsConfiguration();

  // Set ephemeral ports 
  conf.set(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY,
      "127.0.0.1:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY,
      "127.0.0.1:0");
  
  conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
  
  // Set a nameservice-specific configuration for name dir
  File dir = new File(MiniDFSCluster.getBaseDirectory(),
      "testGenericKeysForNameNodeFormat");
  if (dir.exists()) {
    FileUtil.fullyDelete(dir);
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY + ".ns1",
      dir.getAbsolutePath());
  
  // Format and verify the right dir is formatted.
  DFSTestUtil.formatNameNode(conf);
  GenericTestUtils.assertExists(dir);

  // Ensure that the same dir is picked up by the running NN
  NameNode nameNode = new NameNode(conf);
  nameNode.stop();
}
 
Example 11
Source File: TestValidateConfigurationSettings.java    From hadoop with Apache License 2.0
/**
 * Tests that setting the RPC port to a different value than the HTTP port
 * does NOT cause an exception.
 */
@Test(timeout = 300000)
public void testThatDifferentRPCandHttpPortsAreOK() 
    throws IOException {

  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());

  Random rand = new Random();

  // A few retries in case the ports we choose are in use.
  for (int i = 0; i < 5; ++i) {
    final int port1 = 30000 + rand.nextInt(10000);
    final int port2 = port1 + 1 + rand.nextInt(10000);

    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + port1);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:" + port2);
    DFSTestUtil.formatNameNode(conf);
    NameNode nameNode = null;

    try {
      nameNode = new NameNode(conf); // should be OK!
      break;
    } catch(BindException be) {
      continue;     // Port in use? Try another.
    } finally {
      if (nameNode != null) {
        nameNode.stop();
      }
    }
  }
}
 
Example 12
Source File: TestStandbyCheckpoints.java    From big-c with Apache License 2.0
/**
 * Test cancellation of ongoing checkpoints when failover happens
 * mid-checkpoint. 
 */
@Test(timeout=120000)
public void testCheckpointCancellation() throws Exception {
  cluster.transitionToStandby(0);
  
  // Create an edit log in the shared edits dir with a lot
  // of mkdirs operations. This is solely so that the image is
  // large enough to take a non-trivial amount of time to load.
  // (only ~15MB)
  URI sharedUri = cluster.getSharedEditsDir(0, 1);
  File sharedDir = new File(sharedUri.getPath(), "current");
  File tmpDir = new File(MiniDFSCluster.getBaseDirectory(),
      "testCheckpointCancellation-tmp");
  FSNamesystem fsn = cluster.getNamesystem(0);
  FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3,
      fsn.getFSDirectory().getLastInodeId() + 1);
  String fname = NNStorage.getInProgressEditsFileName(3); 
  new File(tmpDir, fname).renameTo(new File(sharedDir, fname));

  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nn1 = cluster.getNameNode(1);

  cluster.transitionToActive(0);    
  
  boolean canceledOne = false;
  for (int i = 0; i < 10 && !canceledOne; i++) {
    
    doEdits(i*10, i*10 + 10);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);
    canceledOne = StandbyCheckpointer.getCanceledCount() > 0;
  }
  
  assertTrue(canceledOne);
}
 
Example 13
Source File: TestSaveNamespace.java    From big-c with Apache License 2.0
private Configuration getConf() throws IOException {
  String baseDir = MiniDFSCluster.getBaseDirectory();
  String nameDirs = fileAsURI(new File(baseDir, "name1")) + "," + 
                    fileAsURI(new File(baseDir, "name2"));

  Configuration conf = new HdfsConfiguration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDirs);
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDirs);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); 
  return conf;
}
 
Example 14
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
/**
 * Test that, if the edits dir is separate from the name dir, it is
 * properly locked.
 **/
@Test
public void testSeparateEditsDirLocking() throws IOException {
  Configuration conf = new HdfsConfiguration();
  File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
  File editsDir = new File(MiniDFSCluster.getBaseDirectory(),
      "testSeparateEditsDirLocking");

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
      editsDir.getAbsolutePath());
  MiniDFSCluster cluster = null;
  
  // Start a NN, and verify that lock() fails in all of the configured
  // directories
  StorageDirectory savedSd = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
        .numDataNodes(0).build();
    NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
    for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
      assertEquals(editsDir.getAbsoluteFile(), sd.getRoot());
      assertLockFails(sd);
      savedSd = sd;
    }
  } finally {
    cleanup(cluster);
    cluster = null;
  }
  assertNotNull(savedSd);
  
  // Lock one of the saved directories, then start the NN, and make sure it
  // fails to start
  assertClusterStartFailsWhenDirLocked(conf, savedSd);
}
 
Example 15
Source File: TestStandbyCheckpoints.java    From hadoop with Apache License 2.0
/**
 * Test cancellation of ongoing checkpoints when failover happens
 * mid-checkpoint. 
 */
@Test(timeout=120000)
public void testCheckpointCancellation() throws Exception {
  cluster.transitionToStandby(0);
  
  // Create an edit log in the shared edits dir with a lot
  // of mkdirs operations. This is solely so that the image is
  // large enough to take a non-trivial amount of time to load.
  // (only ~15MB)
  URI sharedUri = cluster.getSharedEditsDir(0, 1);
  File sharedDir = new File(sharedUri.getPath(), "current");
  File tmpDir = new File(MiniDFSCluster.getBaseDirectory(),
      "testCheckpointCancellation-tmp");
  FSNamesystem fsn = cluster.getNamesystem(0);
  FSImageTestUtil.createAbortedLogWithMkdirs(tmpDir, NUM_DIRS_IN_LOG, 3,
      fsn.getFSDirectory().getLastInodeId() + 1);
  String fname = NNStorage.getInProgressEditsFileName(3); 
  new File(tmpDir, fname).renameTo(new File(sharedDir, fname));

  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nn1 = cluster.getNameNode(1);

  cluster.transitionToActive(0);    
  
  boolean canceledOne = false;
  for (int i = 0; i < 10 && !canceledOne; i++) {
    
    doEdits(i*10, i*10 + 10);
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    cluster.transitionToStandby(1);
    cluster.transitionToActive(0);
    canceledOne = StandbyCheckpointer.getCanceledCount() > 0;
  }
  
  assertTrue(canceledOne);
}
 
Example 16
Source File: TestCheckpoint.java    From big-c with Apache License 2.0
/**
 * Test case where the name node is reformatted while the secondary namenode
 * is running. The secondary should shut itself down if it talks to a NN
 * with the wrong namespace.
 */
@Test
public void testReformatNNBetweenCheckpoints() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  
  Configuration conf = new HdfsConfiguration();
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
      1);

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    int origPort = cluster.getNameNodePort();
    int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
    Configuration snnConf = new Configuration(conf);
    File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
      "namesecondary");
    snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
      checkpointDir.getAbsolutePath());
    secondary = startSecondaryNameNode(snnConf);

    // secondary checkpoints once
    secondary.doCheckpoint();

    // we reformat primary NN
    cluster.shutdown();
    cluster = null;

    // Brief sleep to make sure that the 2NN's IPC connection to the NN
    // is dropped.
    try {
      Thread.sleep(100);
    } catch (InterruptedException ie) {
    }
    
    // Start a new NN with the same host/port.
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .nameNodePort(origPort)
        .nameNodeHttpPort(origHttpPort)
        .format(true).build();

    try {
      secondary.doCheckpoint();
      fail("Should have failed checkpoint against a different namespace");
    } catch (IOException ioe) {
      LOG.info("Got expected failure", ioe);
      assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }  
}
 
Example 17
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
/**
 * Test that the secondary doesn't have to re-download the image
 * if it hasn't changed.
 */
@Test
public void testSecondaryImageDownload() throws IOException {
  LOG.info("Starting testSecondaryImageDownload");
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  Path dir = new Path("/checkpoint");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                             .numDataNodes(numDatanodes)
                                             .format(true).build();
  cluster.waitActive();
  FileSystem fileSys = cluster.getFileSystem();
  FSImage image = cluster.getNameNode().getFSImage();
  SecondaryNameNode secondary = null;
  try {
    assertTrue(!fileSys.exists(dir));
    //
    // Make the checkpoint
    //
    secondary = startSecondaryNameNode(conf);

    File secondaryDir = new File(MiniDFSCluster.getBaseDirectory(), "namesecondary1");
    File secondaryCurrent = new File(secondaryDir, "current");

    long expectedTxIdToDownload = cluster.getNameNode().getFSImage()
        .getStorage().getMostRecentCheckpointTxId();

    File secondaryFsImageBefore = new File(secondaryCurrent,
        NNStorage.getImageFileName(expectedTxIdToDownload));
    File secondaryFsImageAfter = new File(secondaryCurrent,
        NNStorage.getImageFileName(expectedTxIdToDownload + 2));
    
    assertFalse("Secondary should start with empty current/ dir " +
        "but " + secondaryFsImageBefore + " exists",
        secondaryFsImageBefore.exists());

    assertTrue("Secondary should have loaded an image",
        secondary.doCheckpoint());
    
    assertTrue("Secondary should have downloaded original image",
        secondaryFsImageBefore.exists());
    assertTrue("Secondary should have created a new image",
        secondaryFsImageAfter.exists());
    
    long fsimageLength = secondaryFsImageBefore.length();
    assertEquals("Image size should not have changed",
        fsimageLength,
        secondaryFsImageAfter.length());

    // change namespace
    fileSys.mkdirs(dir);
    
    assertFalse("Another checkpoint should not have to re-load image",
        secondary.doCheckpoint());
    
    for (StorageDirectory sd :
      image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
      File imageFile = NNStorage.getImageFile(sd, NameNodeFile.IMAGE,
          expectedTxIdToDownload + 5);
      assertTrue("Image size increased",
          imageFile.length() > fsimageLength);
    }

  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 18
Source File: TestDataNodeVolumeFailureToleration.java    From hadoop with Apache License 2.0
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, i.e. that the DN tolerates a volume it fails to use during
 * its start-up.
 */
@Test
public void testValidVolumesAtStartup() throws Exception {
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));

  // Make sure no DNs are running.
  cluster.shutdownDataNodes();

  // Bring up a datanode with two default data dirs, but with one bad one.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);

  // We use subdirectories 0 and 1 in order to have only a single
  // data dir's parent inject a failure.
  File tld = new File(MiniDFSCluster.getBaseDirectory(), "badData");
  File dataDir1 = new File(tld, "data1");
  File dataDir1Actual = new File(dataDir1, "1");
  dataDir1Actual.mkdirs();
  // Force an IOE to occur on one of the dfs.data.dir.
  File dataDir2 = new File(tld, "data2");
  prepareDirToFail(dataDir2);
  File dataDir2Actual = new File(dataDir2, "2");

  // Start one DN, with manually managed DN dir
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      dataDir1Actual.getPath() + "," + dataDir2Actual.getPath());
  cluster.startDataNodes(conf, 1, false, null, null);
  cluster.waitActive();

  try {
    assertTrue("The DN should have started up fine.",
        cluster.isDataNodeUp());
    DataNode dn = cluster.getDataNodes().get(0);
    String si = DataNodeTestUtils.getFSDataset(dn).getStorageInfo();
    assertTrue("The DN should have started with this directory",
        si.contains(dataDir1Actual.getPath()));
    assertFalse("The DN shouldn't have a bad directory.",
        si.contains(dataDir2Actual.getPath()));
  } finally {
    cluster.shutdownDataNodes();
    FileUtil.chmod(dataDir2.toString(), "755");
  }

}
 
Example 19
Source File: TestCheckpoint.java    From big-c with Apache License 2.0
/**
 * Test that the secondary doesn't have to re-download the image
 * if it hasn't changed.
 */
@Test
public void testSecondaryImageDownload() throws IOException {
  LOG.info("Starting testSecondaryImageDownload");
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  Path dir = new Path("/checkpoint");
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                             .numDataNodes(numDatanodes)
                                             .format(true).build();
  cluster.waitActive();
  FileSystem fileSys = cluster.getFileSystem();
  FSImage image = cluster.getNameNode().getFSImage();
  SecondaryNameNode secondary = null;
  try {
    assertTrue(!fileSys.exists(dir));
    //
    // Make the checkpoint
    //
    secondary = startSecondaryNameNode(conf);

    File secondaryDir = new File(MiniDFSCluster.getBaseDirectory(), "namesecondary1");
    File secondaryCurrent = new File(secondaryDir, "current");

    long expectedTxIdToDownload = cluster.getNameNode().getFSImage()
        .getStorage().getMostRecentCheckpointTxId();

    File secondaryFsImageBefore = new File(secondaryCurrent,
        NNStorage.getImageFileName(expectedTxIdToDownload));
    File secondaryFsImageAfter = new File(secondaryCurrent,
        NNStorage.getImageFileName(expectedTxIdToDownload + 2));
    
    assertFalse("Secondary should start with empty current/ dir " +
        "but " + secondaryFsImageBefore + " exists",
        secondaryFsImageBefore.exists());

    assertTrue("Secondary should have loaded an image",
        secondary.doCheckpoint());
    
    assertTrue("Secondary should have downloaded original image",
        secondaryFsImageBefore.exists());
    assertTrue("Secondary should have created a new image",
        secondaryFsImageAfter.exists());
    
    long fsimageLength = secondaryFsImageBefore.length();
    assertEquals("Image size should not have changed",
        fsimageLength,
        secondaryFsImageAfter.length());

    // change namespace
    fileSys.mkdirs(dir);
    
    assertFalse("Another checkpoint should not have to re-load image",
        secondary.doCheckpoint());
    
    for (StorageDirectory sd :
      image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
      File imageFile = NNStorage.getImageFile(sd, NameNodeFile.IMAGE,
          expectedTxIdToDownload + 5);
      assertTrue("Image size increased",
          imageFile.length() > fsimageLength);
    }

  } finally {
    fileSys.close();
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 20
Source File: TestDataNodeVolumeFailureToleration.java    From big-c with Apache License 2.0
/**
 * Test the DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY configuration
 * option, i.e. that the DN tolerates a volume it fails to use during
 * its start-up.
 */
@Test
public void testValidVolumesAtStartup() throws Exception {
  assumeTrue(!System.getProperty("os.name").startsWith("Windows"));

  // Make sure no DNs are running.
  cluster.shutdownDataNodes();

  // Bring up a datanode with two default data dirs, but with one bad one.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);

  // We use subdirectories 0 and 1 in order to have only a single
  // data dir's parent inject a failure.
  File tld = new File(MiniDFSCluster.getBaseDirectory(), "badData");
  File dataDir1 = new File(tld, "data1");
  File dataDir1Actual = new File(dataDir1, "1");
  dataDir1Actual.mkdirs();
  // Force an IOE to occur on one of the dfs.data.dir.
  File dataDir2 = new File(tld, "data2");
  prepareDirToFail(dataDir2);
  File dataDir2Actual = new File(dataDir2, "2");

  // Start one DN, with manually managed DN dir
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
      dataDir1Actual.getPath() + "," + dataDir2Actual.getPath());
  cluster.startDataNodes(conf, 1, false, null, null);
  cluster.waitActive();

  try {
    assertTrue("The DN should have started up fine.",
        cluster.isDataNodeUp());
    DataNode dn = cluster.getDataNodes().get(0);
    String si = DataNodeTestUtils.getFSDataset(dn).getStorageInfo();
    assertTrue("The DN should have started with this directory",
        si.contains(dataDir1Actual.getPath()));
    assertFalse("The DN shouldn't have a bad directory.",
        si.contains(dataDir2Actual.getPath()));
  } finally {
    cluster.shutdownDataNodes();
    FileUtil.chmod(dataDir2.toString(), "755");
  }

}