Java Code Examples for org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction

The following examples show how to use org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction. They are extracted from open source projects; the source project, file, and license are noted above each example.
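
In most of the examples below, the enum is passed to DistributedFileSystem#setSafeMode to bracket a saveNamespace call, or queried with SAFEMODE_GET. A minimal sketch of that common pattern follows; it assumes a Hadoop 2.x client whose default Configuration points at a running HDFS NameNode (the fs.defaultFS value is a placeholder), and it is illustrative rather than an excerpt from any of the projects listed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public class SafeModeCheckpointSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS resolves to an HDFS URI, e.g. hdfs://localhost:8020;
    // otherwise the cast to DistributedFileSystem will fail.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    try {
      // SAFEMODE_GET only reports the current state; it does not change it.
      boolean inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
      System.out.println("NameNode in safe mode: " + inSafeMode);

      // Enter safe mode, force a new fsimage checkpoint, then leave safe mode.
      dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
      dfs.saveNamespace();
      dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    } finally {
      dfs.close();
    }
  }
}
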
Example 1
Source Project: NNAnalytics   Source File: TestWithMiniClusterBase.java    License: Apache License 2.0
@Test //(timeout = 120000L)
public void testRestartFetchNamespace() throws Exception {
  // Shutdown NNA.
  long currentTxid = nna.getLoader().getCurrentTxId();
  nna.shutdown();

  // Trigger file system updates.
  addFiles(100, 0L);
  DistributedFileSystem fileSystem = (DistributedFileSystem) FileSystem.get(CONF);
  fileSystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  fileSystem.saveNamespace();
  fileSystem.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  nnaConf.set("nna.bootstrap.auto.fetch.namespace", "true");
  nna.init(nnaConf, null, CONF);
  long restartedTxid = nna.getLoader().getCurrentTxId();
  assertThat(restartedTxid, is(greaterThan(currentTxid + 99)));
}
 
Example 2
Source Project: big-c   Source File: TestSaveNamespace.java    License: Apache License 2.0
/**
 * Test that saveNamespace succeeds when a parent directory has been renamed
 * with an open lease and the destination directory exists.
 * This is a regression test for HDFS-2827.
 */
@Test
public void testSaveNamespaceWithRenamedLease() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
      .numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
  OutputStream out = null;
  try {
    fs.mkdirs(new Path("/test-target"));
    out = fs.create(new Path("/test-source/foo")); // don't close
    fs.rename(new Path("/test-source/"), new Path("/test-target/"));

    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    IOUtils.cleanup(LOG, out, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 3
Source Project: hadoop   Source File: TestRollingUpgradeDowngrade.java    License: Apache License 2.0
/**
 * Ensure that during downgrade the NN fails to load an fsimage with a newer
 * format.
 */
@Test(expected = IncorrectVersionException.class)
public void testRejectNewFsImage() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    NNStorage storage = spy(cluster.getNameNode().getFSImage().getStorage());
    int futureVersion = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1;
    doReturn(futureVersion).when(storage).getServiceLayoutVersion();
    storage.writeAll();
    cluster.restartNameNode(0, true, "-rollingUpgrade", "downgrade");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 4
Source Project: big-c   Source File: TestFSImage.java    License: Apache License 2.0
/**
 * Ensure that the digest written by the saver equals the digest of the
 * file.
 */
@Test
public void testDigest() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
        0);
    File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
        .getAbsolutePath());
    assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
        MD5FileUtils.computeMd5ForFile(fsimage));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 5
Source Project: hadoop   Source File: TestRollingUpgrade.java    License: Apache License 2.0
private static void startRollingUpgrade(Path foo, Path bar,
    Path file, byte[] data,
    MiniDFSCluster cluster) throws IOException {
  final DistributedFileSystem dfs = cluster.getFileSystem();

  //start rolling upgrade
  dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
  dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  dfs.mkdirs(bar);
  Assert.assertTrue(dfs.exists(foo));
  Assert.assertTrue(dfs.exists(bar));

  //truncate a file
  final int newLength = DFSUtil.getRandom().nextInt(data.length - 1) + 1;
  dfs.truncate(file, newLength);
  TestFileTruncate.checkBlockRecovery(file, dfs);
  AppendTestUtil.checkFullFile(dfs, file, newLength, data);
}
 
Example 6
Source Project: hadoop   Source File: TestRollingUpgrade.java    License: Apache License 2.0
@Test (timeout = 300000)
public void testQueryAfterRestart() throws IOException, InterruptedException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    DistributedFileSystem dfs = cluster.getFileSystem();

    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    // start rolling upgrade
    dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
    queryForPreparation(dfs);
    dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    dfs.saveNamespace();
    dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

    cluster.restartNameNodes();
    dfs.rollingUpgrade(RollingUpgradeAction.QUERY);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 7
Source Project: hadoop   Source File: TestSaveNamespace.java    License: Apache License 2.0
/**
 * Test that saveNamespace succeeds when a parent directory has been renamed
 * with an open lease and the destination directory exists.
 * This is a regression test for HDFS-2827.
 */
@Test
public void testSaveNamespaceWithRenamedLease() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
      .numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
  OutputStream out = null;
  try {
    fs.mkdirs(new Path("/test-target"));
    out = fs.create(new Path("/test-source/foo")); // don't close
    fs.rename(new Path("/test-source/"), new Path("/test-target/"));

    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    IOUtils.cleanup(LOG, out, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 8
Source Project: big-c   Source File: TestRollingUpgrade.java    License: Apache License 2.0
private static void startRollingUpgrade(Path foo, Path bar,
    Path file, byte[] data,
    MiniDFSCluster cluster) throws IOException {
  final DistributedFileSystem dfs = cluster.getFileSystem();

  //start rolling upgrade
  dfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  dfs.rollingUpgrade(RollingUpgradeAction.PREPARE);
  dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);

  dfs.mkdirs(bar);
  Assert.assertTrue(dfs.exists(foo));
  Assert.assertTrue(dfs.exists(bar));

  //truncate a file
  final int newLength = DFSUtil.getRandom().nextInt(data.length - 1) + 1;
  dfs.truncate(file, newLength);
  TestFileTruncate.checkBlockRecovery(file, dfs);
  AppendTestUtil.checkFullFile(dfs, file, newLength, data);
}
 
Example 9
Source Project: big-c   Source File: TestSnapshotDeletion.java    License: Apache License 2.0
@Test
public void testCorrectNumberOfBlocksAfterRestart() throws IOException {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final Path file = new Path(foo, "file");
  final String snapshotName = "ss0";

  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  hdfs.mkdirs(bar);
  hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
  hdfs.allowSnapshot(foo);

  hdfs.createSnapshot(foo, snapshotName);
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();

  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  hdfs.deleteSnapshot(foo, snapshotName);
  hdfs.delete(bar, true);
  hdfs.delete(foo, true);

  long numberOfBlocks = cluster.getNamesystem().getBlocksTotal();
  cluster.restartNameNode(0);
  assertEquals(numberOfBlocks, cluster.getNamesystem().getBlocksTotal());
}
 
Example 10
Source Project: big-c   Source File: TestSaveNamespace.java    License: Apache License 2.0
@Test (timeout=30000)
public void testSaveNamespaceWithDanglingLease() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
      .numDataNodes(1).build();
  cluster.waitActive();
  DistributedFileSystem fs = cluster.getFileSystem();
  try {
    cluster.getNamesystem().leaseManager.addLease("me", "/non-existent");      
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    cluster.getNameNodeRpc().saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 11
Source Project: big-c   Source File: TestRollingUpgradeDowngrade.java    License: Apache License 2.0
/**
 * Ensure that during downgrade the NN fails to load an fsimage with a newer
 * format.
 */
@Test(expected = IncorrectVersionException.class)
public void testRejectNewFsImage() throws IOException {
  final Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
    NNStorage storage = spy(cluster.getNameNode().getFSImage().getStorage());
    int futureVersion = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION - 1;
    doReturn(futureVersion).when(storage).getServiceLayoutVersion();
    storage.writeAll();
    cluster.restartNameNode(0, true, "-rollingUpgrade", "downgrade");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 12
Source Project: big-c   Source File: TestSaveNamespace.java    License: Apache License 2.0
@Test (timeout=30000)
public void testSaveWhileEditsRolled() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  try {
    doAnEdit(fsn, 1);
    CheckpointSignature sig = fsn.rollEditLog();
    LOG.warn("Checkpoint signature: " + sig);
    // Do another edit
    doAnEdit(fsn, 2);

    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fsn.saveNamespace();

    // Now shut down and restart the NN
    fsn.close();
    fsn = null;

    // Start a new namesystem, which should be able to recover
    // the namespace from the previous incarnation.
    fsn = FSNamesystem.loadFromDisk(conf);

    // Make sure the image loaded including our edits.
    checkEditExists(fsn, 1);
    checkEditExists(fsn, 2);
  } finally {
    if (fsn != null) {
      fsn.close();
    }
  }
}
 
Example 13
Source Project: hadoop   Source File: BackupNode.java    License: Apache License 2.0
@Override // NameNode
protected void initialize(Configuration conf) throws IOException {
  // Trash is disabled in BackupNameNode,
  // but should be turned back on if it ever becomes active.
  conf.setLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, 
               CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
  NamespaceInfo nsInfo = handshake(conf);
  super.initialize(conf);
  namesystem.setBlockPoolId(nsInfo.getBlockPoolID());

  if (!namesystem.isInSafeMode()) {
    namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  }

  // Backup node should never do lease recovery,
  // therefore lease hard limit should never expire.
  namesystem.leaseManager.setLeasePeriod(
      HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);

  // register with the active name-node 
  registerWith(nsInfo);
  // Checkpoint daemon should start after the rpc server started
  runCheckpointDaemon(conf);
  InetSocketAddress addr = getHttpAddress();
  if (addr != null) {
    conf.set(BN_HTTP_ADDRESS_NAME_KEY, NetUtils.getHostPortString(getHttpAddress()));
  }
}
 
Example 14
Source Project: hadoop   Source File: NameNodeRpcServer.java    License: Apache License 2.0
@Override // ClientProtocol
public boolean setSafeMode(SafeModeAction action, boolean isChecked)
    throws IOException {
  checkNNStartup();
  OperationCategory opCategory = OperationCategory.UNCHECKED;
  if (isChecked) {
    if (action == SafeModeAction.SAFEMODE_GET) {
      opCategory = OperationCategory.READ;
    } else {
      opCategory = OperationCategory.WRITE;
    }
  }
  namesystem.checkOperation(opCategory);
  return namesystem.setSafeMode(action);
}
 
Example 15
Source Project: hadoop   Source File: PBHelper.java    License: Apache License 2.0
public static SafeModeActionProto convert(
    SafeModeAction a) {
  switch (a) {
  case SAFEMODE_LEAVE:
    return SafeModeActionProto.SAFEMODE_LEAVE;
  case SAFEMODE_ENTER:
    return SafeModeActionProto.SAFEMODE_ENTER;
  case SAFEMODE_GET:
    return SafeModeActionProto.SAFEMODE_GET;
  default:
    throw new IllegalArgumentException("Unexpected SafeModeAction :" + a);
  }
}
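
The client-side translator (see Example 17 below) uses this converter when building the protobuf request; the server-side path needs the inverse mapping from SafeModeActionProto back to SafeModeAction. The following is a hedged reconstruction of what that inverse converter looks like, mirroring the switch above; the method name and placement are assumed rather than copied from the Hadoop source.

// Hypothetical inverse of the converter above: proto enum -> HDFS enum.
public static SafeModeAction convert(SafeModeActionProto a) {
  switch (a) {
  case SAFEMODE_LEAVE:
    return SafeModeAction.SAFEMODE_LEAVE;
  case SAFEMODE_ENTER:
    return SafeModeAction.SAFEMODE_ENTER;
  case SAFEMODE_GET:
    return SafeModeAction.SAFEMODE_GET;
  default:
    throw new IllegalArgumentException("Unexpected SafeModeActionProto: " + a);
  }
}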
 
Example 16
Source Project: big-c   Source File: TestFetchImage.java    License: Apache License 2.0
/**
 * Download a few fsimages using `hdfs dfsadmin -fetchImage ...` and verify
 * the results.
 */
@Test
public void testFetchImage() throws Exception {
  FETCHED_IMAGE_FILE.mkdirs();
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = null;
  try {
    DFSAdmin dfsAdmin = new DFSAdmin();
    dfsAdmin.setConf(conf);
    
    runFetchImage(dfsAdmin, cluster);
    
    fs = cluster.getFileSystem();
    fs.mkdirs(new Path("/foo"));
    fs.mkdirs(new Path("/foo2"));
    fs.mkdirs(new Path("/foo3"));
    
    cluster.getNameNodeRpc()
        .setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    cluster.getNameNodeRpc().saveNamespace();
    cluster.getNameNodeRpc()
        .setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    
    runFetchImage(dfsAdmin, cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 17
Source Project: hadoop   Source File: ClientNamenodeProtocolTranslatorPB.java    License: Apache License 2.0
@Override
public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException {
  SetSafeModeRequestProto req = SetSafeModeRequestProto.newBuilder()
      .setAction(PBHelper.convert(action)).setChecked(isChecked).build();
  try {
    return rpcProxy.setSafeMode(null, req).getResult();
  } catch (ServiceException e) {
    throw ProtobufHelper.getRemoteException(e);
  }
}
 
Example 18
Source Project: hadoop   Source File: DFSAdmin.java    License: Apache License 2.0
private boolean waitExitSafeMode(DistributedFileSystem dfs, boolean inSafeMode)
    throws IOException {
  while (inSafeMode) {
    try {
      Thread.sleep(5000);
    } catch (java.lang.InterruptedException e) {
      throw new IOException("Wait Interrupted");
    }
    inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
  }
  return inSafeMode;
}
 
Example 19
Source Project: hadoop   Source File: DFSAdmin.java    License: Apache License 2.0
private boolean waitExitSafeMode(ClientProtocol nn, boolean inSafeMode)
    throws IOException {
  while (inSafeMode) {
    try {
      Thread.sleep(5000);
    } catch (java.lang.InterruptedException e) {
      throw new IOException("Wait Interrupted");
    }
    inSafeMode = nn.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
  }
  return inSafeMode;
}
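
Both waitExitSafeMode overloads above poll SAFEMODE_GET until the NameNode reports that it has left safe mode. A caller-side sketch of the same idea is shown below; it is illustrative only (the sleep interval mirrors DFSAdmin, while the class name and the use of a plain DistributedFileSystem handle are assumptions, not part of DFSAdmin itself).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;

public class WaitForSafeModeExit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS NameNode.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    try {
      // Ask the NameNode to leave safe mode; the call returns whether the
      // NameNode still reports being in safe mode afterwards.
      boolean inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
      // Keep polling with SAFEMODE_GET until safe mode is reported as off,
      // mirroring the loop in DFSAdmin#waitExitSafeMode.
      while (inSafeMode) {
        Thread.sleep(5000);
        inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
      }
      System.out.println("NameNode has left safe mode");
    } finally {
      dfs.close();
    }
  }
}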
 
Example 20
Source Project: hadoop   Source File: TestFetchImage.java    License: Apache License 2.0
/**
 * Download a few fsimages using `hdfs dfsadmin -fetchImage ...` and verify
 * the results.
 */
@Test
public void testFetchImage() throws Exception {
  FETCHED_IMAGE_FILE.mkdirs();
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = null;
  try {
    DFSAdmin dfsAdmin = new DFSAdmin();
    dfsAdmin.setConf(conf);
    
    runFetchImage(dfsAdmin, cluster);
    
    fs = cluster.getFileSystem();
    fs.mkdirs(new Path("/foo"));
    fs.mkdirs(new Path("/foo2"));
    fs.mkdirs(new Path("/foo3"));
    
    cluster.getNameNodeRpc()
        .setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    cluster.getNameNodeRpc().saveNamespace();
    cluster.getNameNodeRpc()
        .setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    
    runFetchImage(dfsAdmin, cluster);
  } finally {
    if (fs != null) {
      fs.close();
    }
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 21
Source Project: hadoop   Source File: TestFSImageWithXAttr.java    License: Apache License 2.0
/**
 * Restart the NameNode, optionally saving a new checkpoint.
 *
 * @param fs DistributedFileSystem used for saving namespace
 * @param persistNamespace boolean true to save a new checkpoint
 * @throws IOException if restart fails
 */
private void restart(DistributedFileSystem fs, boolean persistNamespace)
    throws IOException {
  if (persistNamespace) {
    fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fs.saveNamespace();
    fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  }

  cluster.restartNameNode();
  cluster.waitActive();
}
 
Example 22
Source Project: big-c   Source File: TestFSImageWithSnapshot.java    License: Apache License 2.0
/**
 * Test fsimage loading while there is a file under construction.
 */
@Test (timeout=60000)
public void testLoadImageWithAppending() throws Exception {
  Path sub1 = new Path(dir, "sub1");
  Path sub1file1 = new Path(sub1, "sub1file1");
  Path sub1file2 = new Path(sub1, "sub1file2");
  DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
  
  hdfs.allowSnapshot(dir);
  hdfs.createSnapshot(dir, "s0");
  
  HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));      
  
  // save namespace and restart cluster
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
 
Example 23
Source Project: hadoop   Source File: TestHASafeMode.java    License: Apache License 2.0
/**
 * Regression test for HDFS-2804: standby should not populate replication
 * queues when exiting safe mode.
 */
@Test
public void testNoPopulatingReplQueuesWhenExitingSafemode() throws Exception {
  DFSTestUtil.createFile(fs, new Path("/test"), 15*BLOCK_SIZE, (short)3, 1L);
  
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  
  // get some blocks in the SBN's image
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  NameNodeAdapter.saveNamespace(nn1);
  nn1.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);

  // and some blocks in the edit logs
  DFSTestUtil.createFile(fs, new Path("/test2"), 15*BLOCK_SIZE, (short)3, 1L);
  nn0.getRpcServer().rollEditLog();
  
  cluster.stopDataNode(1);
  cluster.shutdownNameNode(1);

  //Configuration sbConf = cluster.getConfiguration(1);
  //sbConf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 1);
  cluster.restartNameNode(1, false);
  nn1 = cluster.getNameNode(1);
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return !nn1.isInSafeMode();
    }
  }, 100, 10000);
  
  BlockManagerTestUtil.updateState(nn1.getNamesystem().getBlockManager());
  assertEquals(0L, nn1.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0L, nn1.getNamesystem().getPendingReplicationBlocks());
}
 
Example 24
Source Project: hadoop   Source File: TestHASafeMode.java    License: Apache License 2.0
/**
 * DFS#isInSafeMode should check the active NN's safe mode in an HA-enabled
 * cluster. Regression test for HDFS-3507.
 *
 * @throws Exception
 */
@Test
public void testIsInSafemode() throws Exception {
  // Check for the standby nn without client failover.
  NameNode nn2 = cluster.getNameNode(1);
  assertTrue("nn2 should be in standby state", nn2.isStandbyState());

  InetSocketAddress nameNodeAddress = nn2.getNameNodeAddress();
  Configuration conf = new Configuration();
  DistributedFileSystem dfs = new DistributedFileSystem();
  try {
    dfs.initialize(
        URI.create("hdfs://" + nameNodeAddress.getHostName() + ":"
            + nameNodeAddress.getPort()), conf);
    dfs.isInSafeMode();
    fail("StandBy should throw exception for isInSafeMode");
  } catch (IOException e) {
    if (e instanceof RemoteException) {
      IOException sbException = ((RemoteException) e).unwrapRemoteException();
      assertTrue("StandBy nn should not support isInSafeMode",
          sbException instanceof StandbyException);
    } else {
      throw e;
    }
  } finally {
    if (null != dfs) {
      dfs.close();
    }
  }

  // Check with Client FailOver
  cluster.transitionToStandby(0);
  cluster.transitionToActive(1);
  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  DistributedFileSystem dfsWithFailOver = (DistributedFileSystem) fs;
  assertTrue("ANN should be in SafeMode", dfsWithFailOver.isInSafeMode());

  cluster.getNameNodeRpc(1).setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
  assertFalse("ANN should be out of SafeMode", dfsWithFailOver.isInSafeMode());
}
 
Example 25
Source Project: big-c   Source File: TestCheckpoint.java    License: Apache License 2.0
/**
 * Test case where the secondary does a checkpoint, then stops for a while.
 * In the meantime, the NN saves its image several times, so that the
 * logs that connect the 2NN's old checkpoint to the current txid
 * get archived. Then, the 2NN tries to checkpoint again.
 */
@Test
public void testSecondaryHasVeryOutOfDateImage() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  Configuration conf = new HdfsConfiguration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN saves namespace 3 times
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    for (int i = 0; i < 3; i++) {
      nn.saveNamespace();
    }
    nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    
    // Now the secondary tries to checkpoint again with its
    // old image in memory.
    secondary.doCheckpoint();
    
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 26
Source Project: hadoop   Source File: TestSaveNamespace.java    License: Apache License 2.0
@Test (timeout=30000)
public void testSaveWhileEditsRolled() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  try {
    doAnEdit(fsn, 1);
    CheckpointSignature sig = fsn.rollEditLog();
    LOG.warn("Checkpoint signature: " + sig);
    // Do another edit
    doAnEdit(fsn, 2);

    // Save namespace
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fsn.saveNamespace();

    // Now shut down and restart the NN
    fsn.close();
    fsn = null;

    // Start a new namesystem, which should be able to recover
    // the namespace from the previous incarnation.
    fsn = FSNamesystem.loadFromDisk(conf);

    // Make sure the image loaded including our edits.
    checkEditExists(fsn, 1);
    checkEditExists(fsn, 2);
  } finally {
    if (fsn != null) {
      fsn.close();
    }
  }
}
 
Example 27
Source Project: hadoop   Source File: TestSaveNamespace.java    License: Apache License 2.0
@Test (timeout=30000)
public void testTxIdPersistence() throws Exception {
  Configuration conf = getConf();
  NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
  DFSTestUtil.formatNameNode(conf);
  FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);

  try {
    // We have a BEGIN_LOG_SEGMENT txn to start
    assertEquals(1, fsn.getEditLog().getLastWrittenTxId());
    doAnEdit(fsn, 1);
    assertEquals(2, fsn.getEditLog().getLastWrittenTxId());
    
    fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
    fsn.saveNamespace();

    // 2 more txns: END the first segment, BEGIN a new one
    assertEquals(4, fsn.getEditLog().getLastWrittenTxId());
    
    // Shut down and restart
    fsn.getFSImage().close();
    fsn.close();
    
    // 1 more txn to END that segment
    assertEquals(5, fsn.getEditLog().getLastWrittenTxId());
    fsn = null;
    
    fsn = FSNamesystem.loadFromDisk(conf);
    // 1 more txn to start new segment on restart
    assertEquals(6, fsn.getEditLog().getLastWrittenTxId());
    
  } finally {
    if (fsn != null) {
      fsn.close();
    }
  }
}
 
Example 28
Source Project: hadoop   Source File: TestCheckpoint.java    License: Apache License 2.0
/**
 * Test case where the secondary does a checkpoint, then stops for a while.
 * In the meantime, the NN saves its image several times, so that the
 * logs that connect the 2NN's old checkpoint to the current txid
 * get archived. Then, the 2NN tries to checkpoint again.
 */
@Test
public void testSecondaryHasVeryOutOfDateImage() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  Configuration conf = new HdfsConfiguration();

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once
    secondary.doCheckpoint();

    // Now primary NN saves namespace 3 times
    NamenodeProtocols nn = cluster.getNameNodeRpc();
    nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    for (int i = 0; i < 3; i++) {
      nn.saveNamespace();
    }
    nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    
    // Now the secondary tries to checkpoint again with its
    // old image in memory.
    secondary.doCheckpoint();
    
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 29
Source Project: hadoop   Source File: TestCheckpoint.java    License: Apache License 2.0
/**
 * Regression test for HDFS-3835 - "Long-lived 2NN cannot perform a
 * checkpoint if security is enabled and the NN restarts without outstanding
 * delegation tokens"
 */
@Test
public void testSecondaryNameNodeWithDelegationTokens() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;
  
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
        .format(true).build();
    
    assertNotNull(cluster.getNamesystem().getDelegationToken(new Text("atm")));

    secondary = startSecondaryNameNode(conf);

    // Checkpoint once, so the 2NN loads the DT into its in-memory state.
    secondary.doCheckpoint();
    
    // Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN
    // therefore needs to download a new fsimage the next time it performs a
    // checkpoint.
    cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
    cluster.getNameNodeRpc().saveNamespace();
    cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
    
    // Ensure that the 2NN can still perform a checkpoint.
    secondary.doCheckpoint();
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
 
Example 30
Source Project: big-c   Source File: TestSnapshotBlocksMap.java    License: Apache License 2.0
/**
 * Make sure that a delete of a non-zero-length file which results in a
 * zero-length file in a snapshot works.
 */
@Test
public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final byte[] testData = "foo bar baz".getBytes();
  
  // Create a zero-length file.
  DFSTestUtil.createFile(hdfs, bar, 0, REPLICATION, 0L);
  assertEquals(0, fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);

  // Create a snapshot that includes that file.
  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  
  // Extend that file.
  FSDataOutputStream out = hdfs.append(bar);
  out.write(testData);
  out.close();
  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(testData.length, blks[0].getNumBytes());
  
  // Delete the file.
  hdfs.delete(bar, true);
  
  // Now make sure that the NN can still save an fsimage successfully.
  cluster.getNameNode().getRpcServer().setSafeMode(
      SafeModeAction.SAFEMODE_ENTER, false);
  cluster.getNameNode().getRpcServer().saveNamespace();
}