Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#createFile()

The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#createFile(). They are drawn from open source Hadoop projects; the source file and originating project are noted above each example.
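
Before the examples, here is a minimal sketch (not taken from any of the projects below) showing the two createFile() overloads that recur in them: the short form taking a file length, replication factor, and seed, and the longer form that additionally takes a buffer length and an explicit block size. The cluster setup, paths, sizes, and method name are illustrative assumptions; imports mirror those of the surrounding test classes (org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.Path, org.apache.hadoop.hdfs.*).

@Test
public void demoCreateFileOverloads() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();

    // Short overload: file length, replication factor, and a seed for the random content.
    DFSTestUtil.createFile(fs, new Path("/demo/short.dat"), 1024L, (short) 1, 0L);

    // Longer overload: buffer length and block size are passed explicitly
    // (compare Example 6 below). 1 MB keeps the block size above the default minimum.
    final long blockSize = 1024 * 1024;
    DFSTestUtil.createFile(fs, new Path("/demo/long.dat"),
        4096, 2 * blockSize, blockSize, (short) 1, 0L);

    // Optionally block until the requested replication is reached.
    DFSTestUtil.waitReplication(fs, new Path("/demo/short.dat"), (short) 1);
  } finally {
    cluster.shutdown();
  }
}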
Example 1
Source File: TestDataNodeMetrics.java    From big-c with Apache License 2.0
@Test
public void testDataNodeMetrics() throws Exception {
  Configuration conf = new HdfsConfiguration();
  SimulatedFSDataset.setFactory(conf);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    FileSystem fs = cluster.getFileSystem();
    final long LONG_FILE_LEN = Integer.MAX_VALUE+1L; 
    DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
        LONG_FILE_LEN, (short)1, 1L);
    List<DataNode> datanodes = cluster.getDataNodes();
    assertEquals(datanodes.size(), 1);
    DataNode datanode = datanodes.get(0);
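    // getMetrics, assertCounter, and getLongCounter below are static test helpers,
    // typically imported from org.apache.hadoop.test.MetricsAsserts.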
    MetricsRecordBuilder rb = getMetrics(datanode.getMetrics().name());
    assertCounter("BytesWritten", LONG_FILE_LEN, rb);
    assertTrue("Expected non-zero number of incremental block reports",
        getLongCounter("IncrementalBlockReportsNumOps", rb) > 0);
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 2
Source File: TestFsck.java    From hadoop with Apache License 2.0
/**
 * Test for including the snapshot files in fsck report
 */
@Test
public void testFsckForSnapshotFiles() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .build();
  try {
    String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
        "-files");
    assertTrue(runFsck.contains("HEALTHY"));
    final String fileName = "/srcdat";
    DistributedFileSystem hdfs = cluster.getFileSystem();
    Path file1 = new Path(fileName);
    DFSTestUtil.createFile(hdfs, file1, 1024, (short) 1, 1000L);
    hdfs.allowSnapshot(new Path("/"));
    hdfs.createSnapshot(new Path("/"), "mySnapShot");
    runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files");
    assertTrue(runFsck.contains("/.snapshot/mySnapShot/srcdat"));
    runFsck = runFsck(conf, 0, true, "/", "-files");
    assertFalse(runFsck.contains("mySnapShot"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 3
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Rename a file under a snapshottable directory when the file does not
 * exist in any snapshot.
 */
@Test (timeout=60000)
public void testRenameFileNotInSnapshot() throws Exception {
  hdfs.mkdirs(sub1);
  hdfs.allowSnapshot(sub1);
  hdfs.createSnapshot(sub1, snap1);
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
  hdfs.rename(file1, file2);

  // Query the diff report and make sure it looks as expected.
  SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
  List<DiffReportEntry> entries = diffReport.getDiffList();
  assertTrue(entries.size() == 2);
  assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
  assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName(),
      null));
}
 
Example 4
Source File: TestProcessCorruptBlocks.java    From hadoop with Apache License 2.0
/**
 * The corrupt replica has to be removed when the number of valid replicas
 * matches the replication factor for the file. Here that condition is
 * reached by reducing the replication factor.
 * The test strategy:
 *   Bring up a cluster with 3 DataNodes.
 *   Create a file with replication factor 3.
 *   Corrupt one replica of a block of the file.
 *   Verify that there are still 2 good replicas and 1 corrupt replica
 *    (the corrupt replica is not removed since the number of good
 *     replicas (2) is less than the replication factor (3)).
 *   Set the replication factor to 2.
 *   Verify that the corrupt replica is removed
 *     (it should be removed since the number of good replicas (2)
 *      now equals the replication factor (2)).
 */
@Test
public void testWhenDecreasingReplication() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
  conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  FileSystem fs = cluster.getFileSystem();
  final FSNamesystem namesystem = cluster.getNamesystem();

  try {
    final Path fileName = new Path("/foo1");
    DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
    DFSTestUtil.waitReplication(fs, fileName, (short) 3);

    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
    corruptBlock(cluster, fs, fileName, 0, block);

    DFSTestUtil.waitReplication(fs, fileName, (short) 2);

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(1, countReplicas(namesystem, block).corruptReplicas());

    namesystem.setReplication(fileName.toString(), (short) 2);

    // wait for 3 seconds so that all block reports are processed.
    try {
      Thread.sleep(3000);
    } catch (InterruptedException ignored) {
    }

    assertEquals(2, countReplicas(namesystem, block).liveReplicas());
    assertEquals(0, countReplicas(namesystem, block).corruptReplicas());

  } finally {
    cluster.shutdown();
  }
}
 
Example 5
Source File: TestSnapshotDiffReport.java    From big-c with Apache License 2.0
/**
 * Rename a file/dir and then delete the ancestor dir of the rename target;
 * the renamed file/dir should then be reported as deleted in the diff report.
 */
@Test
public void testDiffReportWithRenameAndDelete() throws Exception {
  final Path root = new Path("/");
  final Path dir1 = new Path(root, "dir1");
  final Path dir2 = new Path(root, "dir2");
  final Path foo = new Path(dir1, "foo");
  final Path fileInFoo = new Path(foo, "file");
  final Path bar = new Path(dir2, "bar");
  final Path fileInBar = new Path(bar, "file");
  DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, seed);

  SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
  hdfs.rename(fileInFoo, fileInBar, Rename.OVERWRITE);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  verifyDiffReport(root, "s0", "s1",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2/bar")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil
          .string2Bytes("dir2/bar/file")),
      new DiffReportEntry(DiffType.RENAME,
          DFSUtil.string2Bytes("dir1/foo/file"),
          DFSUtil.string2Bytes("dir2/bar/file")));

  // delete bar
  hdfs.delete(bar, true);
  SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
  verifyDiffReport(root, "s0", "s2",
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
      new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
      new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("dir2/bar")),
      new DiffReportEntry(DiffType.DELETE,
          DFSUtil.string2Bytes("dir1/foo/file")));
}
 
Example 6
Source File: TestQuotaByStorageType.java    From big-c with Apache License 2.0
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateTruncate() throws Exception {
  final Path foo = new Path(dir, "foo");
  Path createdFile1 = new Path(foo, "created_file1.data");
  dfs.mkdirs(foo);

  // set storage policy on directory "foo" to ONESSD
  dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);

  // set quota by storage type on directory "foo"
  dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
  INode fnode = fsdir.getINode4Write(foo.toString());
  assertTrue(fnode.isDirectory());
  assertTrue(fnode.isQuotaSet());

  // Create file of size 2 * BLOCKSIZE under directory "foo"
  long file1Len = BLOCKSIZE * 2;
  int bufLen = BLOCKSIZE / 16;
  DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);

  // Verify SSD consumed before truncate
  long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(file1Len, ssdConsumed);

  // Truncate file to 1 * BLOCKSIZE
  int newFile1Len = BLOCKSIZE * 1;
  dfs.truncate(createdFile1, newFile1Len);

  // Verify SSD consumed after truncate
  ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
      .getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
  assertEquals(newFile1Len, ssdConsumed);

  ContentSummary cs = dfs.getContentSummary(foo);
  assertEquals(cs.getSpaceConsumed(), newFile1Len * REPLICATION);
  assertEquals(cs.getTypeConsumed(StorageType.SSD), newFile1Len);
  assertEquals(cs.getTypeConsumed(StorageType.DISK), newFile1Len * 2);
}
 
Example 7
Source File: TestFsck.java    From big-c with Apache License 2.0
/**
 * Test that running the fsck command with illegal arguments prints the
 * proper usage.
 */
@Test
public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    // bring up a one-node cluster
    Configuration conf = new HdfsConfiguration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    String fileName = "/test.txt";
    Path filePath = new Path(fileName);
    FileSystem fs = cluster.getFileSystem();

    // create a one-block file
    DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
    DFSTestUtil.waitReplication(fs, filePath, (short) 1);

    // passing illegal option
    String outStr = runFsck(conf, -1, true, fileName, "-thisIsNotAValidFlag");
    System.out.println(outStr);
    assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));

    // passing multiple paths as arguments
    outStr = runFsck(conf, -1, true, "/", fileName);
    System.out.println(outStr);
    assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
    // clean up file system
    fs.delete(filePath, true);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 8
Source File: TestSnapshotBlocksMap.java    From hadoop with Apache License 2.0
/**
 * 1. Rename an under-construction file with 0-sized blocks after taking a snapshot.
 * 2. Delete the renamed directory.
 * Make sure we delete the 0-sized block.
 * See HDFS-5476.
 */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  // rename bar
  final Path bar2 = new Path(subDir, "bar2");
  hdfs.rename(bar, bar2);
  
  INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
  blks = bar2Node.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  // delete subDir
  hdfs.delete(subDir, true);
  
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Example 9
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * This test demonstrates that 
 * {@link INodeDirectory#removeChild}
 * and 
 * {@link INodeDirectory#addChild}
 * should use {@link INode#isInLatestSnapshot} to check if the
 * added/removed child should be recorded in snapshots.
 */
@Test
public void testRenameDirAndDeleteSnapshot_5() throws Exception {
  final Path dir1 = new Path("/dir1");
  final Path dir2 = new Path("/dir2");
  final Path dir3 = new Path("/dir3");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  hdfs.mkdirs(dir3);
  
  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  final Path bar = new Path(foo, "bar");
  // create file bar, and foo will become an INodeDirectory with snapshot
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  // delete snapshot s1. now foo is not in any snapshot
  hdfs.deleteSnapshot(dir1, "s1");
  
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  // rename /dir1/foo to /dir2/foo
  final Path foo2 = new Path(dir2, foo.getName());
  hdfs.rename(foo, foo2);
  // rename /dir2/foo/bar to /dir3/foo/bar
  final Path bar2 = new Path(dir2, "foo/bar");
  final Path bar3 = new Path(dir3, "bar");
  hdfs.rename(bar2, bar3);
  
  // delete /dir2/foo. Since it is not in any snapshot, we will call its 
  // destroy function. If we do not use isInLatestSnapshot in removeChild and
  // addChild methods in INodeDirectory (with snapshot), the file bar will be 
  // stored in the deleted list of foo, and will be destroyed.
  hdfs.delete(foo2, true);
  
  // check if /dir3/bar still exists
  assertTrue(hdfs.exists(bar3));
  INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
  assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
}
 
Example 10
Source File: TestDistCpSync.java    From hadoop with Apache License 2.0
private void initData2(Path dir) throws Exception {
  final Path test = new Path(dir, "test");
  final Path foo = new Path(dir, "foo");
  final Path bar = new Path(dir, "bar");
  final Path f1 = new Path(test, "f1");
  final Path f2 = new Path(foo, "f2");
  final Path f3 = new Path(bar, "f3");

  DFSTestUtil.createFile(dfs, f1, BLOCK_SIZE, DATA_NUM, 0L);
  DFSTestUtil.createFile(dfs, f2, BLOCK_SIZE, DATA_NUM, 1L);
  DFSTestUtil.createFile(dfs, f3, BLOCK_SIZE, DATA_NUM, 2L);
}
 
Example 11
Source File: TestBlocksWithNotEnoughRacks.java    From big-c with Apache License 2.0
@Test
public void testReduceReplFactorRespectsRackPolicy() throws Exception {
  Configuration conf = getConf();
  short REPLICATION_FACTOR = 3;
  final Path filePath = new Path("/testFile");
  String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(racks.length).racks(racks).build();
  final FSNamesystem ns = cluster.getNameNode().getNamesystem();

  try {
    // Create a file with one block
    final FileSystem fs = cluster.getFileSystem();
    DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
    ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);

    // Decrease the replication factor, make sure the deleted replica
    // was not the one that lived on the rack with only one replica,
    // ie we should still have 2 racks after reducing the repl factor.
    REPLICATION_FACTOR = 2;
    NameNodeAdapter.setReplication(ns, "/testFile", REPLICATION_FACTOR); 

    DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
  } finally {
    cluster.shutdown();
  }
}
 
Example 12
Source File: TestDiskError.java    From hadoop with Apache License 2.0
/**
 * Test to check that a DN goes down when all its volumes have failed.
 */
@Test
public void testShutdown() throws Exception {
  if (System.getProperty("os.name").startsWith("Windows")) {
    /**
     * This test depends on the OS not allowing file creation in a directory
     * that does not have write permissions for the user. Apparently that is
     * not the case on Windows (at least under Cygwin), and possibly AIX.
     * This is disabled on Windows.
     */
    return;
  }
  // Bring up two more datanodes
  cluster.startDataNodes(conf, 2, true, null, null);
  cluster.waitActive();
  final int dnIndex = 0;
  String bpid = cluster.getNamesystem().getBlockPoolId();
  File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
  File dir1 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  storageDir = cluster.getInstanceStorageDir(dnIndex, 1);
  File dir2 = MiniDFSCluster.getRbwDir(storageDir, bpid);
  try {
    // make the data directories of the first datanode read-only
    assertTrue("Couldn't chmod local vol", dir1.setReadOnly());
    assertTrue("Couldn't chmod local vol", dir2.setReadOnly());

    // create files and make sure that first datanode will be down
    DataNode dn = cluster.getDataNodes().get(dnIndex);
    for (int i=0; dn.isDatanodeUp(); i++) {
      Path fileName = new Path("/test.txt"+i);
      DFSTestUtil.createFile(fs, fileName, 1024, (short)2, 1L);
      DFSTestUtil.waitReplication(fs, fileName, (short)2);
      fs.delete(fileName, true);
    }
  } finally {
    // restore its old permission
    FileUtil.setWritable(dir1, true);
    FileUtil.setWritable(dir2, true);
  }
}
 
Example 13
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Test rename when the rename operation will exceed the quota in the
 * destination (dst) tree.
 */
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path subdir2 = new Path(dir2, "subdir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subdir2);
  
  final Path foo = new Path(dir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  
  // set ns quota of dir2 to 4, so the current remaining is 2 (already has
  // dir2, and subdir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  
  final Path foo2 = new Path(subdir2, foo.getName());
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new NSQuotaExceededException("fake exception")).when(fsdir2)
      .addLastINode((INodesInPath) Mockito.anyObject(),
          (INode) Mockito.anyObject(), Mockito.anyBoolean());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/subdir2/foo. 
  // FSDirectory#verifyQuota4Rename will pass since the remaining quota is 2.
  // However, the rename operation will fail since we let addLastINode throw
  // NSQuotaExceededException
  boolean rename = hdfs.rename(foo, foo2);
  assertFalse(rename);
  
  // check the undo
  assertTrue(hdfs.exists(foo));
  assertTrue(hdfs.exists(bar));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  INode barNode = fsdir2.getINode4Write(bar.toString());
  assertTrue(barNode.getClass() == INodeFile.class);
  assertSame(fooNode, barNode.getParent());
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  
  // check dir2
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(2, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(subdir2.toString()));
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
 
Example 14
Source File: TestNameNodePrunesMissingStorages.java    From hadoop with Apache License 2.0
private static void runTest(final String testCaseName,
                            final boolean createFiles,
                            final int numInitialStorages,
                            final int expectedStoragesAfterTest) throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;

  try {
    cluster = new MiniDFSCluster
        .Builder(conf)
        .numDataNodes(1)
        .storagesPerDatanode(numInitialStorages)
        .build();
    cluster.waitActive();

    final DataNode dn0 = cluster.getDataNodes().get(0);

    // Ensure NN knows about the storage.
    final DatanodeID dnId = dn0.getDatanodeId();
    final DatanodeDescriptor dnDescriptor =
        cluster.getNamesystem().getBlockManager().getDatanodeManager().getDatanode(dnId);
    assertThat(dnDescriptor.getStorageInfos().length, is(numInitialStorages));

    final String bpid = cluster.getNamesystem().getBlockPoolId();
    final DatanodeRegistration dnReg = dn0.getDNRegistrationForBP(bpid);
    DataNodeTestUtils.triggerBlockReport(dn0);

    if (createFiles) {
      final Path path = new Path("/", testCaseName);
      DFSTestUtil.createFile(
          cluster.getFileSystem(), path, 1024, (short) 1, 0x1BAD5EED);
      DataNodeTestUtils.triggerBlockReport(dn0);
    }

    // Generate a fake StorageReport that is missing one storage.
    final StorageReport reports[] =
        dn0.getFSDataset().getStorageReports(bpid);
    final StorageReport prunedReports[] = new StorageReport[numInitialStorages - 1];
    System.arraycopy(reports, 0, prunedReports, 0, prunedReports.length);

    // Stop the DataNode and send fake heartbeat with missing storage.
    cluster.stopDataNode(0);
    cluster.getNameNodeRpc().sendHeartbeat(dnReg, prunedReports, 0L, 0L, 0, 0,
        0, null);

    // Check that the missing storage was pruned.
    assertThat(dnDescriptor.getStorageInfos().length, is(expectedStoragesAfterTest));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 15
Source File: TestINodeFile.java    From hadoop with Apache License 2.0
/**
 * FSDirectory#unprotectedSetQuota creates a new INodeDirectoryWithQuota to
 * replace the original INodeDirectory. Before HDFS-4243, the parent field of
 * the children INodes of the target INodeDirectory was not changed to point
 * to the new INodeDirectoryWithQuota. This test case covers that scenario.
 */
@Test
public void testGetFullPathNameAfterSetQuota() throws Exception {
  long fileLen = 1024;
  replication = 3;
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(replication).build();
    cluster.waitActive();
    FSNamesystem fsn = cluster.getNamesystem();
    FSDirectory fsdir = fsn.getFSDirectory();
    DistributedFileSystem dfs = cluster.getFileSystem();

    // Create a file for test
    final Path dir = new Path("/dir");
    final Path file = new Path(dir, "file");
    DFSTestUtil.createFile(dfs, file, fileLen, replication, 0L);

    // Check the full path name of the INode associating with the file
    INode fnode = fsdir.getINode(file.toString());
    assertEquals(file.toString(), fnode.getFullPathName());
    
    // Call FSDirectory#unprotectedSetQuota which calls
    // INodeDirectory#replaceChild
    dfs.setQuota(dir, Long.MAX_VALUE - 1, replication * fileLen * 10);
    INodeDirectory dirNode = getDir(fsdir, dir);
    assertEquals(dir.toString(), dirNode.getFullPathName());
    assertTrue(dirNode.isWithQuota());
    
    final Path newDir = new Path("/newdir");
    final Path newFile = new Path(newDir, "file");
    // Also rename dir
    dfs.rename(dir, newDir, Options.Rename.OVERWRITE);
    // /dir/file now should be renamed to /newdir/file
    fnode = fsdir.getINode(newFile.toString());
    // getFullPathName can return correct result only if the parent field of
    // child node is set correctly
    assertEquals(newFile.toString(), fnode.getFullPathName());
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 16
Source File: TestFSImageWithSnapshot.java    From hadoop with Apache License 2.0
/**
 * Testing a special case with snapshots. When the following steps happen:
 * <pre>
 * 1. Take snapshot s1 on dir.
 * 2. Create new dir and files under subsubDir, which is descendant of dir.
 * 3. Take snapshot s2 on dir.
 * 4. Delete subsubDir.
 * 5. Delete snapshot s2.
 * </pre>
 * When we merge the diff from s2 into s1 (since we deleted s2), we need to
 * make sure all the files/dirs created after s1 are destroyed. Otherwise we
 * may save these files/dirs to the fsimage and cause a FileNotFoundException
 * while loading the fsimage.
 */
@Test (timeout=300000)
public void testSaveLoadImageAfterSnapshotDeletion()
    throws Exception {
  // create initial dir and subdir
  Path dir = new Path("/dir");
  Path subDir = new Path(dir, "subdir");
  Path subsubDir = new Path(subDir, "subsubdir");
  hdfs.mkdirs(subsubDir);
  
  // take snapshots on subdir and dir
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  
  // create new dir under initial dir
  Path newDir = new Path(subsubDir, "newdir");
  Path newFile = new Path(newDir, "newfile");
  hdfs.mkdirs(newDir);
  DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
  
  // create another snapshot
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s2");
  
  // delete subsubdir
  hdfs.delete(subsubDir, true);
  
  // delete snapshot s2
  hdfs.deleteSnapshot(dir, "s2");
  
  // restart cluster
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
      .format(false).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
  
  // save namespace to fsimage
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
  hdfs.saveNamespace();
  hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  
  cluster.shutdown();
  cluster = new MiniDFSCluster.Builder(conf).format(false)
      .numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  hdfs = cluster.getFileSystem();
}
 
Example 17
Source File: TestHASafeMode.java    From big-c with Apache License 2.0
/**
 * Similar to {@link #testBlocksRemovedWhileInSafeMode()} except that
 * the OP_DELETE edits arrive at the SBN before the block deletion reports.
 * The tracking of safe blocks needs to properly account for the removal
 * of the blocks as well as the safe count. This is a regression test for
 * HDFS-2742.
 */
@Test
public void testBlocksRemovedWhileInSafeModeEditsArriveFirst() throws Exception {
  banner("Starting with NN0 active and NN1 standby, creating some blocks");
  DFSTestUtil.createFile(fs, new Path("/test"), 10*BLOCK_SIZE, (short) 3, 1L);

  // Roll edit log so that, when the SBN restarts, it will load
  // the namespace during startup.
  nn0.getRpcServer().rollEditLog();
 
  banner("Restarting standby");
  restartStandby();
  
  // It will initially have all of the blocks necessary.
  String status = nn1.getNamesystem().getSafemode();
  assertTrue("Bad safemode status: '" + status + "'",
    status.startsWith(
      "Safe mode is ON. The reported blocks 10 has reached the threshold "
      + "0.9990 of total blocks 10. The number of live datanodes 3 has "
      + "reached the minimum number 0. In safe mode extension. "
      + "Safe mode will be turned off automatically"));

  // Delete those blocks while the SBN is in safe mode.
  // Immediately roll the edit log before the actual deletions are sent
  // to the DNs.
  banner("Removing the blocks without rolling the edit log");
  fs.delete(new Path("/test"), true);
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);

  // Should see removal of the blocks as well as their contribution to safe block count.
  assertSafeMode(nn1, 0, 0, 3, 0);

  
  banner("Triggering sending deletions to DNs and Deletion Reports");
  BlockManagerTestUtil.computeAllPendingWork(
      nn0.getNamesystem().getBlockManager());    
  cluster.triggerHeartbeats();
  HATestUtil.waitForDNDeletions(cluster);
  cluster.triggerDeletionReports();

  // No change in assertion status here, but some of the consistency checks
  // in safemode will fire here if we accidentally decrement safe block count
  // below 0.    
  assertSafeMode(nn1, 0, 0, 3, 0);
}
 
Example 18
Source File: TestTruncateQuotaUpdate.java    From big-c with Apache License 2.0
@Override
public void prepare() throws Exception {
  // original file size: 2.5 block
  DFSTestUtil.createFile(dfs, file, BLOCKSIZE * 2 + BLOCKSIZE / 2,
      REPLICATION, 0L);
}
 
Example 19
Source File: TestSnapshottableDirListing.java    From hadoop with Apache License 2.0
/**
 * Test listing all the snapshottable directories
 */
@Test (timeout=60000)
public void testListSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  // Initially there is no snapshottable directories in the system
  SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
  assertNull(dirs);
  
  // Make root snapshottable
  final Path root = new Path("/");
  hdfs.allowSnapshot(root);
  dirs = hdfs.getSnapshottableDirListing();
  assertEquals(1, dirs.length);
  assertEquals("", dirs[0].getDirStatus().getLocalName());
  assertEquals(root, dirs[0].getFullPath());
  
  // Make root non-snapshottable
  hdfs.disallowSnapshot(root);
  dirs = hdfs.getSnapshottableDirListing();
  assertNull(dirs);
  
  // Make dir1 as snapshottable
  hdfs.allowSnapshot(dir1);
  dirs = hdfs.getSnapshottableDirListing();
  assertEquals(1, dirs.length);
  assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
  assertEquals(dir1, dirs[0].getFullPath());
  // There is no snapshot for dir1 yet
  assertEquals(0, dirs[0].getSnapshotNumber());
  
  // Make dir2 as snapshottable
  hdfs.allowSnapshot(dir2);
  dirs = hdfs.getSnapshottableDirListing();
  assertEquals(2, dirs.length);
  assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
  assertEquals(dir1, dirs[0].getFullPath());
  assertEquals(dir2.getName(), dirs[1].getDirStatus().getLocalName());
  assertEquals(dir2, dirs[1].getFullPath());
  // There is no snapshot for dir2 yet
  assertEquals(0, dirs[1].getSnapshotNumber());
  
  // Create dir3
  final Path dir3 = new Path("/TestSnapshot3");
  hdfs.mkdirs(dir3);
  // Rename dir3 to dir2
  hdfs.rename(dir3, dir2, Rename.OVERWRITE);
  // Now we only have one snapshottable dir: dir1
  dirs = hdfs.getSnapshottableDirListing();
  assertEquals(1, dirs.length);
  assertEquals(dir1, dirs[0].getFullPath());
  
  // Make dir2 snapshottable again
  hdfs.allowSnapshot(dir2);
  // Create a snapshot for dir2
  hdfs.createSnapshot(dir2, "s1");
  hdfs.createSnapshot(dir2, "s2");
  dirs = hdfs.getSnapshottableDirListing();
  // There are now 2 snapshots for dir2
  assertEquals(dir2, dirs[1].getFullPath());
  assertEquals(2, dirs[1].getSnapshotNumber());
  
  // Create sub-dirs under dir1
  Path sub1 = new Path(dir1, "sub1");
  Path file1 =  new Path(sub1, "file1");
  Path sub2 = new Path(dir1, "sub2");
  Path file2 =  new Path(sub2, "file2");
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
  // Make sub1 and sub2 snapshottable
  hdfs.allowSnapshot(sub1);
  hdfs.allowSnapshot(sub2);
  dirs = hdfs.getSnapshottableDirListing();
  assertEquals(4, dirs.length);
  assertEquals(dir1, dirs[0].getFullPath());
  assertEquals(dir2, dirs[1].getFullPath());
  assertEquals(sub1, dirs[2].getFullPath());
  assertEquals(sub2, dirs[3].getFullPath());
  
  // reset sub1
  hdfs.disallowSnapshot(sub1);
  dirs = hdfs.getSnapshottableDirListing();
  assertEquals(3, dirs.length);
  assertEquals(dir1, dirs[0].getFullPath());
  assertEquals(dir2, dirs[1].getFullPath());
  assertEquals(sub2, dirs[2].getFullPath());
  
  // Remove dir1, both dir1 and sub2 will be removed
  hdfs.delete(dir1, true);
  dirs = hdfs.getSnapshottableDirListing();
  assertEquals(1, dirs.length);
  assertEquals(dir2.getName(), dirs[0].getDirStatus().getLocalName());
  assertEquals(dir2, dirs[0].getFullPath());
}
 
Example 20
Source File: TestDNFencing.java    From hadoop with Apache License 2.0
/**
 * Test case that reduces replication of a file with a lot of blocks
 * and then fails over right after those blocks enter the DN invalidation
 * queues on the active. Ensures that fencing is correct and no replicas
 * are lost.
 */
@Test
public void testNNClearsCommandsOnFailoverWithReplChanges()
    throws Exception {
  // Make lots of blocks to increase chances of triggering a bug.
  DFSTestUtil.createFile(fs, TEST_FILE_PATH, 30*SMALL_BLOCK, (short)1, 1L);

  banner("rolling NN1's edit log, forcing catch-up");
  HATestUtil.waitForStandbyToCatchUp(nn1, nn2);
  
  // Get some new replicas reported so that NN2 now considers
  // them over-replicated and schedules some more deletions
  nn1.getRpcServer().setReplication(TEST_FILE, (short) 2);
  while (BlockManagerTestUtil.getComputedDatanodeWork(
      nn1.getNamesystem().getBlockManager()) > 0) {
    LOG.info("Getting more replication work computed");
  }
  BlockManager bm1 = nn1.getNamesystem().getBlockManager();
  while (bm1.getPendingReplicationBlocksCount() > 0) {
    BlockManagerTestUtil.updateState(bm1);
    cluster.triggerHeartbeats();
    Thread.sleep(1000);
  }
  
  banner("triggering BRs");
  cluster.triggerBlockReports();
  
  nn1.getRpcServer().setReplication(TEST_FILE, (short) 1);

  
  banner("computing invalidation on nn1");

  BlockManagerTestUtil.computeInvalidationWork(
      nn1.getNamesystem().getBlockManager());
  doMetasave(nn1);

  banner("computing invalidation on nn2");
  BlockManagerTestUtil.computeInvalidationWork(
      nn2.getNamesystem().getBlockManager());
  doMetasave(nn2);

  // Dump some info for debugging purposes.
  banner("Metadata immediately before failover");
  doMetasave(nn2);


  // Transition nn2 to active even though nn1 still thinks it's active
  banner("Failing to NN2 but let NN1 continue to think it's active");
  NameNodeAdapter.abortEditLogs(nn1);
  NameNodeAdapter.enterSafeMode(nn1, false);

  
  BlockManagerTestUtil.computeInvalidationWork(
      nn2.getNamesystem().getBlockManager());
  cluster.transitionToActive(1);

  // Check that the standby picked up the replication change.
  assertEquals(1,
      nn2.getRpcServer().getFileInfo(TEST_FILE).getReplication());

  // Dump some info for debugging purposes.
  banner("Metadata immediately after failover");
  doMetasave(nn2);
  
  banner("Triggering heartbeats and block reports so that fencing is completed");
  cluster.triggerHeartbeats();
  cluster.triggerBlockReports();
  
  banner("Metadata after nodes have all block-reported");
  doMetasave(nn2);
  
  // Force a rescan of postponedMisreplicatedBlocks.
  BlockManager nn2BM = nn2.getNamesystem().getBlockManager();
  BlockManagerTestUtil.checkHeartbeat(nn2BM);
  BlockManagerTestUtil.rescanPostponedMisreplicatedBlocks(nn2BM);

  // The block should no longer be postponed.
  assertEquals(0, nn2.getNamesystem().getPostponedMisreplicatedBlocks());
  
  // Wait for NN2 to enact its deletions (replication monitor has to run, etc)
  BlockManagerTestUtil.computeInvalidationWork(
      nn2.getNamesystem().getBlockManager());

  HATestUtil.waitForNNToIssueDeletions(nn2);
  cluster.triggerHeartbeats();
  HATestUtil.waitForDNDeletions(cluster);
  cluster.triggerDeletionReports();
  assertEquals(0, nn2.getNamesystem().getUnderReplicatedBlocks());
  assertEquals(0, nn2.getNamesystem().getPendingReplicationBlocks());
  
  banner("Making sure the file is still readable");
  FileSystem fs2 = cluster.getFileSystem(1);
  DFSTestUtil.readFile(fs2, TEST_FILE_PATH);
}