org.apache.hadoop.hdfs.server.namenode.INodeFile Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.INodeFile. You can go to the original project or source file by following the links above each example.
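
Most of the examples below run against a MiniDFSCluster and resolve an INodeFile through the NameNode's FSDirectory. The following is a minimal self-contained sketch of that common pattern, assuming the hadoop-hdfs test artifacts are on the classpath; the class name and field names are illustrative, not taken from any one example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

public class INodeFileLookupSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();
      FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

      // Create a small file, then resolve its path to the NameNode's INodeFile.
      Path file = new Path("/demo");
      DFSTestUtil.createFile(hdfs, file, 1024L, (short) 1, 0L);
      INodeFile inode = fsdir.getINode(file.toString()).asFile();
      System.out.println("replication = " + inode.getFileReplication());
    } finally {
      cluster.shutdown();
    }
  }
}
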
Example #1
Source File: TestSnapshotReplication.java    From hadoop with Apache License 2.0
/**
 * Check the replication for both the current file and all its prior snapshots
 * 
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
    final INodeFile ssInode = iip.getLastINode().asFile();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshotId()));
  }
}
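
The getINodeFile helper called above is a private method of the test class and is not shown in this snippet. One plausible reconstruction, modeled on the INodeFile.valueOf pattern used in Example #22 below (the actual helper may differ):

// Assumed reconstruction of the test's private helper; not verbatim source.
private INodeFile getINodeFile(Path p) throws Exception {
  final String s = p.toString();
  return INodeFile.valueOf(fsdir.getINode(s), s);
}
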
 
Example #2
Source File: FileWithSnapshotFeature.java    From hadoop with Apache License 2.0
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
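
For context, cleanFile is reached from INodeFile#cleanSubtree whenever the file carries the snapshot feature. The sketch below is a simplified paraphrase of that call site, reconstructed from memory of the Hadoop 2.x sources rather than copied verbatim:

// Simplified paraphrase of INodeFile#cleanSubtree; not the verbatim source.
public QuotaCounts cleanSubtree(BlockStoragePolicySuite bsps, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  FileWithSnapshotFeature sf = getFileWithSnapshotFeature();
  if (sf != null) {
    // With the snapshot feature, delegate so snapshot diffs stay consistent.
    return sf.cleanFile(bsps, this, snapshotId, priorSnapshotId,
        collectedBlocks, removedINodes);
  }
  // Without the feature, deleting the current file destroys it outright.
  QuotaCounts counts = new QuotaCounts.Builder().build();
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    computeQuotaUsage(bsps, counts, false);
    destroyAndCollectBlocks(bsps, collectedBlocks, removedINodes);
  }
  return counts;
}
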
 
Example #3
Source File: FileWithSnapshotFeature.java    From hadoop with Apache License 2.0
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else {
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
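
The decision at the end hinges on what "beyond max" means: blocks are kept while the running byte total is still below the maximum live/snapshotted file size, and everything from that index onward can be collected. A stand-alone illustration of that rule (the real logic lives in INodeFile#collectBlocksBeyondMax):

// Illustrative only: compute the first block index that falls beyond `max`
// bytes, mirroring the truncation loop in INodeFile#collectBlocksBeyondMax.
static int firstBlockBeyond(long[] blockSizes, long max) {
  long size = 0;
  int n = 0;
  while (n < blockSizes.length && max > size) {
    size += blockSizes[n];
    n++;
  }
  return n; // blocks[n..] hold no bytes of any live or snapshotted copy
}

For example, with three 10-byte blocks and max = 15, this returns 2, so only the third block would be collected for deletion.
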
 
Example #4
Source File: TestSnapshotReplication.java    From big-c with Apache License 2.0
/**
 * Check the replication for both the current file and all its prior snapshots
 * 
 * @param currentFile
 *          the Path of the current file
 * @param snapshotRepMap
 *          A map maintaining all the snapshots of the current file, as well
 *          as their expected replication number stored in their corresponding
 *          INodes
 * @param expectedBlockRep
 *          The expected replication number
 * @throws Exception
 */
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the getBlockReplication for the INode of the currentFile
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  assertEquals(expectedBlockRep, inodeOfCurrentFile.getBlockReplication());
  // Then check replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
    final INodeFile ssInode = iip.getLastINode().asFile();
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(expectedBlockRep, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshotId()));
  }
}
 
Example #5
Source File: FileWithSnapshotFeature.java    From big-c with Apache License 2.0
/**
 * If some blocks at the end of the block list no longer belong to
 * any inode, collect them and update the block list.
 */
public void collectBlocksAndClear(final BlockStoragePolicySuite bsps, final INodeFile file,
    final BlocksMapUpdateInfo info, final List<INode> removedINodes) {
  // check if everything is deleted.
  if (isCurrentFileDeleted() && getDiffs().asList().isEmpty()) {
    file.destroyAndCollectBlocks(bsps, info, removedINodes);
    return;
  }
  // find max file size.
  final long max;
  FileDiff diff = getDiffs().getLast();
  if (isCurrentFileDeleted()) {
    max = diff == null ? 0 : diff.getFileSize();
  } else {
    max = file.computeFileSize();
  }

  // Collect blocks that should be deleted
  FileDiff last = diffs.getLast();
  BlockInfoContiguous[] snapshotBlocks = last == null ? null : last.getBlocks();
  if (snapshotBlocks == null) {
    file.collectBlocksBeyondMax(max, info);
  } else {
    file.collectBlocksBeyondSnapshot(snapshotBlocks, info);
  }
}
 
Example #6
Source File: FileWithSnapshotFeature.java    From big-c with Apache License 2.0
public QuotaCounts cleanFile(final BlockStoragePolicySuite bsps,
    final INodeFile file, final int snapshotId,
    int priorSnapshotId, final BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  if (snapshotId == Snapshot.CURRENT_STATE_ID) {
    // delete the current file while the file has snapshot feature
    if (!isCurrentFileDeleted()) {
      file.recordModification(priorSnapshotId);
      deleteCurrentFile();
    }
    collectBlocksAndClear(bsps, file, collectedBlocks, removedINodes);
    return new QuotaCounts.Builder().build();
  } else { // delete the snapshot
    priorSnapshotId = getDiffs().updatePrior(snapshotId, priorSnapshotId);
    return diffs.deleteSnapshotDiff(bsps, snapshotId, priorSnapshotId, file,
        collectedBlocks, removedINodes);
  }
}
 
Example #7
Source File: TestSnapshotBlocksMap.java    From hadoop with Apache License 2.0
/**
 * Make sure that a delete of a non-zero-length file which results in a
 * zero-length file in a snapshot works.
 */
@Test
public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final byte[] testData = "foo bar baz".getBytes();
  
  // Create a zero-length file.
  DFSTestUtil.createFile(hdfs, bar, 0, REPLICATION, 0L);
  assertEquals(0, fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);

  // Create a snapshot that includes that file.
  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  
  // Extend that file.
  FSDataOutputStream out = hdfs.append(bar);
  out.write(testData);
  out.close();
  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(testData.length, blks[0].getNumBytes());
  
  // Delete the file.
  hdfs.delete(bar, true);
  
  // Now make sure that the NN can still save an fsimage successfully.
  cluster.getNameNode().getRpcServer().setSafeMode(
      SafeModeAction.SAFEMODE_ENTER, false);
  cluster.getNameNode().getRpcServer().saveNamespace();
}
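
The @Test methods in these examples rely on fixture fields (conf, cluster, fsn, fsdir, hdfs, REPLICATION, BLOCKSIZE) that the snippets do not show. The following is a plausible JUnit setup modeled on what such a test class typically declares; the actual TestSnapshotBlocksMap fixture may differ in details.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.junit.After;
import org.junit.Before;

// Assumed fixture; the actual test class may configure more than this.
private static final short REPLICATION = 3;
private static final long BLOCKSIZE = 1024;

private Configuration conf;
private MiniDFSCluster cluster;
private FSNamesystem fsn;
private FSDirectory fsdir;
private DistributedFileSystem hdfs;

@Before
public void setUp() throws Exception {
  conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
  cluster.waitActive();
  fsn = cluster.getNamesystem();
  fsdir = fsn.getFSDirectory();
  hdfs = cluster.getFileSystem();
}

@After
public void tearDown() throws Exception {
  if (cluster != null) {
    cluster.shutdown();
  }
}
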
 
Example #8
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * This test demonstrates that 
 * {@link INodeDirectory#removeChild}
 * and 
 * {@link INodeDirectory#addChild}
 * should use {@link INode#isInLatestSnapshot} to check if the
 * added/removed child should be recorded in snapshots.
 */
@Test
public void testRenameDirAndDeleteSnapshot_5() throws Exception {
  final Path dir1 = new Path("/dir1");
  final Path dir2 = new Path("/dir2");
  final Path dir3 = new Path("/dir3");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  hdfs.mkdirs(dir3);
  
  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  final Path bar = new Path(foo, "bar");
  // create file bar, and foo will become an INodeDirectory with snapshot
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  // delete snapshot s1. now foo is not in any snapshot
  hdfs.deleteSnapshot(dir1, "s1");
  
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  // rename /dir1/foo to /dir2/foo
  final Path foo2 = new Path(dir2, foo.getName());
  hdfs.rename(foo, foo2);
  // rename /dir2/foo/bar to /dir3/foo/bar
  final Path bar2 = new Path(dir2, "foo/bar");
  final Path bar3 = new Path(dir3, "bar");
  hdfs.rename(bar2, bar3);
  
  // delete /dir2/foo. Since it is not in any snapshot, we will call its 
  // destroy function. If we do not use isInLatestSnapshot in removeChild and
  // addChild methods in INodeDirectory (with snapshot), the file bar will be 
  // stored in the deleted list of foo, and will be destroyed.
  hdfs.delete(foo2, true);
  
  // check if /dir3/bar still exists
  assertTrue(hdfs.exists(bar3));
  INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
  assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
}
 
Example #9
Source File: TestSnapshotBlocksMap.java    From big-c with Apache License 2.0
/**
 * Make sure we delete the 0-sized block when deleting an under-construction file
 */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(subDir, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Example #10
Source File: TestSnapshotBlocksMap.java    From big-c with Apache License 2.0
/**
 * 1. Rename an under-construction file with 0-sized blocks after snapshot.
 * 2. Delete the renamed directory.
 * Make sure we delete the 0-sized block.
 * See HDFS-5476.
 */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  // rename bar
  final Path bar2 = new Path(subDir, "bar2");
  hdfs.rename(bar, bar2);
  
  INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
  blks = bar2Node.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  // delete subDir
  hdfs.delete(subDir, true);
  
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Example #11
Source File: TestSnapshotBlocksMap.java    From big-c with Apache License 2.0
/**
 * Make sure that a delete of a non-zero-length file which results in a
 * zero-length file in a snapshot works.
 */
@Test
public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final byte[] testData = "foo bar baz".getBytes();
  
  // Create a zero-length file.
  DFSTestUtil.createFile(hdfs, bar, 0, REPLICATION, 0L);
  assertEquals(0, fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);

  // Create a snapshot that includes that file.
  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  
  // Extend that file.
  FSDataOutputStream out = hdfs.append(bar);
  out.write(testData);
  out.close();
  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(testData.length, blks[0].getNumBytes());
  
  // Delete the file.
  hdfs.delete(bar, true);
  
  // Now make sure that the NN can still save an fsimage successfully.
  cluster.getNameNode().getRpcServer().setSafeMode(
      SafeModeAction.SAFEMODE_ENTER, false);
  cluster.getNameNode().getRpcServer().saveNamespace();
}
 
Example #12
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Rename a single file across snapshottable dirs.
 */
@Test (timeout=60000)
public void testRenameFileAcrossSnapshottableDirs() throws Exception {
  final Path sdir1 = new Path("/dir1");
  final Path sdir2 = new Path("/dir2");
  hdfs.mkdirs(sdir1);
  hdfs.mkdirs(sdir2);
  final Path foo = new Path(sdir2, "foo");
  DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
  hdfs.createSnapshot(sdir1, "s3");
  
  final Path newfoo = new Path(sdir1, "foo");
  hdfs.rename(foo, newfoo);
  
  // change the replication factor of foo
  hdfs.setReplication(newfoo, REPL_1);
  
  // /dir2/.snapshot/s2/foo should still work
  final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
      "foo");
  assertTrue(hdfs.exists(foo_s2));
  FileStatus status = hdfs.getFileStatus(foo_s2);
  assertEquals(REPL, status.getReplication());
  
  final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
      "foo");
  assertFalse(hdfs.exists(foo_s3));
  INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
  Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
  INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
  assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
 
Example #13
Source File: TestSnapshotReplication.java    From hadoop with Apache License 2.0
/**
 * Test replication for a file with snapshots, also including the scenario
 * where the original file is deleted
 */
@Test (timeout=60000)
public void testReplicationAfterDeletion() throws Exception {
  // Create file1, set its replication to 3
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
  Map<Path, Short> snapshotRepMap = new HashMap<Path, Short>();
  // Take 3 snapshots of sub1
  for (int i = 1; i <= 3; i++) {
    Path root = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s" + i);
    Path ssFile = new Path(root, file1.getName());
    snapshotRepMap.put(ssFile, REPLICATION);
  }
  // Check replication
  checkFileReplication(file1, REPLICATION, REPLICATION);
  checkSnapshotFileReplication(file1, snapshotRepMap, REPLICATION);
  
  // Delete file1
  hdfs.delete(file1, true);
  // Check replication of snapshots
  for (Path ss : snapshotRepMap.keySet()) {
    final INodeFile ssInode = getINodeFile(ss);
    // The replication number derived from the
    // INodeFileWithLink#getBlockReplication should always == expectedBlockRep
    assertEquals(REPLICATION, ssInode.getBlockReplication());
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication());
  }
}
 
Example #14
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * This test demonstrates that 
 * {@link INodeDirectory#removeChild}
 * and 
 * {@link INodeDirectory#addChild}
 * should use {@link INode#isInLatestSnapshot} to check if the
 * added/removed child should be recorded in snapshots.
 */
@Test
public void testRenameDirAndDeleteSnapshot_5() throws Exception {
  final Path dir1 = new Path("/dir1");
  final Path dir2 = new Path("/dir2");
  final Path dir3 = new Path("/dir3");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(dir2);
  hdfs.mkdirs(dir3);
  
  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  final Path bar = new Path(foo, "bar");
  // create file bar, and foo will become an INodeDirectory with snapshot
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  // delete snapshot s1. now foo is not in any snapshot
  hdfs.deleteSnapshot(dir1, "s1");
  
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  // rename /dir1/foo to /dir2/foo
  final Path foo2 = new Path(dir2, foo.getName());
  hdfs.rename(foo, foo2);
  // rename /dir2/foo/bar to /dir3/foo/bar
  final Path bar2 = new Path(dir2, "foo/bar");
  final Path bar3 = new Path(dir3, "bar");
  hdfs.rename(bar2, bar3);
  
  // delete /dir2/foo. Since it is not in any snapshot, we will call its 
  // destroy function. If we do not use isInLatestSnapshot in removeChild and
  // addChild methods in INodeDirectory (with snapshot), the file bar will be 
  // stored in the deleted list of foo, and will be destroyed.
  hdfs.delete(foo2, true);
  
  // check if /dir3/bar still exists
  assertTrue(hdfs.exists(bar3));
  INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
  assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
}
 
Example #15
Source File: FileDiff.java    From hadoop with Apache License 2.0
@Override
QuotaCounts destroyDiffAndCollectBlocks(BlockStoragePolicySuite bsps, INodeFile currentINode,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  return currentINode.getFileWithSnapshotFeature()
      .updateQuotaAndCollectBlocks(bsps, currentINode, this, collectedBlocks,
          removedINodes);
}
 
Example #16
Source File: TestSnapshotReplication.java    From big-c with Apache License 2.0
/**
 * Check the replication of a given file. We test both
 * {@link INodeFile#getFileReplication()} and
 * {@link INodeFile#getBlockReplication()}.
 *
 * @param file The given file
 * @param replication The expected replication number
 * @param blockReplication The expected replication number for the block
 * @throws Exception
 */
private void checkFileReplication(Path file, short replication,
    short blockReplication) throws Exception {
  // Get the FileStatus of the given file. Note that the replication number
  // in FileStatus is derived from INodeFile#getFileReplication().
  short fileReplication = hdfs.getFileStatus(file).getReplication();
  assertEquals(replication, fileReplication);
  // Check the correctness of getBlockReplication()
  INode inode = fsdir.getINode(file.toString());
  assertTrue(inode instanceof INodeFile);
  assertEquals(blockReplication, ((INodeFile) inode).getBlockReplication());
}
 
Example #17
Source File: FileDiff.java    From hadoop with Apache License 2.0
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    BlockStoragePolicySuite bsps, INodeFile currentINode,
    FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  FileWithSnapshotFeature sf = currentINode.getFileWithSnapshotFeature();
  assert sf != null : "FileWithSnapshotFeature is null";
  return sf.updateQuotaAndCollectBlocks(
      bsps, currentINode, posterior, collectedBlocks, removedINodes);
}
 
Example #18
Source File: FSImageFormatPBSnapshot.java    From hadoop with Apache License 2.0
private void serializeFileDiffList(INodeFile file, OutputStream out)
    throws IOException {
  FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
  if (sf != null) {
    List<FileDiff> diffList = sf.getDiffs().asList();
    SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
        .newBuilder().setInodeId(file.getId()).setType(Type.FILEDIFF)
        .setNumOfDiff(diffList.size()).build();
    entry.writeDelimitedTo(out);
    for (int i = diffList.size() - 1; i >= 0; i--) {
      FileDiff diff = diffList.get(i);
      SnapshotDiffSection.FileDiff.Builder fb = SnapshotDiffSection.FileDiff
          .newBuilder().setSnapshotId(diff.getSnapshotId())
          .setFileSize(diff.getFileSize());
      if (diff.getBlocks() != null) {
        for (Block block : diff.getBlocks()) {
          fb.addBlocks(PBHelper.convert(block));
        }
      }
      INodeFileAttributes copy = diff.snapshotINode;
      if (copy != null) {
        fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
            .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext()));
      }
      fb.build().writeDelimitedTo(out);
    }
  }
}
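
On the load side, the same entries are read back with protobuf's parseDelimitedFrom, in the reverse (newest-first) order they were written. The fragment below is a simplified sketch of that counterpart, assuming `in` is the fsimage section's InputStream; the real loader in FSImageFormatPBSnapshot also rebuilds snapshot copies and block lists.

// Simplified counterpart sketch; not the verbatim loader.
SnapshotDiffSection.DiffEntry entry =
    SnapshotDiffSection.DiffEntry.parseDelimitedFrom(in);
if (entry.getType() == Type.FILEDIFF) {
  int numDiffs = entry.getNumOfDiff();
  for (int i = 0; i < numDiffs; i++) {
    // Diffs were serialized newest-first, so i == 0 is the latest diff.
    SnapshotDiffSection.FileDiff pbf =
        SnapshotDiffSection.FileDiff.parseDelimitedFrom(in);
    long fileSize = pbf.getFileSize();
    int snapshotId = pbf.getSnapshotId();
    // ...rebuild a FileDiff for (snapshotId, fileSize) here...
  }
}
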
 
Example #19
Source File: TestSnapshotReplication.java    From hadoop with Apache License 2.0
/**
 * Check the replication of a given file. We test both
 * {@link INodeFile#getFileReplication()} and
 * {@link INodeFile#getBlockReplication()}.
 *
 * @param file The given file
 * @param replication The expected replication number
 * @param blockReplication The expected replication number for the block
 * @throws Exception
 */
private void checkFileReplication(Path file, short replication,
    short blockReplication) throws Exception {
  // Get the FileStatus of the given file. Note that the replication number
  // in FileStatus is derived from INodeFile#getFileReplication().
  short fileReplication = hdfs.getFileStatus(file).getReplication();
  assertEquals(replication, fileReplication);
  // Check the correctness of getBlockReplication()
  INode inode = fsdir.getINode(file.toString());
  assertTrue(inode instanceof INodeFile);
  assertEquals(blockReplication, ((INodeFile) inode).getBlockReplication());
}
 
Example #20
Source File: TestSnapshotBlocksMap.java    From big-c with Apache License 2.0
/**
 * Make sure we delete the 0-sized block when deleting an INodeFileUCWithSnapshot
 */
@Test
public void testDeletionWithZeroSizeBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(bar, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
      bar.getName());
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Example #21
Source File: TestSnapshotBlocksMap.java    From hadoop with Apache License 2.0
/**
 * 1. Rename an under-construction file with 0-sized blocks after snapshot.
 * 2. Delete the renamed directory.
 * Make sure we delete the 0-sized block.
 * See HDFS-5476.
 */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  // rename bar
  final Path bar2 = new Path(subDir, "bar2");
  hdfs.rename(bar, bar2);
  
  INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
  blks = bar2Node.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  // delete subDir
  hdfs.delete(subDir, true);
  
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Example #22
Source File: TestSnapshotBlocksMap.java    From big-c with Apache License 2.0
static INodeFile assertBlockCollection(String path, int numBlocks,
   final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for(BlockInfoContiguous b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}
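
The per-block overload invoked inside the loop is not shown above. A plausible version, based on the public BlockManager accessors (the actual helper in TestSnapshotBlocksMap may differ slightly):

// Assumed reconstruction of the per-block helper; not verbatim source.
static void assertBlockCollection(final BlockManager blkManager,
    final INodeFile file, final BlockInfoContiguous b) {
  assertSame(b, blkManager.getStoredBlock(b));
  assertSame(file, blkManager.getBlockCollection(b));
  assertSame(file, b.getBlockCollection());
}
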
 
Example #23
Source File: FileDiffList.java    From big-c with Apache License 2.0
public void saveSelf2Snapshot(int latestSnapshotId, INodeFile iNodeFile,
    INodeFileAttributes snapshotCopy, boolean withBlocks) {
  final FileDiff diff =
      super.saveSelf2Snapshot(latestSnapshotId, iNodeFile, snapshotCopy);
  if (withBlocks) { // Store blocks if this is the first update
    diff.setBlocks(iNodeFile.getBlocks());
  }
}
 
Example #24
Source File: FileWithSnapshotFeature.java    From big-c with Apache License 2.0
boolean changedBetweenSnapshots(INodeFile file, Snapshot from, Snapshot to) {
  int[] diffIndexPair = diffs.changedBetweenSnapshots(from, to);
  if (diffIndexPair == null) {
    return false;
  }
  int earlierDiffIndex = diffIndexPair[0];
  int laterDiffIndex = diffIndexPair[1];

  final List<FileDiff> diffList = diffs.asList();
  final long earlierLength = diffList.get(earlierDiffIndex).getFileSize();
  final long laterLength = laterDiffIndex == diffList.size() ? file
      .computeFileSize(true, false) : diffList.get(laterDiffIndex)
      .getFileSize();
  if (earlierLength != laterLength) { // file length has been changed
    return true;
  }

  INodeFileAttributes earlierAttr = null; // check the metadata
  for (int i = earlierDiffIndex; i < laterDiffIndex; i++) {
    FileDiff diff = diffList.get(i);
    if (diff.snapshotINode != null) {
      earlierAttr = diff.snapshotINode;
      break;
    }
  }
  if (earlierAttr == null) { // no meta-change at all, return false
    return false;
  }
  INodeFileAttributes laterAttr = diffs.getSnapshotINode(
      Math.max(Snapshot.getSnapshotId(from), Snapshot.getSnapshotId(to)),
      file);
  return !earlierAttr.metadataEquals(laterAttr);
}
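
changedBetweenSnapshots is used when the NameNode computes the user-visible snapshot diff. A fragment showing how that surfaces through the public API, assuming hdfs is a DistributedFileSystem and /foo is a snapshottable directory with snapshots "s1" and "s2" already taken:

// Exercise the diff machinery via the public client API.
SnapshotDiffReport report =
    hdfs.getSnapshotDiffReport(new Path("/foo"), "s1", "s2");
for (SnapshotDiffReport.DiffReportEntry e : report.getDiffList()) {
  System.out.println(e); // e.g. "M ./bar" for a file modified between s1 and s2
}
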
 
Example #25
Source File: TestSnapshotDeletion.java    From big-c with Apache License 2.0
/**
 * Delete a snapshot that is taken before a directory deletion (recursively),
 * directory diff list should be combined correctly.
 */
@Test (timeout=60000)
public void testDeleteSnapshot2() throws Exception {
  final Path root = new Path("/");

  Path dir = new Path("/dir1");
  Path file1 = new Path(dir, "file1");
  DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);

  hdfs.allowSnapshot(root);
  hdfs.createSnapshot(root, "s1");

  Path file2 = new Path(dir, "file2");
  DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
  INodeFile file2Node = fsdir.getINode(file2.toString()).asFile();
  long file2NodeId = file2Node.getId();

  hdfs.createSnapshot(root, "s2");

  // delete directory recursively
  assertTrue(hdfs.delete(dir, true));
  assertNotNull(fsdir.getInode(file2NodeId));

  // delete second snapshot
  hdfs.deleteSnapshot(root, "s2");
  assertNull(fsdir.getInode(file2NodeId));

  NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
  NameNodeAdapter.saveNamespace(cluster.getNameNode());

  // restart NN
  cluster.restartNameNodes();
}
 
Example #26
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  INodeFile fileNode = cluster.getNamesystem(0).getFSDirectory()
      .getINode4Write(file).asFile();
  BlockInfoContiguousUnderConstruction blkUC =
      (BlockInfoContiguousUnderConstruction) (fileNode.getBlocks())[1];
  int datanodeNum = blkUC.getExpectedStorageLocations().length;
  for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) {
    Thread.sleep(1000);
    datanodeNum = blkUC.getExpectedStorageLocations().length;
  }
  return datanodeNum == 2;
}
 
Example #27
Source File: FileDiff.java    From big-c with Apache License 2.0
@Override
QuotaCounts combinePosteriorAndCollectBlocks(
    BlockStoragePolicySuite bsps, INodeFile currentINode,
    FileDiff posterior, BlocksMapUpdateInfo collectedBlocks,
    final List<INode> removedINodes) {
  FileWithSnapshotFeature sf = currentINode.getFileWithSnapshotFeature();
  assert sf != null : "FileWithSnapshotFeature is null";
  return sf.updateQuotaAndCollectBlocks(
      bsps, currentINode, posterior, collectedBlocks, removedINodes);
}
 
Example #28
Source File: FileDiff.java    From big-c with Apache License 2.0
@Override
QuotaCounts destroyDiffAndCollectBlocks(BlockStoragePolicySuite bsps, INodeFile currentINode,
    BlocksMapUpdateInfo collectedBlocks, final List<INode> removedINodes) {
  return currentINode.getFileWithSnapshotFeature()
      .updateQuotaAndCollectBlocks(bsps, currentINode, this, collectedBlocks,
          removedINodes);
}
 
Example #29
Source File: TestRetryCacheWithHA.java    From big-c with Apache License 2.0
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  INodeFile fileNode = cluster.getNameNode(0).getNamesystem()
      .getFSDirectory().getINode4Write(fileName).asFile();
  boolean fileIsUC = fileNode.isUnderConstruction();
  for (int i = 0; i < CHECKTIMES && !fileIsUC; i++) {
    Thread.sleep(1000);
    fileNode = cluster.getNameNode(0).getNamesystem().getFSDirectory()
        .getINode4Write(fileName).asFile();
    fileIsUC = fileNode.isUnderConstruction();
  }
  return fileIsUC;
}
 
Example #30
Source File: FSImageFormatPBSnapshot.java    From big-c with Apache License 2.0
private void serializeFileDiffList(INodeFile file, OutputStream out)
    throws IOException {
  FileWithSnapshotFeature sf = file.getFileWithSnapshotFeature();
  if (sf != null) {
    List<FileDiff> diffList = sf.getDiffs().asList();
    SnapshotDiffSection.DiffEntry entry = SnapshotDiffSection.DiffEntry
        .newBuilder().setInodeId(file.getId()).setType(Type.FILEDIFF)
        .setNumOfDiff(diffList.size()).build();
    entry.writeDelimitedTo(out);
    for (int i = diffList.size() - 1; i >= 0; i--) {
      FileDiff diff = diffList.get(i);
      SnapshotDiffSection.FileDiff.Builder fb = SnapshotDiffSection.FileDiff
          .newBuilder().setSnapshotId(diff.getSnapshotId())
          .setFileSize(diff.getFileSize());
      if (diff.getBlocks() != null) {
        for (Block block : diff.getBlocks()) {
          fb.addBlocks(PBHelper.convert(block));
        }
      }
      INodeFileAttributes copy = diff.snapshotINode;
      if (copy != null) {
        fb.setName(ByteString.copyFrom(copy.getLocalNameBytes()))
            .setSnapshotCopy(buildINodeFile(copy, parent.getSaverContext()));
      }
      fb.build().writeDelimitedTo(out);
    }
  }
}