org.apache.hadoop.hdfs.server.namenode.FSDirectory Java Examples

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.FSDirectory, the NameNode class that manages the in-memory directory tree of the HDFS namespace. The examples are drawn from open-source projects; the source file and license are noted above each one.
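Before diving into the examples, a quick orientation: in tests, FSDirectory is usually obtained from the cluster's FSNamesystem and then used to resolve paths to INodes. Below is a minimal sketch of that pattern, assuming the same MiniDFSCluster setup the examples use; it is not taken from any of the listed source files.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INode;

public class FSDirectoryOrientation {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      // FSDirectory is the NameNode's in-memory view of the namespace tree.
      FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
      // Resolve an absolute path to its INode; "/" resolves to the root directory.
      INode root = fsdir.getINode("/");
      System.out.println("root is a directory: " + root.isDirectory());
    } finally {
      cluster.shutdown();
    }
  }
}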
Example #1
Source File: TestQuota.java    From RDFS with Apache License 2.0
/**
 * Test HDFS operations that change the disk space consumed by a directory
 * tree, namely create, rename, delete, append, and setReplication.
 *
 * This is based on testNamespaceCommands() in the same test class.
 */
public void testSpaceCommands() throws Exception {
  final Configuration conf = new Configuration();
  // set a smaller block size so that we can test with smaller 
  // diskspace quotas
  conf.set("dfs.block.size", "512");
  conf.setBoolean("dfs.support.append", true);
  final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  final FileSystem fs = cluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
              fs instanceof DistributedFileSystem);

  final DistributedFileSystem dfs = (DistributedFileSystem)fs;
  FSDirectory fsd = cluster.getNameNode().namesystem.dir;
  INodeDirectoryWithQuota rootDir =
      (INodeDirectoryWithQuota) fsd.getExistingPathINodes("/")[0];
  try {
    generateFiles(dfs, rootDir, 1024, 512);
    generateFiles(dfs, rootDir, 1019, 512);
  } finally {
    cluster.shutdown();
  }
}
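Note: generateFiles is a helper defined elsewhere in TestQuota.java and is not shown here. The quota accounting this test exercises can also be observed through public client APIs; the following is an illustrative sketch (the path, sizes, and the DistributedFileSystem variable dfs are assumptions, not part of the original test):

// Illustrative sketch: set a disk-space quota and observe consumed space.
final Path quotaDir = new Path("/quotaTest");  // hypothetical path
dfs.mkdirs(quotaDir);
// Namespace quota effectively unlimited; disk-space quota of 8 * 512 bytes.
dfs.setQuota(quotaDir, Long.MAX_VALUE - 1, 8 * 512);
// A 512-byte file at replication 2 charges 2 * 512 bytes against the quota.
DFSTestUtil.createFile(dfs, new Path(quotaDir, "f"), 512, (short) 2, 0L);
assertEquals(2 * 512, dfs.getContentSummary(quotaDir).getSpaceConsumed());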
 
Example #2
Source File: TestNestedSnapshots.java    From big-c with Apache License 2.0
/**
 * When we have nested snapshottable directories and we try to reset a
 * snapshottable descendant back to a regular directory, we need to replace
 * the snapshottable descendant with an INodeDirectoryWithSnapshot.
 */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  final Path dir = new Path("/dir");
  final Path sub = new Path(dir, "sub");
  hdfs.mkdirs(sub);
  
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  final Path file = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
  
  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  INode subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
  
  hdfs.allowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.isDirectory() && subNode.asDirectory().isSnapshottable());
  
  hdfs.disallowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
}
 
Example #3
Source File: TestNestedSnapshots.java    From hadoop with Apache License 2.0
/**
 * When we have nested snapshottable directories and we try to reset a
 * snapshottable descendant back to a regular directory, we need to replace
 * the snapshottable descendant with an INodeDirectoryWithSnapshot.
 */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
  cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);

  final Path dir = new Path("/dir");
  final Path sub = new Path(dir, "sub");
  hdfs.mkdirs(sub);
  
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
  final Path file = new Path(sub, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
  
  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  INode subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
  
  hdfs.allowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.isDirectory() && subNode.asDirectory().isSnapshottable());
  
  hdfs.disallowSnapshot(sub);
  subNode = fsdir.getINode(sub.toString());
  assertTrue(subNode.asDirectory().isWithSnapshot());
}
 
Example #4
Source File: DFSUtil.java    From big-c with Apache License 2.0
/**
 * Whether the pathname is valid.  Currently prohibits relative paths, 
 * names which contain a ":" or "//", or other non-canonical paths.
 */
public static boolean isValidName(String src) {
  // Path must be absolute.
  if (!src.startsWith(Path.SEPARATOR)) {
    return false;
  }
    
  // Check for ".." "." ":" "/"
  String[] components = StringUtils.split(src, '/');
  for (int i = 0; i < components.length; i++) {
    String element = components[i];
    if (element.equals(".")  ||
        (element.indexOf(":") >= 0)  ||
        (element.indexOf("/") >= 0)) {
      return false;
    }
    // ".." is allowed in path starting with /.reserved/.inodes
    if (element.equals("..")) {
      if (components.length > 4
          && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
          && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
        continue;
      }
      return false;
    }
    // The string may start or end with a /, but not have
    // "//" in the middle.
    if (element.isEmpty() && i != components.length - 1 &&
        i != 0) {
      return false;
    }
  }
  return true;
}
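A few illustrative checks of what this validation accepts and rejects; the results follow from the rules above, the inode id is a made-up example, and JUnit assertions are assumed to be statically imported:

assertTrue(DFSUtil.isValidName("/a/b"));         // absolute, canonical
assertFalse(DFSUtil.isValidName("a/b"));         // relative path
assertFalse(DFSUtil.isValidName("/a//b"));       // "//" in the middle
assertFalse(DFSUtil.isValidName("/a:b"));        // ":" in a component
assertFalse(DFSUtil.isValidName("/a/../b"));     // ".." outside /.reserved/.inodes
assertTrue(DFSUtil.isValidName("/.reserved/.inodes/16386/..")); // allowed special case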
 
Example #5
Source File: TestSnapshotBlocksMap.java    From big-c with Apache License 2.0
static INodeFile assertBlockCollection(String path, int numBlocks,
   final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for(BlockInfoContiguous b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}
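Hedged usage from a caller's perspective, assuming fsdir and blkManager are in scope (the path and block count are illustrative):

// Expect /dir/file to have exactly 2 blocks, each registered with the BlockManager.
INodeFile file = assertBlockCollection("/dir/file", 2, fsdir, blkManager);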
 
Example #6
Source File: DFSUtil.java    From hadoop with Apache License 2.0
/**
 * Whether the pathname is valid.  Currently prohibits relative paths, 
 * names which contain a ":" or "//", or other non-canonical paths.
 */
public static boolean isValidName(String src) {
  // Path must be absolute.
  if (!src.startsWith(Path.SEPARATOR)) {
    return false;
  }
    
  // Check for ".." "." ":" "/"
  String[] components = StringUtils.split(src, '/');
  for (int i = 0; i < components.length; i++) {
    String element = components[i];
    if (element.equals(".")  ||
        (element.indexOf(":") >= 0)  ||
        (element.indexOf("/") >= 0)) {
      return false;
    }
    // ".." is allowed in path starting with /.reserved/.inodes
    if (element.equals("..")) {
      if (components.length > 4
          && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
          && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
        continue;
      }
      return false;
    }
    // The string may start or end with a /, but not have
    // "//" in the middle.
    if (element.isEmpty() && i != components.length - 1 &&
        i != 0) {
      return false;
    }
  }
  return true;
}
 
Example #7
Source File: TestSnapshotBlocksMap.java    From hadoop with Apache License 2.0
static INodeFile assertBlockCollection(String path, int numBlocks,
   final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for(BlockInfoContiguous b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}
 
Example #8
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 */
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path subdir2 = new Path(dir2, "subdir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subdir2);
  
  final Path foo = new Path(dir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  
  // set ns quota of dir2 to 4, so the current remaining is 2 (already has
  // dir2 and subdir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  
  final Path foo2 = new Path(subdir2, foo.getName());
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new NSQuotaExceededException("fake exception")).when(fsdir2)
      .addLastINode((INodesInPath) Mockito.anyObject(),
          (INode) Mockito.anyObject(), Mockito.anyBoolean());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/subdir2/foo. 
  // FSDirectory#verifyQuota4Rename will pass since the remaining quota is 2.
  // However, the rename operation will fail since we let addLastINode throw
  // NSQuotaExceededException
  boolean rename = hdfs.rename(foo, foo2);
  assertFalse(rename);
  
  // check the undo
  assertTrue(hdfs.exists(foo));
  assertTrue(hdfs.exists(bar));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  INode barNode = fsdir2.getINode4Write(bar.toString());
  assertTrue(barNode.getClass() == INodeFile.class);
  assertSame(fooNode, barNode.getParent());
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  
  // check dir2
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(2, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(subdir2.toString()));
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
 
Example #9
Source File: TestRenameWithSnapshots.java    From hadoop with Apache License 2.0
/**
 * Test the rename undo when removing dst node fails
 */
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subsub_dir2 = new Path(sub_dir2, "subdir");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subsub_dir2);
  
  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);
  
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  
  // set ns quota of dir2 to 4, so the current remaining is 1 (already has
  // dir2, sub_dir2, and subsub_dir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new RuntimeException("fake exception")).when(fsdir2)
      .removeLastINode((INodesInPath) Mockito.anyObject());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/sub_dir2/subsub_dir2. 
  // FSDirectory#verifyQuota4Rename will pass since foo is only counted
  // as 1 against the NS quota. However, the rename operation will fail when
  // removing subsub_dir2.
  try {
    hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
    fail("Expect QuotaExceedException");
  } catch (Exception e) {
    String msg = "fake exception";
    GenericTestUtils.assertExceptionContains(msg, e);
  }
  
  // check the undo
  assertTrue(hdfs.exists(foo));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  assertSame(dir1Node, fooNode.getParent());
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  
  // check dir2
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(3, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(sub_dir2.toString()));
  INode subsubdir2Node = fsdir2.getINode4Write(subsub_dir2.toString());
  assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
  assertSame(subdir2Node, subsubdir2Node.getParent());
  
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
 
Example #10
Source File: SnapshotTestHelper.java    From hadoop with Apache License 2.0
public static void dumpTree2File(FSDirectory fsdir, File f) throws IOException{
  final PrintWriter out = new PrintWriter(new FileWriter(f, false), true);
  fsdir.getINode("/").dumpTreeRecursively(out, new StringBuilder(),
      Snapshot.CURRENT_STATE_ID);
  out.close();
}
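A hedged usage example, dumping the namespace tree to a temporary file for debugging (the output location is illustrative):

File dump = new File(System.getProperty("java.io.tmpdir"), "fsdir-tree.txt");
SnapshotTestHelper.dumpTree2File(cluster.getNamesystem().getFSDirectory(), dump);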
 
Example #11
Source File: TestSnapshotDeletion.java    From hadoop with Apache License 2.0
private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
    throws IOException {
  final String dirStr = dir.toString();
  return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr);
}
 
Example #12
Source File: SnapshotManager.java    From big-c with Apache License 2.0
public SnapshotManager(final FSDirectory fsdir) {
  this.fsdir = fsdir;
}
 
Example #13
Source File: CacheReplicationMonitor.java    From big-c with Apache License 2.0
/**
 * Scan all CacheDirectives.  Use the information to figure out
 * what cache replication factor each block should have.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  final long now = new Date().getTime();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      LOG.debug("Directive {}: the directive expired at {} (now = {})",
           directive.getId(), directive.getExpiryTime(), now);
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path);
    } catch (UnresolvedLinkException e) {
      // We don't cache through symlinks
      LOG.debug("Directive {}: got UnresolvedLinkException while resolving "
              + "path {}", directive.getId(), path
      );
      continue;
    }
    if (node == null)  {
      LOG.debug("Directive {}: No inode found at {}", directive.getId(),
          path);
    } else if (node.isDirectory()) {
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children = dir
          .getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ",
          directive.getId(), node);
    }
  }
}
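For context, the directives this loop scans are created through the client API. A hedged sketch, assuming dfs is a DistributedFileSystem; the pool and path names are illustrative:

// Create a cache pool, then a directive that rescanCacheDirectives() will pick up.
dfs.addCachePool(new CachePoolInfo("testPool"));
long directiveId = dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
    .setPath(new Path("/cached/file"))
    .setPool("testPool")
    .setReplication((short) 2)
    .build());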
 
Example #14
Source File: TestFileAppend4.java    From big-c with Apache License 2.0
/**
 * Test that an append with no locations fails with an exception
 * showing insufficient locations.
 */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();

  // lower heartbeat interval for fast recognition of DN
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
      .build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // Check for replications
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs = fileSystem.dfs.getNamenode().
        getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().
        getLocations();
    for (DataNode dn : dnsOfCluster) {
      for (DatanodeInfo loc : dnsWithLocations) {
        if (dn.getDatanodeId().equals(loc)) {
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }

    // Wait till 0 replication is recognized
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

    // Append to the file. At this point there are 3 live DNs, but none of
    // them has the block.
    try {
      fileSystem.append(f);
      fail("Append should fail because insufficient locations");
    } catch (IOException e) {
      LOG.info("Expected exception: ", e);
    }
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode = INodeFile.
        valueOf(dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
 
Example #15
Source File: TestSnapshotDeletion.java    From big-c with Apache License 2.0
private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
    throws IOException {
  final String dirStr = dir.toString();
  return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr);
}
 
Example #16
Source File: TestFileAppend4.java    From hadoop with Apache License 2.0
/**
 * Test that an append with no locations fails with an exception
 * showing insufficient locations.
 */
@Test(timeout = 60000)
public void testAppendInsufficientLocations() throws Exception {
  Configuration conf = new Configuration();

  // lower heartbeat interval for fast recognition of DN
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
      1000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 3000);

  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4)
      .build();
  DistributedFileSystem fileSystem = null;
  try {
    // create a file with replication 2
    fileSystem = cluster.getFileSystem();
    Path f = new Path("/testAppend");
    FSDataOutputStream create = fileSystem.create(f, (short) 2);
    create.write("/testAppend".getBytes());
    create.close();

    // Check for replications
    DFSTestUtil.waitReplication(fileSystem, f, (short) 2);

    // Shut down all DNs that have the last block location for the file
    LocatedBlocks lbs = fileSystem.dfs.getNamenode().
        getBlockLocations("/testAppend", 0, Long.MAX_VALUE);
    List<DataNode> dnsOfCluster = cluster.getDataNodes();
    DatanodeInfo[] dnsWithLocations = lbs.getLastLocatedBlock().
        getLocations();
    for (DataNode dn : dnsOfCluster) {
      for (DatanodeInfo loc : dnsWithLocations) {
        if (dn.getDatanodeId().equals(loc)) {
          dn.shutdown();
          DFSTestUtil.waitForDatanodeDeath(dn);
        }
      }
    }

    // Wait till 0 replication is recognized
    DFSTestUtil.waitReplication(fileSystem, f, (short) 0);

    // Append to the file. At this point there are 3 live DNs, but none of
    // them has the block.
    try {
      fileSystem.append(f);
      fail("Append should fail because insufficient locations");
    } catch (IOException e) {
      LOG.info("Expected exception: ", e);
    }
    FSDirectory dir = cluster.getNamesystem().getFSDirectory();
    final INodeFile inode = INodeFile.
        valueOf(dir.getINode("/testAppend"), "/testAppend");
    assertTrue("File should remain closed", !inode.isUnderConstruction());
  } finally {
    if (null != fileSystem) {
      fileSystem.close();
    }
    cluster.shutdown();
  }
}
 
Example #17
Source File: CacheReplicationMonitor.java    From hadoop with Apache License 2.0
/**
 * Scan all CacheDirectives.  Use the information to figure out
 * what cache replication factor each block should have.
 */
private void rescanCacheDirectives() {
  FSDirectory fsDir = namesystem.getFSDirectory();
  final long now = new Date().getTime();
  for (CacheDirective directive : cacheManager.getCacheDirectives()) {
    scannedDirectives++;
    // Skip processing this entry if it has expired
    if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
      LOG.debug("Directive {}: the directive expired at {} (now = {})",
           directive.getId(), directive.getExpiryTime(), now);
      continue;
    }
    String path = directive.getPath();
    INode node;
    try {
      node = fsDir.getINode(path);
    } catch (UnresolvedLinkException e) {
      // We don't cache through symlinks
      LOG.debug("Directive {}: got UnresolvedLinkException while resolving "
              + "path {}", directive.getId(), path
      );
      continue;
    }
    if (node == null)  {
      LOG.debug("Directive {}: No inode found at {}", directive.getId(),
          path);
    } else if (node.isDirectory()) {
      INodeDirectory dir = node.asDirectory();
      ReadOnlyList<INode> children = dir
          .getChildrenList(Snapshot.CURRENT_STATE_ID);
      for (INode child : children) {
        if (child.isFile()) {
          rescanFile(directive, child.asFile());
        }
      }
    } else if (node.isFile()) {
      rescanFile(directive, node.asFile());
    } else {
      LOG.debug("Directive {}: ignoring non-directive, non-file inode {} ",
          directive.getId(), node);
    }
  }
}
 
Example #18
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Test rename while the rename operation will exceed the quota in the dst
 * tree.
 */
@Test
public void testRenameUndo_5() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path subdir2 = new Path(dir2, "subdir2");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subdir2);
  
  final Path foo = new Path(dir1, "foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
  
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  
  // set ns quota of dir2 to 4, so the current remaining is 2 (already has
  // dir2 and subdir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  
  final Path foo2 = new Path(subdir2, foo.getName());
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new NSQuotaExceededException("fake exception")).when(fsdir2)
      .addLastINode((INodesInPath) Mockito.anyObject(),
          (INode) Mockito.anyObject(), Mockito.anyBoolean());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/subdir2/foo. 
  // FSDirectory#verifyQuota4Rename will pass since the remaining quota is 2.
  // However, the rename operation will fail since we let addLastINode throw
  // NSQuotaExceededException
  boolean rename = hdfs.rename(foo, foo2);
  assertFalse(rename);
  
  // check the undo
  assertTrue(hdfs.exists(foo));
  assertTrue(hdfs.exists(bar));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  INode barNode = fsdir2.getINode4Write(bar.toString());
  assertTrue(barNode.getClass() == INodeFile.class);
  assertSame(fooNode, barNode.getParent());
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  
  // check dir2
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(2, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(subdir2.toString()));
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
 
Example #19
Source File: TestRenameWithSnapshots.java    From big-c with Apache License 2.0
/**
 * Test the rename undo when removing dst node fails
 */
@Test
public void testRenameUndo_6() throws Exception {
  final Path test = new Path("/test");
  final Path dir1 = new Path(test, "dir1");
  final Path dir2 = new Path(test, "dir2");
  final Path sub_dir2 = new Path(dir2, "subdir");
  final Path subsub_dir2 = new Path(sub_dir2, "subdir");
  hdfs.mkdirs(dir1);
  hdfs.mkdirs(subsub_dir2);
  
  final Path foo = new Path(dir1, "foo");
  hdfs.mkdirs(foo);
  
  SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
  SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
  
  // set ns quota of dir2 to 4, so the current remaining is 1 (already has
  // dir2, sub_dir2, and subsub_dir2)
  hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
  FSDirectory fsdir2 = Mockito.spy(fsdir);
  Mockito.doThrow(new RuntimeException("fake exception")).when(fsdir2)
      .removeLastINode((INodesInPath) Mockito.anyObject());
  Whitebox.setInternalState(fsn, "dir", fsdir2);
  // rename /test/dir1/foo to /test/dir2/sub_dir2/subsub_dir2. 
  // FSDirectory#verifyQuota4Rename will pass since foo is only counted
  // as 1 against the NS quota. However, the rename operation will fail when
  // removing subsub_dir2.
  try {
    hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
    fail("Expect QuotaExceedException");
  } catch (Exception e) {
    String msg = "fake exception";
    GenericTestUtils.assertExceptionContains(msg, e);
  }
  
  // check the undo
  assertTrue(hdfs.exists(foo));
  INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
      .asDirectory();
  List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode fooNode = childrenList.get(0);
  assertTrue(fooNode.asDirectory().isWithSnapshot());
  assertSame(dir1Node, fooNode.getParent());
  List<DirectoryDiff> diffList = dir1Node
      .getDiffs().asList();
  assertEquals(1, diffList.size());
  DirectoryDiff diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
  
  // check dir2
  INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
  assertTrue(dir2Node.isSnapshottable());
  QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(3, counts.getNameSpace());
  assertEquals(0, counts.getStorageSpace());
  childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
      .getChildrenList(Snapshot.CURRENT_STATE_ID));
  assertEquals(1, childrenList.size());
  INode subdir2Node = childrenList.get(0);
  assertSame(dir2Node, subdir2Node.getParent());
  assertSame(subdir2Node, fsdir2.getINode4Write(sub_dir2.toString()));
  INode subsubdir2Node = fsdir2.getINode4Write(subsub_dir2.toString());
  assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
  assertSame(subdir2Node, subsubdir2Node.getParent());
  
  diffList = dir2Node.getDiffs().asList();
  assertEquals(1, diffList.size());
  diff = diffList.get(0);
  assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
  assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
 
Example #20
Source File: SnapshotTestHelper.java    From big-c with Apache License 2.0
public static void dumpTree2File(FSDirectory fsdir, File f) throws IOException{
  final PrintWriter out = new PrintWriter(new FileWriter(f, false), true);
  fsdir.getINode("/").dumpTreeRecursively(out, new StringBuilder(),
      Snapshot.CURRENT_STATE_ID);
  out.close();
}
 
Example #21
Source File: SnapshotManager.java    From hadoop with Apache License 2.0
public SnapshotManager(final FSDirectory fsdir) {
  this.fsdir = fsdir;
}