Java Code Examples for org.apache.hadoop.hdfs.server.namenode.INodeFile#getBlocks()

The following examples show how to use org.apache.hadoop.hdfs.server.namenode.INodeFile#getBlocks(). All of them are taken from the Apache Hadoop source tree; the original source file and license are noted above each example.
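Before the individual examples, here is a minimal sketch of the usual access path to this method: start a MiniDFSCluster, create a file, resolve its path to an INodeFile through the NameNode's FSDirectory, and iterate over the returned block array. It is not taken from any of the files below; it assumes Hadoop 2.7-era HDFS test dependencies (MiniDFSCluster, DFSTestUtil) on the classpath, and the class name InodeFileGetBlocksSketch is made up for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

/** Illustrative only: resolve an INodeFile and inspect its block array. */
public class InodeFileGetBlocksSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Use a 1 MB block size so that a small test file spans two blocks.
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 1024);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem hdfs = cluster.getFileSystem();
      Path file = new Path("/demo/file");
      // 2 MB of data, replication 1, fixed seed.
      DFSTestUtil.createFile(hdfs, file, 2 * 1024 * 1024, (short) 1, 0L);

      // Resolve the path to its INodeFile inside the NameNode.
      FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
      INodeFile inode = fsdir.getINode(file.toString()).asFile();

      // getBlocks() exposes the NameNode's per-block metadata for the file.
      for (BlockInfoContiguous blk : inode.getBlocks()) {
        System.out.println(blk + " numBytes=" + blk.getNumBytes());
      }
    } finally {
      cluster.shutdown();
    }
  }
}

The examples below follow the same pattern, using getINode4Write(...) instead of getINode(...) where the file is expected to be open for write.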
Example 1
Source File: TestRetryCacheWithHA.java    From hadoop with Apache License 2.0
@Override
boolean checkNamenodeBeforeReturn() throws Exception {
  INodeFile fileNode = cluster.getNamesystem(0).getFSDirectory()
      .getINode4Write(file).asFile();
  // The file's second block is still under construction, so the entry in
  // getBlocks() can be cast to inspect its expected storage locations.
  BlockInfoContiguousUnderConstruction blkUC =
      (BlockInfoContiguousUnderConstruction) (fileNode.getBlocks())[1];
  int datanodeNum = blkUC.getExpectedStorageLocations().length;
  // Poll until the block is expected on two datanodes, giving up after
  // CHECKTIMES attempts.
  for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) {
    Thread.sleep(1000);
    datanodeNum = blkUC.getExpectedStorageLocations().length;
  }
  return datanodeNum == 2;
}
 
Example 2
Source File: TestSnapshotBlocksMap.java    From hadoop with Apache License 2.0
static INodeFile assertBlockCollection(String path, int numBlocks,
   final FSDirectory dir, final BlockManager blkManager) throws Exception {
  final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  // Every block returned by getBlocks() should be registered with the
  // BlockManager and report this file as its block collection.
  for(BlockInfoContiguous b : file.getBlocks()) {
    assertBlockCollection(blkManager, file, b);
  }
  return file;
}
 
Example 3
Source File: TestSnapshotBlocksMap.java    From hadoop with Apache License 2.0
/**
 * Make sure we delete the 0-sized block when deleting an INodeFileUCWithSnapshot.
 */
@Test
public void testDeletionWithZeroSizeBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(bar, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
      bar.getName());
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Example 4
Source File: TestSnapshotBlocksMap.java    From hadoop with Apache License 2.0
/**
 * Make sure we delete the 0-sized block when deleting an under-construction file.
 */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  barNode = fsdir.getINode4Write(bar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  hdfs.delete(subDir, true);
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Example 5
Source File: TestSnapshotBlocksMap.java    From hadoop with Apache License 2.0
/**
 * 1. Rename an under-construction file with 0-sized blocks after taking a snapshot.
 * 2. Delete the renamed directory.
 * Make sure we delete the 0-sized block.
 * See HDFS-5476.
 */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
  final Path foo = new Path("/foo");
  final Path subDir = new Path(foo, "sub");
  final Path bar = new Path(subDir, "bar");
  DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);

  hdfs.append(bar);

  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
  cluster.getNameNodeRpc()
      .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
          null, barNode.getId(), null);

  SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");

  // rename bar
  final Path bar2 = new Path(subDir, "bar2");
  hdfs.rename(bar, bar2);
  
  INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
  blks = bar2Node.getBlocks();
  assertEquals(2, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
  assertEquals(0, blks[1].getNumBytes());

  // delete subDir
  hdfs.delete(subDir, true);
  
  final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
  barNode = fsdir.getINode(sbar.toString()).asFile();
  blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
 
Example 6
Source File: TestSnapshotBlocksMap.java    From hadoop with Apache License 2.0
/**
 * Make sure that a delete of a non-zero-length file which results in a
 * zero-length file in a snapshot works.
 */
@Test
public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
  final Path foo = new Path("/foo");
  final Path bar = new Path(foo, "bar");
  final byte[] testData = "foo bar baz".getBytes();
  
  // Create a zero-length file.
  DFSTestUtil.createFile(hdfs, bar, 0, REPLICATION, 0L);
  assertEquals(0, fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);

  // Create a snapshot that includes that file.
  SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
  
  // Extend that file.
  FSDataOutputStream out = hdfs.append(bar);
  out.write(testData);
  out.close();
  INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
  BlockInfoContiguous[] blks = barNode.getBlocks();
  assertEquals(1, blks.length);
  assertEquals(testData.length, blks[0].getNumBytes());
  
  // Delete the file.
  hdfs.delete(bar, true);
  
  // Now make sure that the NN can still save an fsimage successfully.
  cluster.getNameNode().getRpcServer().setSafeMode(
      SafeModeAction.SAFEMODE_ENTER, false);
  cluster.getNameNode().getRpcServer().saveNamespace();
}
 
Example 7
Source File: CacheReplicationMonitor.java    From hadoop with Apache License 2.0
/**
 * Apply a CacheDirective to a file.
 * 
 * @param directive The CacheDirective to apply.
 * @param file The file.
 */
private void rescanFile(CacheDirective directive, INodeFile file) {
  BlockInfoContiguous[] blockInfos = file.getBlocks();

  // Increment the "needed" statistics
  directive.addFilesNeeded(1);
  // We don't cache UC blocks, don't add them to the total here
  long neededTotal = file.computeFileSizeNotIncludingLastUcBlock() *
      directive.getReplication();
  directive.addBytesNeeded(neededTotal);

  // The pool's bytesNeeded is incremented as we scan. If the demand
  // thus far plus the demand of this file would exceed the pool's limit,
  // do not cache this file.
  CachePool pool = directive.getPool();
  if (pool.getBytesNeeded() > pool.getLimit()) {
    LOG.debug("Directive {}: not scanning file {} because " +
        "bytesNeeded for pool {} is {}, but the pool's limit is {}",
        directive.getId(),
        file.getFullPathName(),
        pool.getPoolName(),
        pool.getBytesNeeded(),
        pool.getLimit());
    return;
  }

  long cachedTotal = 0;
  for (BlockInfoContiguous blockInfo : blockInfos) {
    if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
      // We don't try to cache blocks that are under construction.
      LOG.trace("Directive {}: can't cache block {} because it is in state "
              + "{}, not COMPLETE.", directive.getId(), blockInfo,
          blockInfo.getBlockUCState()
      );
      continue;
    }
    Block block = new Block(blockInfo.getBlockId());
    CachedBlock ncblock = new CachedBlock(block.getBlockId(),
        directive.getReplication(), mark);
    CachedBlock ocblock = cachedBlocks.get(ncblock);
    if (ocblock == null) {
      cachedBlocks.put(ncblock);
      ocblock = ncblock;
    } else {
      // Update bytesUsed using the current replication levels.
      // Assumptions: we assume that all the blocks are the same length
      // on each datanode.  We can assume this because we're only caching
      // blocks in state COMPLETE.
      // Note that if two directives are caching the same block(s), they will
      // both get them added to their bytesCached.
      List<DatanodeDescriptor> cachedOn =
          ocblock.getDatanodes(Type.CACHED);
      long cachedByBlock = Math.min(cachedOn.size(),
          directive.getReplication()) * blockInfo.getNumBytes();
      cachedTotal += cachedByBlock;

      if ((mark != ocblock.getMark()) ||
          (ocblock.getReplication() < directive.getReplication())) {
        //
        // Overwrite the block's replication and mark in two cases:
        //
        // 1. If the mark on the CachedBlock is different from the mark for
        // this scan, that means the block hasn't been updated during this
        // scan, and we should overwrite whatever is there, since it is no
        // longer valid.
        //
        // 2. If the replication in the CachedBlock is less than what the
        // directive asks for, we want to increase the block's replication
        // field to what the directive asks for.
        //
        ocblock.setReplicationAndMark(directive.getReplication(), mark);
      }
    }
    LOG.trace("Directive {}: setting replication for block {} to {}",
        directive.getId(), blockInfo, ocblock.getReplication());
  }
  // Increment the "cached" statistics
  directive.addBytesCached(cachedTotal);
  if (cachedTotal == neededTotal) {
    directive.addFilesCached(1);
  }
  LOG.debug("Directive {}: caching {}: {}/{} bytes", directive.getId(),
      file.getFullPathName(), cachedTotal, neededTotal);
}
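A recurring detail across these examples is that the array returned by getBlocks() may end with a block that is still under construction: Example 1 casts that entry to BlockInfoContiguousUnderConstruction, and rescanFile above skips anything whose state is not COMPLETE. The helper below is a minimal sketch of the same idea, not part of the original sources; the class and method names are made up, and it assumes the same Hadoop 2.7-era classes.

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;

/** Illustrative helper: total the bytes held in finalized blocks only. */
final class CompleteBlockBytes {
  private CompleteBlockBytes() {}

  static long completedBytes(INodeFile file) {
    long total = 0;
    for (BlockInfoContiguous blk : file.getBlocks()) {
      // Skip a trailing under-construction block; its length is still
      // changing, so (as in rescanFile above) it is not counted.
      if (!BlockUCState.COMPLETE.equals(blk.getBlockUCState())) {
        continue;
      }
      total += blk.getNumBytes();
    }
    return total;
  }
}

For the common case of computing a file's stable size, rescanFile above relies on the in-tree INodeFile#computeFileSizeNotIncludingLastUcBlock() instead of iterating the blocks itself.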
 