Java Code Examples for org.apache.hadoop.hdfs.protocol.LocatedBlocks#getLastLocatedBlock()

The following examples show how to use org.apache.hadoop.hdfs.protocol.LocatedBlocks#getLastLocatedBlock(). The method returns the last located block of a file, or null if the NameNode reported none; it is most often used to inspect the tail block of a file that is still under construction. Each example is taken from the project and source file named in its header.
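
As a quick orientation before the examples, here is a minimal sketch of the typical call pattern: fetch the block locations for a file and inspect its last block. It assumes an open DistributedFileSystem handle; the method name printLastBlock and the path handling are illustrative, not taken from any example below.

public static void printLastBlock(DistributedFileSystem dfs, Path p)
    throws IOException {
  // Ask the NameNode for all block locations of the file.
  LocatedBlocks blocks = dfs.getClient().getNamenode()
      .getBlockLocations(p.toString(), 0, Long.MAX_VALUE);
  // The last located block may be null, e.g. for a zero-length file.
  LocatedBlock last = blocks.getLastLocatedBlock();
  if (last != null) {
    System.out.println("last block " + last.getBlock()
        + " offset=" + last.getStartOffset()
        + " size=" + last.getBlockSize());
  }
}
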
Example 1
Source File: PBHelper.java    From hadoop with Apache License 2.0
// Convert a LocatedBlocks instance into its protobuf wire representation.
public static LocatedBlocksProto convert(LocatedBlocks lb) {
  if (lb == null) {
    return null;
  }
  LocatedBlocksProto.Builder builder = 
      LocatedBlocksProto.newBuilder();
  if (lb.getLastLocatedBlock() != null) {
    builder.setLastBlock(PBHelper.convert(lb.getLastLocatedBlock()));
  }
  if (lb.getFileEncryptionInfo() != null) {
    builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
  }
  return builder.setFileLength(lb.getFileLength())
      .setUnderConstruction(lb.isUnderConstruction())
      .addAllBlocks(PBHelper.convertLocatedBlock2(lb.getLocatedBlocks()))
      .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
}
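
For context, PBHelper also defines the inverse conversion from the protobuf form back to LocatedBlocks. The sketch below shows roughly what that direction looks like in the same Hadoop line; treat the exact LocatedBlocks constructor arguments as an assumption rather than verbatim source.

public static LocatedBlocks convert(LocatedBlocksProto lb) {
  if (lb == null) {
    return null;
  }
  // Optional proto fields (last block, encryption info) map to null.
  return new LocatedBlocks(
      lb.getFileLength(), lb.getUnderConstruction(),
      PBHelper.convertLocatedBlock(lb.getBlocksList()),
      lb.hasLastBlock() ? PBHelper.convert(lb.getLastBlock()) : null,
      lb.getIsLastBlockComplete(),
      lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) : null);
}
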
 
Example 2
Source File: TestFileTruncate.java    From hadoop with Apache License 2.0
/** Poll until block recovery for the file at p completes, retrying up to
 *  attempts times with sleepMs milliseconds between attempts. */
public static void checkBlockRecovery(Path p, DistributedFileSystem dfs,
    int attempts, long sleepMs) throws IOException {
  boolean success = false;
  for (int i = 0; i < attempts; i++) {
    LocatedBlocks blocks = getLocatedBlocks(p, dfs);
    boolean noLastBlock = blocks.getLastLocatedBlock() == null;
    if (!blocks.isUnderConstruction() &&
        (noLastBlock || blocks.isLastBlockComplete())) {
      success = true;
      break;
    }
    try { Thread.sleep(sleepMs); } catch (InterruptedException ignored) {}
  }
  assertThat("inode should complete in ~" + sleepMs * attempts + " ms.",
      success, is(true));
}
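
A hypothetical invocation of this helper (path and polling values are illustrative):

checkBlockRecovery(new Path("/test/file"), dfs, 300, 100L);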
 
Example 3
Source File: TestBlockToken.java    From hadoop with Apache License 2.0
/**
 * This test writes a file and gets the block locations without closing the
 * file, then checks the block token in the last block. The token is
 * verified by ensuring it is of the correct kind.
 * 
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testBlockTokenInLastLocatedBlock() throws IOException,
    InterruptedException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 512);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(1).build();
  cluster.waitActive();

  try {
    FileSystem fs = cluster.getFileSystem();
    String fileName = "/testBlockTokenInLastLocatedBlock";
    Path filePath = new Path(fileName);
    FSDataOutputStream out = fs.create(filePath, (short) 1);
    out.write(new byte[1000]);
    // ensure that the first block is written out (see FSOutputSummer#flush)
    out.flush();
    LocatedBlocks locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(
        fileName, 0, 1000);
    while (locatedBlocks.getLastLocatedBlock() == null) {
      Thread.sleep(100);
      locatedBlocks = cluster.getNameNodeRpc().getBlockLocations(fileName, 0,
          1000);
    }
    Token<BlockTokenIdentifier> token = locatedBlocks.getLastLocatedBlock()
        .getBlockToken();
    Assert.assertEquals(BlockTokenIdentifier.KIND_NAME, token.getKind());
    out.close();
  } finally {
    cluster.shutdown();
  }
}
 
Example 4
Source File: TestAbandonBlock.java    From hadoop with Apache License 2.0
/** Abandon a block while creating a file. */
@Test
public void testAbandonBlock() throws IOException {
  String src = FILE_NAME_PREFIX + "foo";

  // Start writing a file but do not close it
  FSDataOutputStream fout = fs.create(new Path(src), true, 4096, (short)1, 512L);
  for (int i = 0; i < 1024; i++) {
    fout.write(123);
  }
  fout.hflush();
  long fileId = ((DFSOutputStream)fout.getWrappedStream()).getFileId();

  // Now abandon the last block
  DFSClient dfsclient = DFSClientAdapter.getDFSClient(fs);
  LocatedBlocks blocks =
    dfsclient.getNamenode().getBlockLocations(src, 0, Integer.MAX_VALUE);
  int originalNumBlocks = blocks.locatedBlockCount();
  LocatedBlock b = blocks.getLastLocatedBlock();
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);
  
  // call abandonBlock again to make sure the operation is idempotent
  dfsclient.getNamenode().abandonBlock(b.getBlock(), fileId, src,
      dfsclient.clientName);

  // And close the file
  fout.close();

  // Restart the NameNode and check that the block remains abandoned
  cluster.restartNameNode();
  blocks = dfsclient.getNamenode().getBlockLocations(src, 0,
      Integer.MAX_VALUE);
  Assert.assertEquals("Blocks " + b + " has not been abandoned.",
      orginalNumBlocks, blocks.locatedBlockCount() + 1);
}
 
Example 5
Source File: TestINodeFileUnderConstructionWithSnapshot.java    From hadoop with Apache License 2.0
/**
 * Call DFSClient#callGetBlockLocations(...) for a snapshot file and make
 * sure only blocks within the requested size range are returned.
 */
@Test
public void testGetBlockLocations() throws Exception {
  final Path root = new Path("/");
  final Path file = new Path("/file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  
  // take a snapshot on root
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  
  final Path fileInSnapshot = SnapshotTestHelper.getSnapshotPath(root,
      "s1", file.getName());
  FileStatus status = hdfs.getFileStatus(fileInSnapshot);
  // make sure we record the size for the file
  assertEquals(BLOCKSIZE, status.getLen());
  
  // append data to file
  DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE - 1);
  status = hdfs.getFileStatus(fileInSnapshot);
  // the size of snapshot file should still be BLOCKSIZE
  assertEquals(BLOCKSIZE, status.getLen());
  // the size of the file should be (2 * BLOCKSIZE - 1)
  status = hdfs.getFileStatus(file);
  assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
  
  // call DFSClient#callGetBlockLocations for the file in snapshot
  LocatedBlocks blocks = DFSClientAdapter.callGetBlockLocations(
      cluster.getNameNodeRpc(), fileInSnapshot.toString(), 0, Long.MAX_VALUE);
  List<LocatedBlock> blockList = blocks.getLocatedBlocks();
  
  // should be only one block
  assertEquals(BLOCKSIZE, blocks.getFileLength());
  assertEquals(1, blockList.size());
  
  // check the last block
  LocatedBlock lastBlock = blocks.getLastLocatedBlock();
  assertEquals(0, lastBlock.getStartOffset());
  assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
  
  // take another snapshot
  SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
  final Path fileInSnapshot2 = SnapshotTestHelper.getSnapshotPath(root,
      "s2", file.getName());
  
  // append data to file without closing
  HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  
  status = hdfs.getFileStatus(fileInSnapshot2);
  // the size of snapshot file should be BLOCKSIZE*2-1
  assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
  // the size of the file should be (3 * BLOCKSIZE - 1)
  status = hdfs.getFileStatus(file);
  assertEquals(BLOCKSIZE * 3 - 1, status.getLen());
  
  blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
      fileInSnapshot2.toString(), 0, Long.MAX_VALUE);
  assertFalse(blocks.isUnderConstruction());
  assertTrue(blocks.isLastBlockComplete());
  blockList = blocks.getLocatedBlocks();
  
  // should be 2 blocks
  assertEquals(BLOCKSIZE * 2 - 1, blocks.getFileLength());
  assertEquals(2, blockList.size());
  
  // check the last block
  lastBlock = blocks.getLastLocatedBlock();
  assertEquals(BLOCKSIZE, lastBlock.getStartOffset());
  assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
  
  blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
      fileInSnapshot2.toString(), BLOCKSIZE, 0);
  blockList = blocks.getLocatedBlocks();
  assertEquals(1, blockList.size());
  
  // check blocks for file being written
  blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
      file.toString(), 0, Long.MAX_VALUE);
  blockList = blocks.getLocatedBlocks();
  assertEquals(3, blockList.size());
  assertTrue(blocks.isUnderConstruction());
  assertFalse(blocks.isLastBlockComplete());
  
  lastBlock = blocks.getLastLocatedBlock();
  assertEquals(BLOCKSIZE * 2, lastBlock.getStartOffset());
  assertEquals(BLOCKSIZE - 1, lastBlock.getBlockSize());
  out.close();
}
 