Java Code Examples for org.apache.hadoop.hdfs.DFSTestUtil#getAllBlocks()

The following examples show how to use org.apache.hadoop.hdfs.DFSTestUtil#getAllBlocks(). They are drawn from open source projects; the source file and originating project are noted above each example.
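As a quick orientation before the full test cases, here is a minimal sketch of the call in isolation. This is an illustrative sketch, not code from the projects above: the method name, file name, and file size are assumptions, while DFSTestUtil.getAllBlocks(FileSystem, Path) and the DFSTestUtil.createFile(fs, path, fileLen, replFactor, seed) overload are used the same way as in the examples that follow. The usual org.apache.hadoop.hdfs and JUnit imports are assumed.

/**
 * Minimal sketch (hypothetical test): write a small file on a single-node
 * MiniDFSCluster and list the blocks the NameNode reports for it.
 */
@Test
public void sketchGetAllBlocks() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    Path path = new Path("/sketch.dat");
    // createFile(fs, path, fileLen, replFactor, seed)
    DFSTestUtil.createFile(fs, path, 4096L, (short) 1, 0L);
    // getAllBlocks returns one LocatedBlock per block of the file.
    List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
    for (LocatedBlock lb : blocks) {
      System.out.println(lb.getBlock().getBlockName());
    }
  } finally {
    cluster.shutdown();
  }
}

With a 4 KB file and the default block size this list has a single entry; the tests below create multi-block files so that consecutive block IDs can be compared.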
Example 1
Source File: TestSequentialBlockId.java    From hadoop with Apache License 2.0 (identical code also appears in big-c)
/**
 * Test that block IDs are generated sequentially.
 *
 * @throws IOException
 */
@Test
public void testBlockIdGeneration() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();

    // Create a file that is 10 blocks long.
    Path path = new Path("testBlockIdGeneration.dat");
    DFSTestUtil.createFile(
        fs, path, IO_SIZE, BLOCK_SIZE * 10, BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks = DFSTestUtil.getAllBlocks(fs, path);
    LOG.info("Block0 id is " + blocks.get(0).getBlock().getBlockId());
    long nextBlockExpectedId = blocks.get(0).getBlock().getBlockId() + 1;

    // Ensure that the block IDs are sequentially increasing.
    for (int i = 1; i < blocks.size(); ++i) {
      long nextBlockId = blocks.get(i).getBlock().getBlockId();
      LOG.info("Block" + i + " id is " + nextBlockId);
      assertThat(nextBlockId, is(nextBlockExpectedId));
      ++nextBlockExpectedId;
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 2
Source File: TestFsck.java    From hadoop with Apache License 2.0 (identical code also appears in big-c)
/**
 * Test for the fsck -blockId check (blockIdCK).
 */
@Test
public void testBlockIdCK() throws Exception {
  final short REPL_FACTOR = 2;
  short NUM_DN = 2;
  final long blockSize = 512;

  String[] racks = {"/rack1", "/rack2"};
  String[] hosts = {"host1", "host2"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);

  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
          .racks(racks).build();

  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  DistributedFileSystem dfs = cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);

  DFSTestUtil util = new DFSTestUtil.Builder().
    setName(getClass().getSimpleName()).setNumFiles(1).build();
  //create files
  final String pathString = "/testfile";
  final Path path = new Path(pathString);
  util.createFile(dfs, path, 1024, REPL_FACTOR, 1000L);
  util.waitReplication(dfs, path, REPL_FACTOR);
  StringBuilder sb = new StringBuilder();
  for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
    sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
  }
  String[] bIds = sb.toString().split(" ");

  //run fsck
  try {
    //illegal input test
    String runFsckResult = runFsck(conf, 0, true, "/", "-blockId",
        "not_a_block_id");
    assertTrue(runFsckResult.contains("Incorrect blockId format:"));

    //general test
    runFsckResult = runFsck(conf, 0, true, "/", "-blockId", sb.toString());
    assertTrue(runFsckResult.contains(bIds[0]));
    assertTrue(runFsckResult.contains(bIds[1]));
    assertTrue(runFsckResult.contains(
        "Block replica on datanode/rack: host1/rack1 is HEALTHY"));
    assertTrue(runFsckResult.contains(
        "Block replica on datanode/rack: host2/rack2 is HEALTHY"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 3
Source File: TestFsck.java    From hadoop with Apache License 2.0 (identical code also appears in big-c)
/**
 * Test for the fsck -blockId check (blockIdCK) with a corrupted block.
 */
@Test
public void testBlockIdCKCorruption() throws Exception {
  short NUM_DN = 1;
  final long blockSize = 512;
  Random random = new Random();
  ExtendedBlock block;
  short repFactor = 1;
  String[] racks = {"/rack1"};
  String[] hosts = {"host1"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  MiniDFSCluster cluster = null;
  try {
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
            .racks(racks).build();

    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    DistributedFileSystem dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);

    DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(1).build();
    //create files
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, repFactor, 1000L);
    util.waitReplication(dfs, path, repFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
      sb.append(lb.getBlock().getLocalBlock().getBlockName() + " ");
    }
    String[] bIds = sb.toString().split(" ");

    //make sure block is healthy before we corrupt it
    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));

    // corrupt replicas
    block = DFSTestUtil.getFirstBlock(dfs, path);
    File blockFile = cluster.getBlockFile(0, block);
    if (blockFile != null && blockFile.exists()) {
      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
      FileChannel channel = raFile.getChannel();
      String badString = "BADBAD";
      int rand = random.nextInt((int) channel.size() / 2);
      raFile.seek(rand);
      raFile.write(badString.getBytes());
      raFile.close();
    }

    util.waitCorruptReplicas(dfs, cluster.getNamesystem(), path, block, 1);

    outStr = runFsck(conf, 1, false, "/", "-blockId", block.getBlockName());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 4
Source File: TestSequentialBlockId.java    From hadoop with Apache License 2.0 (identical code also appears in big-c)
/**
 * Test that collisions in the block ID space are handled gracefully.
 *
 * @throws IOException
 */
@Test
public void testTriggerBlockIdCollision() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  try {
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    FSNamesystem fsn = cluster.getNamesystem();
    final int blockCount = 10;

    // Create a file with a few blocks to rev up the global block ID
    // counter.
    Path path1 = new Path("testBlockIdCollisionDetection_file1.dat");
    DFSTestUtil.createFile(
        fs, path1, IO_SIZE, BLOCK_SIZE * blockCount,
        BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks1 = DFSTestUtil.getAllBlocks(fs, path1);

    // Rewind the block ID counter in the name system object. This will result
    // in block ID collisions when we try to allocate new blocks.
    SequentialBlockIdGenerator blockIdGenerator = fsn.getBlockIdManager()
      .getBlockIdGenerator();
    blockIdGenerator.setCurrentValue(blockIdGenerator.getCurrentValue() - 5);

    // Trigger collisions by creating a new file.
    Path path2 = new Path("testBlockIdCollisionDetection_file2.dat");
    DFSTestUtil.createFile(
        fs, path2, IO_SIZE, BLOCK_SIZE * blockCount,
        BLOCK_SIZE, REPLICATION, SEED);
    List<LocatedBlock> blocks2 = DFSTestUtil.getAllBlocks(fs, path2);
    assertThat(blocks2.size(), is(blockCount));

    // Make sure that file2 block IDs start immediately after file1
    assertThat(blocks2.get(0).getBlock().getBlockId(),
               is(blocks1.get(blockCount - 1).getBlock().getBlockId() + 1));

  } finally {
    cluster.shutdown();
  }
}
 