Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#waitClusterUp()

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#waitClusterUp(). Each example is taken from an open-source project; the source file and project are noted above each example.
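
All of these examples follow the same basic pattern: construct a MiniDFSCluster, call waitClusterUp() so the test does not race cluster startup, use the cluster's FileSystem, and shut the cluster down in a finally block. The snippet below is a minimal, self-contained sketch of that pattern; it assumes the hadoop-hdfs test artifact is on the classpath, and the class name and the path "/demo" are placeholders chosen for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class WaitClusterUpSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)
        .build();
    try {
      // Block until the mini cluster reports itself as up.
      cluster.waitClusterUp();
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/demo")); // the cluster is now safe to use
    } finally {
      cluster.shutdown(); // always tear the mini cluster down
    }
  }
}

Note that some of the older examples below (the RDFS ones) use the four-argument constructor, new MiniDFSCluster(conf, numDataNodes, format, racks), rather than MiniDFSCluster.Builder; waitClusterUp() is called the same way in both styles.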
Example 1
Source File: TestAuditLogger.java    From hadoop with Apache License 2.0
/**
 * Tests that AuditLogger works as expected.
 */
@Test
public void testAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    assertEquals(1, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
 
Example 2
Source File: TestAuditLogger.java    From hadoop with Apache License 2.0
/**
 * Tests that TopAuditLogger can be disabled
 */
@Test
public void testDisableTopAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(NNTOP_ENABLED_KEY, false);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    List<AuditLogger> auditLoggers =
        cluster.getNameNode().getNamesystem().getAuditLoggers();
    for (AuditLogger auditLogger : auditLoggers) {
      assertFalse(
          "top audit logger is still hooked in after it is disabled",
          auditLogger instanceof TopAuditLogger);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example 3
Source File: TestAuditLogger.java    From hadoop with Apache License 2.0
/**
 * Minor test related to HADOOP-9155. Verify that during a
 * FileSystem.setPermission() operation, the stat passed in during the
 * logAuditEvent() call returns the new permission rather than the old
 * permission.
 */
@Test
public void testAuditLoggerWithSetPermission() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    final Path p = new Path("/");
    fs.setTimes(p, time, time);
    fs.setPermission(p, new FsPermission(TEST_PERMISSION));
    assertEquals(TEST_PERMISSION, DummyAuditLogger.foundPermission);
    assertEquals(2, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
 
Example 4
Source File: TestAuditLogger.java    From hadoop with Apache License 2.0
/**
 * Tests that a broken audit logger causes requests to fail.
 */
@Test
public void testBrokenLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      BrokenAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    fail("Expected exception due to broken audit logger.");
  } catch (RemoteException re) {
    // Expected.
  } finally {
    cluster.shutdown();
  }
}
 
Example 5
Source File: TestAuditLogger.java    From big-c with Apache License 2.0
/**
 * Minor test related to HADOOP-9155. Verify that during a
 * FileSystem.setPermission() operation, the stat passed in during the
 * logAuditEvent() call returns the new permission rather than the old
 * permission.
 */
@Test
public void testAuditLoggerWithSetPermission() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    final Path p = new Path("/");
    fs.setTimes(p, time, time);
    fs.setPermission(p, new FsPermission(TEST_PERMISSION));
    assertEquals(TEST_PERMISSION, DummyAuditLogger.foundPermission);
    assertEquals(2, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
 
Example 6
Source File: TestAuditLogger.java    From big-c with Apache License 2.0
/**
 * Tests that AuditLogger works as expected.
 */
@Test
public void testAuditLogger() throws IOException {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
      DummyAuditLogger.class.getName());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

  try {
    cluster.waitClusterUp();
    assertTrue(DummyAuditLogger.initialized);
    DummyAuditLogger.resetLogCount();

    FileSystem fs = cluster.getFileSystem();
    long time = System.currentTimeMillis();
    fs.setTimes(new Path("/"), time, time);
    assertEquals(1, DummyAuditLogger.logCount);
  } finally {
    cluster.shutdown();
  }
}
 
Example 7
Source File: TestPartialOpenForWrite.java    From RDFS with Apache License 2.0
@Override
protected void setUp() throws Exception {
  super.setUp();

  conf = new Configuration();
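  // Do not retry failed IPC connections; let errors surface immediately.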
  conf.setInt("ipc.client.connect.max.retries", 0);
  cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
  cluster.waitClusterUp();
  fileSystem = cluster.getFileSystem();
}
 
Example 8
Source File: TestDFSIsUnderConstruction.java    From RDFS with Apache License 2.0
@Override
protected void setUp() throws Exception {
  super.setUp();
  conf = new Configuration();
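  // Start a two-datanode cluster spanning racks /rack1 and /rack2.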
  cluster = new MiniDFSCluster(conf, 2, true, new String[]{"/rack1", "/rack2"});
  cluster.waitClusterUp();
  fs = cluster.getFileSystem();
}
 
Example 9
Source File: HdfsRepositoryTest.java    From djl with Apache License 2.0
@BeforeClass
public void setup() throws IOException {
    if (System.getProperty("os.name").startsWith("Win")) {
        throw new SkipException("MiniDFSCluster doesn't support Windows.");
    }

    System.setProperty("DJL_CACHE_DIR", "build/cache");
    String userHome = System.getProperty("user.home");
    System.setProperty("ENGINE_CACHE_DIR", userHome);

    java.nio.file.Path dir = Paths.get("build/test/mlp");
    java.nio.file.Path zipFile = Paths.get("build/test/mlp.zip");
    java.nio.file.Path symbolFile = dir.resolve("mlp-symbol.json");
    java.nio.file.Path paramFile = dir.resolve("mlp-0000.param");
    Files.createDirectories(dir);
    if (Files.notExists(symbolFile)) {
        Files.createFile(symbolFile);
    }
    if (Files.notExists(paramFile)) {
        Files.createFile(paramFile);
    }
    if (Files.notExists(zipFile)) {
        ZipUtils.zip(dir, zipFile);
    }

    Configuration config = new Configuration();
    setFilePermission(config);
    miniDfs = new MiniDFSCluster(config, 1, true, null);
    miniDfs.waitClusterUp();
    FileSystem fs = miniDfs.getFileSystem();
    fs.copyFromLocalFile(new Path(zipFile.toString()), new Path("/mlp.zip"));
    fs.copyFromLocalFile(new Path(symbolFile.toString()), new Path("/mlp/mlp-symbol.json"));
    fs.copyFromLocalFile(new Path(paramFile.toString()), new Path("/mlp/mlp-0000.param"));
}
 
Example 10
Source File: TestHDFSConcat.java    From RDFS with Apache License 2.0
@Before
public void startUpCluster() throws IOException {
  cluster = new MiniDFSCluster(conf, REPL_FACTOR, true, null);
  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = (DistributedFileSystem) cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);
  nn = cluster.getNameNode();
  assertNotNull("Failed to get NameNode", nn);
}
 
Example 11
Source File: UnitTestCluster.java    From Scribengin with GNU Affero General Public License v3.0
private MiniDFSCluster createMiniDFSCluster(Configuration conf, String dir, int numDataNodes) throws IOException {
  if (this.hdfsCluster == null) {
    File baseDir = new File(dir).getAbsoluteFile();
    FileUtil.fullyDelete(baseDir);
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
    MiniDFSCluster _hdfsCluster =
      new MiniDFSCluster.Builder(conf).
      numDataNodes(numDataNodes).
      build();
    _hdfsCluster.waitClusterUp();
    this.hdfsCluster = _hdfsCluster;
  }
  return this.hdfsCluster;
}
 
Example 12
Source File: TestFsck.java    From big-c with Apache License 2.0
/**
 * Tests that the # of misreplaced replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  String [] racks = {"/rack1", "/rack1"};
  String [] hosts = {"host1", "host2"};
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas 
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;
    
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_DN, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // check misReplicatedBlock number.
    assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 13
Source File: TestDFSConcurrentFileOperations.java    From RDFS with Apache License 2.0
private void init(Configuration conf) throws IOException {
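  // Start three datanodes: two on /rack1, one on /rack2.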
  cluster = new MiniDFSCluster(conf, 3, true, new String[]{"/rack1", "/rack2", "/rack1"});
  cluster.waitClusterUp();
  fs = cluster.getFileSystem();
}
 
Example 14
Source File: TestWaitingRoomPurger.java    From RDFS with Apache License 2.0
public void testWaitingRoomPurger() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  cluster.waitClusterUp();
  FileSystem fs = cluster.getFileSystem();

  WaitingRoom wr = new WaitingRoom(conf);
  WaitingRoomPurger purger = wr.getPurger();
  SnapshotNode ssNode = new SnapshotNode(conf);
  ssNode.shutdownWaitingRoomPurger();

  String wrPath = conf.get("fs.snapshot.waitingroom", "/.WR");    
  String ssDir = conf.get("fs.snapshot.dir", "/.SNAPSHOT");

  Path foo = new Path("/foo");
  Path bar = new Path("/hadoop/bar");
  Path mash = new Path("/hadoop/mash");

  FSDataOutputStream stream;

  // Create foo
  stream = fs.create(foo);
  stream.writeByte(0);
  stream.close();
  
  // Move foo to waiting room.
  assertTrue(wr.moveToWaitingRoom(foo));
  
  // Create snapshot
  ssNode.createSnapshot("first", false); // contains nothing

  // Create bar (V1)
  stream = fs.create(bar);
  stream.write(0);
  stream.close();

  // Create mash (empty)
  stream = fs.create(mash);
  stream.close();

  // Create snapshot
  ssNode.createSnapshot("second", false); // contains bar (V1), mash

  // Move mash, bar to waiting room
  assertTrue(wr.moveToWaitingRoom(mash));
  assertTrue(wr.moveToWaitingRoom(bar));

  // Create bar (V2)
  stream = fs.create(bar);
  stream.write(0);
  stream.close();

  ssNode.createSnapshot("third", false); // contains bar (V2)

  // Verify fs state right now
  assertTrue(fs.exists(bar));
  assertFalse(fs.exists(foo));
  assertFalse(fs.exists(mash));
  assertTrue(fs.exists(new Path(wrPath + "/foo")));
  assertTrue(fs.exists(new Path(wrPath + "/hadoop/bar")));
  assertTrue(fs.exists(new Path(wrPath + "/hadoop/mash")));

  // Run purger
  purger.purge();

  // Verify fs state right now
  assertTrue(fs.exists(bar));
  assertFalse(fs.exists(foo));
  assertFalse(fs.exists(mash));
  assertFalse(fs.exists(new Path(wrPath + "/foo"))); // deleted: unreferenced
  assertTrue(fs.exists(new Path(wrPath + "/hadoop/bar")));
  assertFalse(fs.exists(new Path(wrPath + "/hadoop/mash"))); // deleted: empty

  // Delete snapshot 'second'
  boolean success = fs.delete(new Path(ssDir + "/" + SnapshotNode.SSNAME + "second"));
  assertTrue(success);

  // Run purger again
  purger.purge();

  // Verify fs state right now
  assertTrue(fs.exists(bar));
  assertFalse(fs.exists(foo));
  assertFalse(fs.exists(mash));
  assertFalse(fs.exists(new Path(wrPath + "/foo"))); // deleted: last run
  assertFalse(fs.exists(new Path(wrPath + "/hadoop/bar"))); // deleted: unreferenced
  assertFalse(fs.exists(new Path(wrPath + "/hadoop/mash"))); // deleted: last run
}
 
Example 15
Source File: TestFsck.java    From big-c with Apache License 2.0
/**
 * Test for blockIdCK
 */

@Test
public void testBlockIdCK() throws Exception {

  final short REPL_FACTOR = 2;
  short NUM_DN = 2;
  final long blockSize = 512;

  String [] racks = {"/rack1", "/rack2"};
  String [] hosts = {"host1", "host2"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);

  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  cluster =
    new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
      .racks(racks).build();

  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);

  DFSTestUtil util = new DFSTestUtil.Builder().
    setName(getClass().getSimpleName()).setNumFiles(1).build();
  //create files
  final String pathString = "/testfile";
  final Path path = new Path(pathString);
  util.createFile(dfs, path, 1024, REPL_FACTOR, 1000L);
  util.waitReplication(dfs, path, REPL_FACTOR);
  StringBuilder sb = new StringBuilder();
  for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
    sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
  }
  String[] bIds = sb.toString().split(" ");

  //run fsck
  try {
    //illegal input test
    String runFsckResult = runFsck(conf, 0, true, "/", "-blockId",
        "not_a_block_id");
    assertTrue(runFsckResult.contains("Incorrect blockId format:"));

    //general test
    runFsckResult = runFsck(conf, 0, true, "/", "-blockId", sb.toString());
    assertTrue(runFsckResult.contains(bIds[0]));
    assertTrue(runFsckResult.contains(bIds[1]));
    assertTrue(runFsckResult.contains(
        "Block replica on datanode/rack: host1/rack1 is HEALTHY"));
    assertTrue(runFsckResult.contains(
        "Block replica on datanode/rack: host2/rack2 is HEALTHY"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 16
Source File: TestFsck.java    From big-c with Apache License 2.0
/**
 * Tests that the # of missing block replicas and expected replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMissingReplicas() throws IOException {
  // Desired replication factor
  // Set this higher than NUM_REPLICAS so it's under-replicated
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_REPLICAS, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // Also print the output from the fsck, for ex post facto sanity checks
    System.out.println(result.toString());
    assertEquals(res.missingReplicas, 
        (NUM_BLOCKS*REPL_FACTOR) - (NUM_BLOCKS*NUM_REPLICAS));
    assertEquals(res.numExpectedReplicas, NUM_BLOCKS*REPL_FACTOR);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 17
Source File: TestFsck.java    From hadoop with Apache License 2.0
/**
 * Test for blockIdCK with block corruption
 */
@Test
public void testBlockIdCKCorruption() throws Exception {
  short NUM_DN = 1;
  final long blockSize = 512;
  Random random = new Random();
  DFSClient dfsClient;
  LocatedBlocks blocks;
  ExtendedBlock block;
  short repFactor = 1;
  String [] racks = {"/rack1"};
  String [] hosts = {"host1"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  // Set short retry timeouts so this test runs faster
  conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  try {
    cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
            .racks(racks).build();

    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);

    DFSTestUtil util = new DFSTestUtil.Builder().
      setName(getClass().getSimpleName()).setNumFiles(1).build();
    //create files
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    util.createFile(dfs, path, 1024, repFactor, 1000L);
    util.waitReplication(dfs, path, repFactor);
    StringBuilder sb = new StringBuilder();
    for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
      sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
    }
    String[] bIds = sb.toString().split(" ");

    //make sure block is healthy before we corrupt it
    String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));

    // corrupt replicas
    block = DFSTestUtil.getFirstBlock(dfs, path);
    File blockFile = cluster.getBlockFile(0, block);
    if (blockFile != null && blockFile.exists()) {
      RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
      FileChannel channel = raFile.getChannel();
      String badString = "BADBAD";
      int rand = random.nextInt((int) channel.size()/2);
      raFile.seek(rand);
      raFile.write(badString.getBytes());
      raFile.close();
    }

    util.waitCorruptReplicas(dfs, cluster.getNamesystem(), path, block, 1);

    outStr = runFsck(conf, 1, false, "/", "-blockId", block.getBlockName());
    System.out.println(outStr);
    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 18
Source File: TestFsck.java    From hadoop with Apache License 2.0
/**
 * Test for blockIdCK
 */

@Test
public void testBlockIdCK() throws Exception {

  final short REPL_FACTOR = 2;
  short NUM_DN = 2;
  final long blockSize = 512;

  String [] racks = {"/rack1", "/rack2"};
  String [] hosts = {"host1", "host2"};

  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);

  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  cluster =
    new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
      .racks(racks).build();

  assertNotNull("Failed Cluster Creation", cluster);
  cluster.waitClusterUp();
  dfs = cluster.getFileSystem();
  assertNotNull("Failed to get FileSystem", dfs);

  DFSTestUtil util = new DFSTestUtil.Builder().
    setName(getClass().getSimpleName()).setNumFiles(1).build();
  //create files
  final String pathString = "/testfile";
  final Path path = new Path(pathString);
  util.createFile(dfs, path, 1024, REPL_FACTOR, 1000L);
  util.waitReplication(dfs, path, REPL_FACTOR);
  StringBuilder sb = new StringBuilder();
  for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
    sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
  }
  String[] bIds = sb.toString().split(" ");

  //run fsck
  try {
    //illegal input test
    String runFsckResult = runFsck(conf, 0, true, "/", "-blockId",
        "not_a_block_id");
    assertTrue(runFsckResult.contains("Incorrect blockId format:"));

    //general test
    runFsckResult = runFsck(conf, 0, true, "/", "-blockId", sb.toString());
    assertTrue(runFsckResult.contains(bIds[0]));
    assertTrue(runFsckResult.contains(bIds[1]));
    assertTrue(runFsckResult.contains(
        "Block replica on datanode/rack: host1/rack1 is HEALTHY"));
    assertTrue(runFsckResult.contains(
        "Block replica on datanode/rack: host2/rack2 is HEALTHY"));
  } finally {
    cluster.shutdown();
  }
}
 
Example 19
Source File: TestFsck.java    From hadoop with Apache License 2.0
/**
 * Tests that the # of misreplaced replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMisPlacedReplicas() throws IOException {
  // Desired replication factor
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  short NUM_DN = 2;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  String [] racks = {"/rack1", "/rack1"};
  String [] hosts = {"host1", "host2"};
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
        .racks(racks).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    // Add a new node on different rack, so previous blocks' replicas 
    // are considered to be misplaced
    nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
    NUM_DN++;
    
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_DN, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // check misReplicatedBlock number.
    assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 20
Source File: TestFsck.java    From hadoop with Apache License 2.0
/**
 * Tests that the # of missing block replicas and expected replicas is correct
 * @throws IOException
 */
@Test
public void testFsckMissingReplicas() throws IOException {
  // Desired replication factor
  // Set this higher than NUM_REPLICAS so it's under-replicated
  final short REPL_FACTOR = 2;
  // Number of replicas to actually start
  final short NUM_REPLICAS = 1;
  // Number of blocks to write
  final short NUM_BLOCKS = 3;
  // Set a small-ish blocksize
  final long blockSize = 512;
  
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  
  MiniDFSCluster cluster = null;
  DistributedFileSystem dfs = null;
  
  try {
    // Startup a minicluster
    cluster = 
        new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
    assertNotNull("Failed Cluster Creation", cluster);
    cluster.waitClusterUp();
    dfs = cluster.getFileSystem();
    assertNotNull("Failed to get FileSystem", dfs);
    
    // Create a file that will be intentionally under-replicated
    final String pathString = "/testfile";
    final Path path = new Path(pathString);
    long fileLen = blockSize * NUM_BLOCKS;
    DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
    
    // Create an under-replicated file
    NameNode namenode = cluster.getNameNode();
    NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
        .getDatanodeManager().getNetworkTopology();
    Map<String,String[]> pmap = new HashMap<String, String[]>();
    Writer result = new StringWriter();
    PrintWriter out = new PrintWriter(result, true);
    InetAddress remoteAddress = InetAddress.getLocalHost();
    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out, 
        NUM_REPLICAS, remoteAddress);
    
    // Run the fsck and check the Result
    final HdfsFileStatus file = 
        namenode.getRpcServer().getFileInfo(pathString);
    assertNotNull(file);
    Result res = new Result(conf);
    fsck.check(pathString, file, res);
    // Also print the output from the fsck, for ex post facto sanity checks
    System.out.println(result.toString());
    assertEquals(res.missingReplicas, 
        (NUM_BLOCKS*REPL_FACTOR) - (NUM_BLOCKS*NUM_REPLICAS));
    assertEquals(res.numExpectedReplicas, NUM_BLOCKS*REPL_FACTOR);
  } finally {
    if(dfs != null) {
      dfs.close();
    }
    if(cluster != null) {
      cluster.shutdown();
    }
  }
}