Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster#getInstanceStorageDir()

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster#getInstanceStorageDir(). You can go to the original project or source file by following the links above each example.
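All of the examples share one pattern: build a MiniDFSCluster, look up the block pool id, call getInstanceStorageDir(dataNodeIndex, dirIndex) to reach one storage volume of one datanode, and then use MiniDFSCluster.getFinalizedDir() and getAllBlockMetadataFiles() to find the on-disk block and metadata files. The condensed sketch below shows only that pattern; it is not one of the original tests, it assumes a Configuration named conf and the same imports as the test files, and it omits the assertions the real tests perform.

MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
  cluster.waitActive();
  // The block pool id is needed to resolve the finalized block directory
  // inside a storage volume.
  String bpid = cluster.getNamesystem().getBlockPoolId();
  // First storage directory (index 0) of the first datanode (index 0).
  File storageDir = cluster.getInstanceStorageDir(0, 0);
  File finalizedDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
  List<File> metadataFiles =
      MiniDFSCluster.getAllBlockMetadataFiles(finalizedDir);
  if (metadataFiles != null) {
    for (File metadataFile : metadataFiles) {
      // Each .meta file maps back to its block file.
      File blockFile = Block.metaToBlockFile(metadataFile);
      // The tests below delete blockFile and metadataFile at this point
      // to simulate on-disk block loss or corruption.
    }
  }
} finally {
  cluster.shutdown();
}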
Example 1
Source File: TestListCorruptFileBlocks.java    From hadoop with Apache License 2.0
@Test (timeout=300000)
public void testlistCorruptFileBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans
                                                         // directories
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = 
      namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);
    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
        // (blocks.length > 0));
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          LOG.info("Deliberately removing file " + blockFile.getName());
          assertTrue("Cannot remove file.", blockFile.delete());
          LOG.info("Deliberately removing file " + metadataFile.getName());
          assertTrue("Cannot remove file.", metadataFile.delete());
          // break;
        }
      }
    }

    int count = 0;
    corruptFileBlocks = namenode.getNamesystem().
      listCorruptFileBlocks("/corruptData", null);
    numCorrupt = corruptFileBlocks.size();
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode.getNamesystem()
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.size();
      count++;
      if (count > 30)
        break;
    }
    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);
    // test the paging here

    FSNamesystem.CorruptFileBlockInfo[] cfb = corruptFileBlocks
        .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    // now get the 2nd and 3rd file that is corrupt
    String[] cookie = new String[]{"1"};
    Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks =
      namenode.getNamesystem()
        .listCorruptFileBlocks("/corruptData", cookie);
    FSNamesystem.CorruptFileBlockInfo[] ncfb = nextCorruptFileBlocks
        .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    numCorrupt = nextCorruptFileBlocks.size();
    assertTrue(numCorrupt == 2);
    assertTrue(ncfb[0].block.getBlockName()
        .equalsIgnoreCase(cfb[1].block.getBlockName()));

    corruptFileBlocks =
      namenode.getNamesystem()
        .listCorruptFileBlocks("/corruptData", cookie);
    numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);
    // Do a listing on a dir which doesn't have any corrupt blocks and
    // validate
    util.createFiles(fs, "/goodData");
    corruptFileBlocks = 
      namenode.getNamesystem().listCorruptFileBlocks("/goodData", null);
    numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 2
Source File: TestListCorruptFileBlocks.java    From hadoop with Apache License 2.0
/**
 * test listCorruptFileBlocks in DistributedFileSystem
 */
@Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans
                                                         // directories
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).
        setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    RemoteIterator<Path> corruptFileBlocks = 
      dfs.listCorruptFileBlocks(new Path("/corruptData"));
    int numCorrupt = countPaths(corruptFileBlocks);
    assertTrue(numCorrupt == 0);
    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    // For loop through number of datadirectories per datanode (2)
    for (int i = 0; i < 2; i++) {
      File storageDir = cluster.getInstanceStorageDir(0, i);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
          data_dir);
      if (metadataFiles == null)
        continue;
      // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
      // (blocks.length > 0));
      for (File metadataFile : metadataFiles) {
        File blockFile = Block.metaToBlockFile(metadataFile);
        LOG.info("Deliberately removing file " + blockFile.getName());
        assertTrue("Cannot remove file.", blockFile.delete());
        LOG.info("Deliberately removing file " + metadataFile.getName());
        assertTrue("Cannot remove file.", metadataFile.delete());
        // break;
      }
    }

    int count = 0;
    corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
    numCorrupt = countPaths(corruptFileBlocks);
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
      numCorrupt = countPaths(corruptFileBlocks);
      count++;
      if (count > 30)
        break;
    }
    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 3
Source File: TestListCorruptFileBlocks.java    From hadoop with Apache License 2.0
/**
 * Test if NN.listCorruptFiles() returns the right number of results.
 * The corrupt blocks are detected by the BlockPoolSliceScanner.
 * Also, test that DFS.listCorruptFileBlocks can make multiple successive
 * calls.
 */
@Test (timeout=300000)
public void testMaxCorruptFiles() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    final int maxCorruptFileBlocks = 
      FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;

    // create maxCorruptFileBlocks * 3 files with one block each
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testMaxCorruptFiles").
        setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).
        build();
    util.createFiles(fs, "/srcdat2", (short) 1);
    util.waitReplication(fs, "/srcdat2", (short) 1);

    // verify that there are no bad blocks.
    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
      getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting none.",
        badFiles.size() == 0);

    // Now deliberately remove blocks from all files
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        LOG.info("Removing files from " + data_dir);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // Occasionally the BlockPoolSliceScanner can run before we have removed
    // the blocks. Restart the Datanode to trigger the scanner into running
    // once more.
    LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
    cluster.restartDataNodes();
    cluster.waitActive();

    badFiles = 
      namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
      
    while (badFiles.size() < maxCorruptFileBlocks) {
      LOG.info("# of corrupt files is: " + badFiles.size());
      Thread.sleep(10000);
      badFiles = namenode.getNamesystem().
        listCorruptFileBlocks("/srcdat2", null);
    }
    badFiles = namenode.getNamesystem().
      listCorruptFileBlocks("/srcdat2", null); 
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting " + 
        maxCorruptFileBlocks + ".",
        badFiles.size() == maxCorruptFileBlocks);

    CorruptFileBlockIterator iter = (CorruptFileBlockIterator)
      fs.listCorruptFileBlocks(new Path("/srcdat2"));
    int corruptPaths = countPaths(iter);
    assertTrue("Expected more than " + maxCorruptFileBlocks +
               " corrupt file blocks but got " + corruptPaths,
               corruptPaths > maxCorruptFileBlocks);
    assertTrue("Iterator should have made more than 1 call but made " +
               iter.getCallsMade(),
               iter.getCallsMade() > 1);

    util.cleanup(fs, "/srcdat2");
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example 4
Source File: TestFsck.java    From hadoop with Apache License 2.0
/** check if option -list-corruptfiles of fsck command works properly */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode
        .listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs,"/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 5
Source File: TestDeleteBlockPool.java    From hadoop with Apache License 2.0
@Test
public void testDfsAdminDeleteBlockPool() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
        "namesServerId1,namesServerId2");
    cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
          conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
      .numDataNodes(1).build();

    cluster.waitActive();

    FileSystem fs1 = cluster.getFileSystem(0);
    FileSystem fs2 = cluster.getFileSystem(1);

    DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 1, 54);
    DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 1, 54);

    DataNode dn1 = cluster.getDataNodes().get(0);

    String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
    String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
    
    File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
    File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
    
    Configuration nn1Conf = cluster.getConfiguration(0);
    nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
    dn1.refreshNamenodes(nn1Conf);
    assertEquals(1, dn1.getAllBpOs().length);
    
    DFSAdmin admin = new DFSAdmin(nn1Conf);
    String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
    String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
    
    int ret = admin.run(args);
    assertFalse(0 == ret);

    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
    
    String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
    ret = admin.run(forceArgs);
    assertEquals(0, ret);
    
    verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
    verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
    
    //bpid1 remains good
    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
    
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 6
Source File: TestListCorruptFileBlocks.java    From big-c with Apache License 2.0
@Test (timeout=300000)
public void testlistCorruptFileBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans
                                                         // directories
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> corruptFileBlocks = 
      namenode.getNamesystem().listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);
    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i = 0; i < 4; i++) {
      for (int j = 0; j <= 1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
        // (blocks.length > 0));
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          LOG.info("Deliberately removing file " + blockFile.getName());
          assertTrue("Cannot remove file.", blockFile.delete());
          LOG.info("Deliberately removing file " + metadataFile.getName());
          assertTrue("Cannot remove file.", metadataFile.delete());
          // break;
        }
      }
    }

    int count = 0;
    corruptFileBlocks = namenode.getNamesystem().
      listCorruptFileBlocks("/corruptData", null);
    numCorrupt = corruptFileBlocks.size();
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode.getNamesystem()
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.size();
      count++;
      if (count > 30)
        break;
    }
    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);
    // test the paging here

    FSNamesystem.CorruptFileBlockInfo[] cfb = corruptFileBlocks
        .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    // now get the 2nd and 3rd file that is corrupt
    String[] cookie = new String[]{"1"};
    Collection<FSNamesystem.CorruptFileBlockInfo> nextCorruptFileBlocks =
      namenode.getNamesystem()
        .listCorruptFileBlocks("/corruptData", cookie);
    FSNamesystem.CorruptFileBlockInfo[] ncfb = nextCorruptFileBlocks
        .toArray(new FSNamesystem.CorruptFileBlockInfo[0]);
    numCorrupt = nextCorruptFileBlocks.size();
    assertTrue(numCorrupt == 2);
    assertTrue(ncfb[0].block.getBlockName()
        .equalsIgnoreCase(cfb[1].block.getBlockName()));

    corruptFileBlocks =
      namenode.getNamesystem()
        .listCorruptFileBlocks("/corruptData", cookie);
    numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);
    // Do a listing on a dir which doesn't have any corrupt blocks and
    // validate
    util.createFiles(fs, "/goodData");
    corruptFileBlocks = 
      namenode.getNamesystem().listCorruptFileBlocks("/goodData", null);
    numCorrupt = corruptFileBlocks.size();
    assertTrue(numCorrupt == 0);
    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 7
Source File: TestListCorruptFileBlocks.java    From big-c with Apache License 2.0
/**
 * test listCorruptFileBlocks in DistributedFileSystem
 */
@Test (timeout=300000)
public void testlistCorruptFileBlocksDFS() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1); // datanode scans
                                                         // directories
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).
        setMaxLevels(1).setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData");

    RemoteIterator<Path> corruptFileBlocks = 
      dfs.listCorruptFileBlocks(new Path("/corruptData"));
    int numCorrupt = countPaths(corruptFileBlocks);
    assertTrue(numCorrupt == 0);
    // delete the blocks
    String bpid = cluster.getNamesystem().getBlockPoolId();
    // For loop through number of datadirectories per datanode (2)
    for (int i = 0; i < 2; i++) {
      File storageDir = cluster.getInstanceStorageDir(0, i);
      File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
      List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
          data_dir);
      if (metadataFiles == null)
        continue;
      // assertTrue("Blocks do not exist in data-dir", (blocks != null) &&
      // (blocks.length > 0));
      for (File metadataFile : metadataFiles) {
        File blockFile = Block.metaToBlockFile(metadataFile);
        LOG.info("Deliberately removing file " + blockFile.getName());
        assertTrue("Cannot remove file.", blockFile.delete());
        LOG.info("Deliberately removing file " + metadataFile.getName());
        assertTrue("Cannot remove file.", metadataFile.delete());
        // break;
      }
    }

    int count = 0;
    corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
    numCorrupt = countPaths(corruptFileBlocks);
    while (numCorrupt < 3) {
      Thread.sleep(1000);
      corruptFileBlocks = dfs.listCorruptFileBlocks(new Path("/corruptData"));
      numCorrupt = countPaths(corruptFileBlocks);
      count++;
      if (count > 30)
        break;
    }
    // Validate we get all the corrupt files
    LOG.info("Namenode has bad files. " + numCorrupt);
    assertTrue(numCorrupt == 3);

    util.cleanup(fs, "/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 8
Source File: TestListCorruptFileBlocks.java    From big-c with Apache License 2.0
/**
 * Test if NN.listCorruptFiles() returns the right number of results.
 * The corrupt blocks are detected by the BlockPoolSliceScanner.
 * Also, test that DFS.listCorruptFileBlocks can make multiple successive
 * calls.
 */
@Test (timeout=300000)
public void testMaxCorruptFiles() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 3 * 1000); // datanode sends block reports
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    final int maxCorruptFileBlocks = 
      FSNamesystem.DEFAULT_MAX_CORRUPT_FILEBLOCKS_RETURNED;

    // create maxCorruptFileBlocks * 3 files with one block each
    DFSTestUtil util = new DFSTestUtil.Builder().setName("testMaxCorruptFiles").
        setNumFiles(maxCorruptFileBlocks * 3).setMaxLevels(1).setMaxSize(512).
        build();
    util.createFiles(fs, "/srcdat2", (short) 1);
    util.waitReplication(fs, "/srcdat2", (short) 1);

    // verify that there are no bad blocks.
    final NameNode namenode = cluster.getNameNode();
    Collection<FSNamesystem.CorruptFileBlockInfo> badFiles = namenode.
      getNamesystem().listCorruptFileBlocks("/srcdat2", null);
    assertTrue("Namenode has " + badFiles.size() + " corrupt files. Expecting none.",
        badFiles.size() == 0);

    // Now deliberately remove blocks from all files
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        LOG.info("Removing files from " + data_dir);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // Occasionally the BlockPoolSliceScanner can run before we have removed
    // the blocks. Restart the Datanode to trigger the scanner into running
    // once more.
    LOG.info("Restarting Datanode to trigger BlockPoolSliceScanner");
    cluster.restartDataNodes();
    cluster.waitActive();

    badFiles = 
      namenode.getNamesystem().listCorruptFileBlocks("/srcdat2", null);
      
    while (badFiles.size() < maxCorruptFileBlocks) {
      LOG.info("# of corrupt files is: " + badFiles.size());
      Thread.sleep(10000);
      badFiles = namenode.getNamesystem().
        listCorruptFileBlocks("/srcdat2", null);
    }
    badFiles = namenode.getNamesystem().
      listCorruptFileBlocks("/srcdat2", null); 
    LOG.info("Namenode has bad files. " + badFiles.size());
    assertTrue("Namenode has " + badFiles.size() + " bad files. Expecting " + 
        maxCorruptFileBlocks + ".",
        badFiles.size() == maxCorruptFileBlocks);

    CorruptFileBlockIterator iter = (CorruptFileBlockIterator)
      fs.listCorruptFileBlocks(new Path("/srcdat2"));
    int corruptPaths = countPaths(iter);
    assertTrue("Expected more than " + maxCorruptFileBlocks +
               " corrupt file blocks but got " + corruptPaths,
               corruptPaths > maxCorruptFileBlocks);
    assertTrue("Iterator should have made more than 1 call but made " +
               iter.getCallsMade(),
               iter.getCallsMade() > 1);

    util.cleanup(fs, "/srcdat2");
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example 9
Source File: TestFsck.java    From big-c with Apache License 2.0
/** check if option -list-corruptfiles of fsck command works properly */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
  Configuration conf = new Configuration();
  conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
  conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
  FileSystem fs = null;

  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
    DFSTestUtil util = new DFSTestUtil.Builder().
        setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
        setMaxSize(1024).build();
    util.createFiles(fs, "/corruptData", (short) 1);
    util.waitReplication(fs, "/corruptData", (short) 1);

    // String outStr = runFsck(conf, 0, true, "/corruptData", "-list-corruptfileblocks");
    String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
    System.out.println("1. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    // delete the blocks
    final String bpid = cluster.getNamesystem().getBlockPoolId();
    for (int i=0; i<4; i++) {
      for (int j=0; j<=1; j++) {
        File storageDir = cluster.getInstanceStorageDir(i, j);
        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
        List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
            data_dir);
        if (metadataFiles == null)
          continue;
        for (File metadataFile : metadataFiles) {
          File blockFile = Block.metaToBlockFile(metadataFile);
          assertTrue("Cannot remove file.", blockFile.delete());
          assertTrue("Cannot remove file.", metadataFile.delete());
        }
      }
    }

    // wait for the namenode to see the corruption
    final NamenodeProtocols namenode = cluster.getNameNodeRpc();
    CorruptFileBlocks corruptFileBlocks = namenode
        .listCorruptFileBlocks("/corruptData", null);
    int numCorrupt = corruptFileBlocks.getFiles().length;
    while (numCorrupt == 0) {
      Thread.sleep(1000);
      corruptFileBlocks = namenode
          .listCorruptFileBlocks("/corruptData", null);
      numCorrupt = corruptFileBlocks.getFiles().length;
    }
    outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
    System.out.println("2. bad fsck out: " + outStr);
    assertTrue(outStr.contains("has 3 CORRUPT files"));

    // Do a listing on a dir which doesn't have any corrupt blocks and validate
    util.createFiles(fs, "/goodData");
    outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
    System.out.println("3. good fsck out: " + outStr);
    assertTrue(outStr.contains("has 0 CORRUPT files"));
    util.cleanup(fs,"/corruptData");
    util.cleanup(fs, "/goodData");
  } finally {
    if (cluster != null) {cluster.shutdown();}
  }
}
 
Example 10
Source File: TestDeleteBlockPool.java    From big-c with Apache License 2.0
@Test
public void testDfsAdminDeleteBlockPool() throws Exception {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = null;
  try {
    conf.set(DFSConfigKeys.DFS_NAMESERVICES,
        "namesServerId1,namesServerId2");
    cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
          conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
      .numDataNodes(1).build();

    cluster.waitActive();

    FileSystem fs1 = cluster.getFileSystem(0);
    FileSystem fs2 = cluster.getFileSystem(1);

    DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 1, 54);
    DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 1, 54);

    DataNode dn1 = cluster.getDataNodes().get(0);

    String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
    String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
    
    File dn1StorageDir1 = cluster.getInstanceStorageDir(0, 0);
    File dn1StorageDir2 = cluster.getInstanceStorageDir(0, 1);
    
    Configuration nn1Conf = cluster.getConfiguration(0);
    nn1Conf.set(DFSConfigKeys.DFS_NAMESERVICES, "namesServerId1");
    dn1.refreshNamenodes(nn1Conf);
    assertEquals(1, dn1.getAllBpOs().length);
    
    DFSAdmin admin = new DFSAdmin(nn1Conf);
    String dn1Address = dn1.getDatanodeId().getIpAddr() + ":" + dn1.getIpcPort();
    String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
    
    int ret = admin.run(args);
    assertFalse(0 == ret);

    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
    
    String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
    ret = admin.run(forceArgs);
    assertEquals(0, ret);
    
    verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
    verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
    
    //bpid1 remains good
    verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
    verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
    
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}