Java Code Examples for org.apache.hadoop.test.PathUtils

The following examples show how to use org.apache.hadoop.test.PathUtils. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: hadoop-ozone   Source File: TestHddsServerUtils.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies {@link ServerUtils#getScmDbDir}: the directory configured via
 * OZONE_SCM_DB_DIRS wins over the metadata dir and is created on demand.
 */
@Test
public void testGetScmDbDir() {
  final File baseDir = PathUtils.getTestDir(TestHddsServerUtils.class);
  final File scmDbDir = new File(baseDir, "scmDbDir");
  final File ozoneMetaDir = new File(baseDir, "metaDir");   // should be ignored.
  final OzoneConfiguration configuration = new OzoneConfiguration();
  configuration.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, scmDbDir.getPath());
  configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, ozoneMetaDir.getPath());

  try {
    // Lookup resolves to the explicitly configured DB dir and creates it.
    assertEquals(scmDbDir, ServerUtils.getScmDbDir(configuration));
    assertTrue(scmDbDir.exists());
  } finally {
    FileUtils.deleteQuietly(scmDbDir);
  }
}
 
Example 2
Source Project: hadoop-ozone   Source File: TestServerUtils.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Verifies {@link ServerUtils#getScmDbDir}: when OZONE_SCM_DB_DIRS is set,
 * only that directory is created; OZONE_METADATA_DIRS is left untouched.
 */
@Test
public void testGetScmDbDir() {
  final File base = PathUtils.getTestDir(TestServerUtils.class);
  final File scmDbDir = new File(base, "scmDbDir");
  final File ozoneMetaDir = new File(base, "metaDir");
  final OzoneConfiguration configuration = new OzoneConfiguration();
  configuration.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, scmDbDir.getPath());
  configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, ozoneMetaDir.getPath());

  try {
    // Nothing exists up front; the lookup creates only the DB dir.
    assertFalse(ozoneMetaDir.exists());
    assertFalse(scmDbDir.exists());
    assertEquals(scmDbDir, ServerUtils.getScmDbDir(configuration));
    assertTrue(scmDbDir.exists());
    assertFalse(ozoneMetaDir.exists());
  } finally {
    FileUtils.deleteQuietly(scmDbDir);
  }
}
 
Example 3
Source Project: hadoop   Source File: TestClusterId.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Per-test setup: disables System.exit, wipes any stale name directory,
 * restores FORMAT startup-option defaults, and builds a fresh Configuration.
 *
 * @throws IOException if a leftover test directory cannot be deleted
 */
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();

  final String testDirName = PathUtils.getTestDirName(getClass());
  hdfsDir = new File(testDirName, "dfs/name");

  // Start from a clean slate: remove leftovers from any previous run.
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());

  // Some tests flip these flags; reset them to defaults before each test.
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);

  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
 
Example 4
Source Project: hadoop   Source File: TestDecommission.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Per-test setup: builds an HDFS configuration for decommissioning tests
 * and writes empty hosts/exclude files on the local file system.
 *
 * @throws IOException if the hosts/exclude files cannot be written
 */
@Before
public void setup() throws IOException {
  conf = new HdfsConfiguration();
  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
  hostsFile = new Path(dir, "hosts");
  excludeFile = new Path(dir, "exclude");

  // Setup conf
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
  conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
  conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
  // This key was previously set twice (first to 1, then immediately
  // overwritten); only the effective assignment is kept.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, NAMENODE_REPLICATION_INTERVAL);

  // Start with empty (allow-all) hosts and exclude files.
  writeConfigFile(hostsFile, null);
  writeConfigFile(excludeFile, null);
}
 
Example 5
Source Project: big-c   Source File: TestClusterId.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Per-test setup: turns off real JVM exits, deletes any existing name
 * directory, resets the FORMAT startup option, and prepares a new config.
 *
 * @throws IOException if the stale test directory cannot be removed
 */
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();

  final String baseDirName = PathUtils.getTestDirName(getClass());
  hdfsDir = new File(baseDirName, "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());

  // Restore defaults that other tests may have changed.
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);

  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
 
Example 6
Source Project: big-c   Source File: TestDecommission.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Per-test setup: configures HDFS for decommission testing and seeds
 * empty hosts/exclude files.
 *
 * @throws IOException if the hosts/exclude files cannot be written
 */
@Before
public void setup() throws IOException {
  conf = new HdfsConfiguration();
  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
  hostsFile = new Path(dir, "hosts");
  excludeFile = new Path(dir, "exclude");

  // Setup conf
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
  conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
  conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
  // Was previously assigned twice (1, then the constant below); the first
  // write was dead and has been dropped.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, NAMENODE_REPLICATION_INTERVAL);

  // Empty files: no hosts restricted, none excluded.
  writeConfigFile(hostsFile, null);
  writeConfigFile(excludeFile, null);
}
 
Example 7
Source Project: vespa   Source File: MapReduceTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * One-time setup: starts a MiniDFSCluster in dry-run mode and copies the
 * JSON/CSV fixtures into HDFS for the map-reduce tests.
 *
 * @throws IOException if the cluster cannot start or fixtures cannot be copied
 */
@BeforeClass
public static void setUp() throws IOException {
    hdfsBaseDir = PathUtils.getTestDir(MapReduceTest.class).getCanonicalFile();

    conf = new HdfsConfiguration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsBaseDir.getAbsolutePath());
    // Dry-run: nothing is actually fed, so the endpoint value is irrelevant.
    conf.set(VespaConfiguration.DRYRUN, "true");
    conf.set(VespaConfiguration.ENDPOINT, "endpoint-does-not-matter-in-dryrun");

    cluster = new MiniDFSCluster.Builder(conf).build();
    hdfs = FileSystem.get(conf);

    metricsJsonPath = new Path("metrics_json");
    metricsCsvPath = new Path("metrics_csv");
    copyToHdfs("src/test/resources/operations_data.json", metricsJsonPath, "data");
    copyToHdfs("src/test/resources/tabular_data.csv", metricsCsvPath, "data");
}
 
Example 8
Source Project: hadoop-ozone   Source File: TestHddsServerUtils.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * {@link ServerUtils#getScmDbDir} must fall back to OZONE_METADATA_DIRS
 * when OZONE_SCM_DB_DIRS is undefined, creating the directory on demand.
 */
@Test
public void testGetScmDbDirWithFallback() {
  final File baseDir = PathUtils.getTestDir(TestHddsServerUtils.class);
  final File fallbackDir = new File(baseDir, "metaDir");
  final OzoneConfiguration configuration = new OzoneConfiguration();
  configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, fallbackDir.getPath());
  try {
    // With no SCM DB dir configured, the metadata dir is used and created.
    assertEquals(fallbackDir, ServerUtils.getScmDbDir(configuration));
    assertTrue(fallbackDir.exists());
  } finally {
    FileUtils.deleteQuietly(fallbackDir);
  }
}
 
Example 9
Source Project: hadoop-ozone   Source File: TestEndPoint.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * One-time setup: starts a mock SCM RPC server, then prepares a datanode
 * configuration pointing at a per-class scratch directory.
 */
@BeforeClass
public static void setUp() throws Exception {
  // The mock server must exist before any endpoint test connects to it.
  serverAddress = SCMTestUtils.getReuseableAddress();
  scmServerImpl = new ScmTestMock();
  scmServer = SCMTestUtils.startScmRpcServer(SCMTestUtils.getConf(),
      scmServerImpl, serverAddress, 10);
  testDir = PathUtils.getTestDir(TestEndPoint.class);
  config = SCMTestUtils.getConf();
  // Point both data and metadata storage at the scratch test directory.
  config.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
  config.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  // Random Ratis IPC port avoids collisions between concurrent test runs.
  config
      .setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
  config.set(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL, "1s");
}
 
Example 10
Source Project: hadoop-ozone   Source File: TestServerUtils.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * {@link ServerUtils#getScmDbDir} falls back to OZONE_METADATA_DIRS when
 * OZONE_SCM_DB_DIRS is undefined; the directory is created as a side effect.
 */
@Test
public void testGetScmDbDirWithFallback() {
  final File base = PathUtils.getTestDir(TestServerUtils.class);
  final File fallbackDir = new File(base, "metaDir");
  final OzoneConfiguration configuration = new OzoneConfiguration();
  configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, fallbackDir.getPath());
  try {
    // Does not exist up front; the lookup must create it.
    assertFalse(fallbackDir.exists());
    assertEquals(fallbackDir, ServerUtils.getScmDbDir(configuration));
    assertTrue(fallbackDir.exists());
  } finally {
    FileUtils.deleteQuietly(fallbackDir);
  }
}
 
Example 11
Source Project: hadoop-ozone   Source File: TestServerUtils.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * getOzoneMetaDirPath accepts a single directory entry in
 * OZONE_METADATA_DIRS and creates that directory on demand.
 */
@Test
public void ozoneMetadataDirAcceptsSingleItem() {
  final File base = PathUtils.getTestDir(TestServerUtils.class);
  final File singleMetaDir = new File(base, "metaDir");
  final OzoneConfiguration configuration = new OzoneConfiguration();
  configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, singleMetaDir.getPath());

  try {
    assertFalse(singleMetaDir.exists());
    assertEquals(singleMetaDir, ServerUtils.getOzoneMetaDirPath(configuration));
    assertTrue(singleMetaDir.exists());
  } finally {
    FileUtils.deleteQuietly(singleMetaDir);
  }
}
 
Example 12
Source Project: hadoop   Source File: TestCheckpoint.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Checks that an IOException in NNStorage.writeTransactionIdFile is handled
 * correctly (by removing the storage directory).
 * See https://issues.apache.org/jira/browse/HDFS-2011
 */
@Test
public void testWriteTransactionIdHandlesIOE() throws Exception {
  LOG.info("Check IOException handled correctly by writeTransactionIdFile");
  ArrayList<URI> fsImageDirs = new ArrayList<URI>();
  ArrayList<URI> editsDirs = new ArrayList<URI>();
  // One directory serves as both fsimage and edits storage.
  File filePath =
    new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
  assertTrue("Couldn't create directory storageDirToCheck",
             filePath.exists() || filePath.mkdirs());
  fsImageDirs.add(filePath.toURI());
  editsDirs.add(filePath.toURI());
  NNStorage nnStorage = new NNStorage(new HdfsConfiguration(),
    fsImageDirs, editsDirs);
  try {
    // Sanity check: the storage dir is registered and nothing removed yet.
    assertTrue("List of storage directories didn't have storageDirToCheck.",
               nnStorage.getEditsDirectories().iterator().next().
               toString().indexOf("storageDirToCheck") != -1);
    assertTrue("List of removed storage directories wasn't empty",
               nnStorage.getRemovedStorageDirs().isEmpty());
  } finally {
    // Delete storage directory to cause IOException in writeTransactionIdFile
    assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(),
               filePath.delete());
  }
  // Just call writeTransactionIdFile using any random number
  nnStorage.writeTransactionIdFileToStorage(1);
  // The failed directory must now appear at the end of the removed list.
  List<StorageDirectory> listRsd = nnStorage.getRemovedStorageDirs();
  assertTrue("Removed directory wasn't what was expected",
             listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
             toString().indexOf("storageDirToCheck") != -1);
  nnStorage.close();
}
 
Example 13
Source Project: hadoop   Source File: TestFSImage.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * In this test case, I have created an image with a file having
 * preferredblockSize = 0. We are trying to read this image (since a file
 * with preferredblockSize = 0 was allowed pre the 2.1.0-beta version).
 * A namenode after version 2.6 will not be able to read this particular
 * file. See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // The pre-built image ships as a tarball in the test cache directory.
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  // Remove leftovers from a previous run before unpacking.
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  // Start on the unpacked image without formatting, in UPGRADE mode.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
 
Example 14
Source Project: hadoop   Source File: TestReplicationPolicyWithNodeGroup.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a NodeGroup-aware mini namenode: configures the NodeGroup block
 * placement policy and topology implementation, formats a fresh name dir,
 * starts the NameNode, and registers the test datanodes in the topology.
 */
@Before
public void setUp() throws Exception {
  FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
  CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  // Set properties to make HDFS aware of NodeGroup.
  CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, 
      BlockPlacementPolicyWithNodeGroup.class.getName());
  CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, 
      NetworkTopologyWithNodeGroup.class.getName());
  
  CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  
  File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
  
  CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  
  DFSTestUtil.formatNameNode(CONF);
  namenode = new NameNode(CONF);
  // Pull the placement policy and topology that the namenode actually built.
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
  }
  setupDataNodeCapacity();
}
 
Example 15
Source Project: hadoop   Source File: TestFileAppendRestart.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This ensures that we can read from
 * such buggy versions correctly, by loading an image created
 * using a namesystem image created with 0.23.1-rc2 exhibiting
 * the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  // The buggy image is checked in as a tarball in the test cache area.
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  // Start (unformatted, UPGRADE mode) on the unpacked image. The original
  // chained .numDataNodes(0) twice; the redundant call has been removed.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    // The appended file from the buggy image must read back at full length.
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
 
Example 16
Source Project: hadoop   Source File: TestFSInputChecker.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates a file on the given local file system and verifies that corrupting
 * either the CRC sidecar file or the data file itself is detected
 * (exercised via checkFileCorruption).
 *
 * @param fileSys checksummed local file system under test
 * @throws IOException on any file system error
 */
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // create a file and verify that checksum corruption results in 
  // a checksum exception on LocalFS
  
  String dir = PathUtils.getTestDirName(getClass());
  Path file = new Path(dir + "/corruption-test.dat");
  Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
  
  writeFile(fileSys, file);
  
  int fileLen = (int)fileSys.getFileStatus(file).getLen();
  
  byte [] buf = new byte[fileLen];

  // Read the whole file back; try-with-resources guarantees the stream is
  // closed even if the read throws (the original leaked it on failure).
  try (InputStream in = fileSys.open(file)) {
    IOUtils.readFully(in, buf, 0, buf.length);
  }
  
  // check .crc corruption
  checkFileCorruption(fileSys, file, crcFile);
  fileSys.delete(file, true);
  
  writeFile(fileSys, file);
  
  // check data corruption
  checkFileCorruption(fileSys, file, file);
  
  fileSys.delete(file, true);
}
 
Example 17
Source Project: big-c   Source File: TestCheckpoint.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Checks that an IOException in NNStorage.writeTransactionIdFile is handled
 * correctly (by removing the storage directory).
 * See https://issues.apache.org/jira/browse/HDFS-2011
 */
@Test
public void testWriteTransactionIdHandlesIOE() throws Exception {
  LOG.info("Check IOException handled correctly by writeTransactionIdFile");
  ArrayList<URI> fsImageDirs = new ArrayList<URI>();
  ArrayList<URI> editsDirs = new ArrayList<URI>();
  // One directory serves as both fsimage and edits storage.
  File filePath =
    new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
  assertTrue("Couldn't create directory storageDirToCheck",
             filePath.exists() || filePath.mkdirs());
  fsImageDirs.add(filePath.toURI());
  editsDirs.add(filePath.toURI());
  NNStorage nnStorage = new NNStorage(new HdfsConfiguration(),
    fsImageDirs, editsDirs);
  try {
    // Sanity check: the storage dir is registered and nothing removed yet.
    assertTrue("List of storage directories didn't have storageDirToCheck.",
               nnStorage.getEditsDirectories().iterator().next().
               toString().indexOf("storageDirToCheck") != -1);
    assertTrue("List of removed storage directories wasn't empty",
               nnStorage.getRemovedStorageDirs().isEmpty());
  } finally {
    // Delete storage directory to cause IOException in writeTransactionIdFile
    assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(),
               filePath.delete());
  }
  // Just call writeTransactionIdFile using any random number
  nnStorage.writeTransactionIdFileToStorage(1);
  // The failed directory must now appear at the end of the removed list.
  List<StorageDirectory> listRsd = nnStorage.getRemovedStorageDirs();
  assertTrue("Removed directory wasn't what was expected",
             listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
             toString().indexOf("storageDirToCheck") != -1);
  nnStorage.close();
}
 
Example 18
Source Project: big-c   Source File: TestFSImage.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * In this test case, I have created an image with a file having
 * preferredblockSize = 0. We are trying to read this image (since a file
 * with preferredblockSize = 0 was allowed pre the 2.1.0-beta version).
 * A namenode after version 2.6 will not be able to read this particular
 * file. See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // The pre-built image ships as a tarball in the test cache directory.
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  // Remove leftovers from a previous run before unpacking.
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  // Start on the unpacked image without formatting, in UPGRADE mode.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
 
Example 19
Source Project: big-c   Source File: TestReplicationPolicyWithNodeGroup.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a NodeGroup-aware mini namenode: configures the NodeGroup block
 * placement policy and topology implementation, formats a fresh name dir,
 * starts the NameNode, and registers the test datanodes in the topology.
 */
@Before
public void setUp() throws Exception {
  FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
  CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  // Set properties to make HDFS aware of NodeGroup.
  CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, 
      BlockPlacementPolicyWithNodeGroup.class.getName());
  CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, 
      NetworkTopologyWithNodeGroup.class.getName());
  
  CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  
  File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
  
  CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  
  DFSTestUtil.formatNameNode(CONF);
  namenode = new NameNode(CONF);
  // Pull the placement policy and topology that the namenode actually built.
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
  }
  setupDataNodeCapacity();
}
 
Example 20
Source Project: big-c   Source File: TestFileAppendRestart.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This ensures that we can read from
 * such buggy versions correctly, by loading an image created
 * using a namesystem image created with 0.23.1-rc2 exhibiting
 * the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  // The buggy image is checked in as a tarball in the test cache area.
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  // Start (unformatted, UPGRADE mode) on the unpacked image. The original
  // chained .numDataNodes(0) twice; the redundant call has been removed.
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    // The appended file from the buggy image must read back at full length.
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
 
Example 21
Source Project: big-c   Source File: TestFSInputChecker.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Creates a file on the given local file system and verifies that corrupting
 * either the CRC sidecar file or the data file itself is detected
 * (exercised via checkFileCorruption).
 *
 * @param fileSys checksummed local file system under test
 * @throws IOException on any file system error
 */
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // create a file and verify that checksum corruption results in 
  // a checksum exception on LocalFS
  
  String dir = PathUtils.getTestDirName(getClass());
  Path file = new Path(dir + "/corruption-test.dat");
  Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
  
  writeFile(fileSys, file);
  
  int fileLen = (int)fileSys.getFileStatus(file).getLen();
  
  byte [] buf = new byte[fileLen];

  // Read the whole file back; try-with-resources guarantees the stream is
  // closed even if the read throws (the original leaked it on failure).
  try (InputStream in = fileSys.open(file)) {
    IOUtils.readFully(in, buf, 0, buf.length);
  }
  
  // check .crc corruption
  checkFileCorruption(fileSys, file, crcFile);
  fileSys.delete(file, true);
  
  writeFile(fileSys, file);
  
  // check data corruption
  checkFileCorruption(fileSys, file, file);
  
  fileSys.delete(file, true);
}
 
Example 22
Source Project: hadoop-ozone   Source File: TestContainerPlacement.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Test capacity based container placement policy with node reports.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws TimeoutException
 */
@Test
@Ignore
public void testContainerPlacementCapacity() throws IOException,
    InterruptedException, TimeoutException {
  OzoneConfiguration conf = getConf();
  final int nodeCount = 4;
  final long capacity = 10L * OzoneConsts.GB;
  final long used = 2L * OzoneConsts.GB;
  final long remaining = capacity - used;

  final File testDir = PathUtils.getTestDir(
      TestContainerPlacement.class);
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
      testDir.getAbsolutePath());
  // Use the capacity-aware placement policy implementation for this test.
  conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
      SCMContainerPlacementCapacity.class, PlacementPolicy.class);

  SCMNodeManager nodeManager = createNodeManager(conf);
  SCMContainerManager containerManager =
      createContainerManager(conf, nodeManager);
  List<DatanodeDetails> datanodes =
      TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);
  XceiverClientManager xceiverClientManager = null;
  try {
    for (DatanodeDetails datanodeDetails : datanodes) {
      nodeManager.processHeartbeat(datanodeDetails);
    }

    //TODO: wait for heartbeat to be processed
    Thread.sleep(4 * 1000);
    // All nodes should be HEALTHY; aggregate stats scale linearly with count.
    assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
    assertEquals(capacity * nodeCount,
        (long) nodeManager.getStats().getCapacity().get());
    assertEquals(used * nodeCount,
        (long) nodeManager.getStats().getScmUsed().get());
    assertEquals(remaining * nodeCount,
        (long) nodeManager.getStats().getRemaining().get());

    xceiverClientManager= new XceiverClientManager(conf);

    // Allocating a container should yield the configured replica count.
    ContainerInfo container = containerManager
        .allocateContainer(
            SCMTestUtils.getReplicationType(conf),
            SCMTestUtils.getReplicationFactor(conf),
            OzoneConsts.OZONE);
    assertEquals(SCMTestUtils.getReplicationFactor(conf).getNumber(),
        containerManager.getContainerReplicas(
            container.containerID()).size());
  } finally {
    // Release managers and clients before deleting the scratch directory.
    IOUtils.closeQuietly(containerManager);
    IOUtils.closeQuietly(nodeManager);
    if (xceiverClientManager != null) {
      xceiverClientManager.close();
    }
    FileUtil.fullyDelete(testDir);
  }
}
 
Example 23
Source Project: hadoop-ozone   Source File: TestSCMNodeManager.java    License: Apache License 2.0 4 votes vote down vote up
/** Resolves the per-class scratch directory used by the node-manager tests. */
@Before
public void setup() {
  testDir = PathUtils.getTestDir(TestSCMNodeManager.class);
}
 
Example 24
Source Project: hadoop   Source File: TestDFSShell.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Exercises FsShell commands (-ls, -rmr, -du, -put, -cp, -cat, -chgrp,
 * -chown) against a remote cluster addressed by full URI, then checks that
 * the bare hdfs:/// scheme also resolves.
 */
@Test (timeout = 30000)
public void testURIPaths() throws Exception {
  Configuration srcConf = new HdfsConfiguration();
  Configuration dstConf = new HdfsConfiguration();
  MiniDFSCluster srcCluster =  null;
  MiniDFSCluster dstCluster = null;
  File bak = new File(PathUtils.getTestDir(getClass()), "dfs_tmp_uri");
  // NOTE(review): mkdirs() return value is ignored — a failure here would
  // surface later when the dst cluster starts; consider asserting it.
  bak.mkdirs();
  try{
    srcCluster = new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
    dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, bak.getAbsolutePath());
    dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
    FileSystem srcFs = srcCluster.getFileSystem();
    FileSystem dstFs = dstCluster.getFileSystem();
    // Shell is configured for the SOURCE cluster; remote access goes via URI.
    FsShell shell = new FsShell();
    shell.setConf(srcConf);
    //check for ls
    String[] argv = new String[2];
    argv[0] = "-ls";
    argv[1] = dstFs.getUri().toString() + "/";
    int ret = ToolRunner.run(shell, argv);
    assertEquals("ls works on remote uri ", 0, ret);
    //check for rm -r 
    dstFs.mkdirs(new Path("/hadoopdir"));
    argv = new String[2];
    argv[0] = "-rmr";
    argv[1] = dstFs.getUri().toString() + "/hadoopdir";
    ret = ToolRunner.run(shell, argv);
    assertEquals("-rmr works on remote uri " + argv[1], 0, ret);
    //check du 
    argv[0] = "-du";
    argv[1] = dstFs.getUri().toString() + "/";
    ret = ToolRunner.run(shell, argv);
    assertEquals("du works on remote uri ", 0, ret);
    //check put
    File furi = new File(TEST_ROOT_DIR, "furi");
    createLocalFile(furi);
    argv = new String[3];
    argv[0] = "-put";
    argv[1] = furi.toURI().toString();
    argv[2] = dstFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertEquals(" put is working ", 0, ret);
    //check cp 
    argv[0] = "-cp";
    argv[1] = dstFs.getUri().toString() + "/furi";
    argv[2] = srcFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertEquals(" cp is working ", 0, ret);
    assertTrue(srcFs.exists(new Path("/furi")));
    //check cat 
    argv = new String[2];
    argv[0] = "-cat";
    argv[1] = dstFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertEquals(" cat is working ", 0, ret);
    //check chown
    dstFs.delete(new Path("/furi"), true);
    dstFs.delete(new Path("/hadoopdir"), true);
    String file = "/tmp/chownTest";
    Path path = new Path(file);
    Path parent = new Path("/tmp");
    Path root = new Path("/");
    TestDFSShell.writeFile(dstFs, path);
    runCmd(shell, "-chgrp", "-R", "herbivores", dstFs.getUri().toString() +"/*");
    confirmOwner(null, "herbivores", dstFs, parent, path);
    runCmd(shell, "-chown", "-R", ":reptiles", dstFs.getUri().toString() + "/");
    confirmOwner(null, "reptiles", dstFs, root, parent, path);
    //check if default hdfs:/// works 
    argv[0] = "-cat";
    argv[1] = "hdfs:///furi";
    ret = ToolRunner.run(shell, argv);
    assertEquals(" default works for cat", 0, ret);
    argv[0] = "-ls";
    argv[1] = "hdfs:///";
    ret = ToolRunner.run(shell, argv);
    assertEquals("default works for ls ", 0, ret);
    argv[0] = "-rmr";
    argv[1] = "hdfs:///furi";
    ret = ToolRunner.run(shell, argv);
    assertEquals("default works for rm/rmr", 0, ret);
  } finally {
    // Shut both clusters down even if an assertion above failed.
    if (null != srcCluster) {
      srcCluster.shutdown();
    }
    if (null != dstCluster) {
      dstCluster.shutdown();
    }
  }
}
 
Example 25
Source Project: hadoop   Source File: TestBlockStoragePolicy.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Verifies that {@code chooseTarget} returns a full set of 3 targets on a
 * small 3-node / 2-rack topology for two block storage policies whose
 * preferred storage-type order differs (SSD-first vs DISK-first).
 */
@Test
public void testChooseTargetWithTopology() throws Exception {
  // Two policies identical except for the order of preferred storage types.
  BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
  BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
      new StorageType[]{StorageType.DISK, StorageType.SSD,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  // One storage per host; each host gets a distinct storage type.
  final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] types = {StorageType.DISK, StorageType.SSD,
      StorageType.ARCHIVE};

  final DatanodeStorageInfo[] storages = DFSTestUtil
      .createDatanodeStorageInfos(3, racks, hosts, types);
  final DatanodeDescriptor[] dataNodes = DFSTestUtil
      .toDatanodeDescriptor(storages);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);
  try {
    final BlockManager bm = namenode.getNamesystem().getBlockManager();
    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
    for (DatanodeDescriptor datanode : dataNodes) {
      cluster.add(datanode);
    }

    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
        dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
        new HashSet<Node>(), 0, policy1);
    System.out.println(Arrays.asList(targets));
    Assert.assertEquals(3, targets.length);
    targets = replicator.chooseTarget("/foo", 3,
        dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
        new HashSet<Node>(), 0, policy2);
    System.out.println(Arrays.asList(targets));
    Assert.assertEquals(3, targets.length);
  } finally {
    // Always release the NameNode's RPC server and ports, even when an
    // assertion above fails; otherwise later tests can hit bind conflicts.
    namenode.stop();
  }
}
 
Example 26
Source Project: hadoop   Source File: TestBlockStoragePolicy.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Verifies that with an SSD-first storage policy, {@code chooseTarget}
 * prefers the (single-rack-constrained) SSD storage for the first replica
 * and falls back to DISK for the second, on nodes that expose both types.
 */
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  // Three same-rack nodes, each starting with a DISK storage...
  final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};

  final DatanodeStorageInfo[] diskStorages
      = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes
      = DFSTestUtil.toDatanodeDescriptor(diskStorages);
  // ...and each additionally registered with an SSD storage.
  for (int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
            StorageType.SSD));
  }

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);
  try {
    final BlockManager bm = namenode.getNamesystem().getBlockManager();
    BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
    NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
    for (DatanodeDescriptor datanode : dataNodes) {
      cluster.add(datanode);
    }

    DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
        dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
        new HashSet<Node>(), 0, policy);
    System.out.println(policy.getName() + ": " + Arrays.asList(targets));
    // Only 2 of the 3 requested replicas can be placed on this topology.
    Assert.assertEquals(2, targets.length);
    Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
    Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
  } finally {
    // Always release the NameNode's RPC server and ports, even when an
    // assertion above fails; otherwise later tests can hit bind conflicts.
    namenode.stop();
  }
}
 
Example 27
Source Project: hadoop   Source File: TestEditLogJournalFailures.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * With 4 redundant name/edits directories and a configured minimum of 2,
 * the NameNode must survive the first two journal failures but abort the
 * edit once a third journal fails during setReadyToFlush.
 */
@Test
public void testMultipleRedundantFailedEditsDirOnSetReadyToFlush()
    throws IOException {
  // Rebuild the mini cluster with 4 name/edits directories.
  shutDownMiniCluster();
  Configuration conf = new HdfsConfiguration();
  final int numDirs = 4;
  String[] nameDirs = new String[numDirs];
  for (int dirIdx = 0; dirIdx < numDirs; dirIdx++) {
    File dir = new File(PathUtils.getTestDir(getClass()), "name-dir" + dirIdx);
    dir.mkdirs();
    nameDirs[dirIdx] = dir.getAbsolutePath();
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      StringUtils.join(nameDirs, ","));

  // Keep running unless fewer than 2 edits dirs remain usable.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, 2);
  setUpMiniCluster(conf, false);

  // With every journal healthy, an edit succeeds and the NN stays up
  // (no ExitException is thrown).
  assertTrue(doAnEdit());

  // Knock out journals one at a time; with 1/4 and then 2/4 failed the
  // configured minimum of 2 is still met, so edits keep succeeding.
  for (int failedIdx = 0; failedIdx < 2; failedIdx++) {
    invalidateEditsDirAtIndex(failedIdx, false, false);
    assertTrue(doAnEdit());
  }

  // A third failure leaves only 1 healthy journal — below the minimum.
  invalidateEditsDirAtIndex(2, false, false);

  try {
    doAnEdit();
    fail("A failure of more than the minimum number of redundant journals "
        + "should have halted ");
  } catch (RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains(
        "Could not sync enough journals to persistent storage due to " +
        "setReadyToFlush failed for too many journals. " +
        "Unsynced transactions: 1", re);
  }
}
 
Example 28
Source Project: hadoop   Source File: TestStartup.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Round-trips the fsimage through every compression transition:
 * uncompressed -&gt; default codec -&gt; Gzip codec -&gt; uncompressed -&gt;
 * uncompressed, verifying via {@code checkNameSpace} that the namespace
 * survives each save/load cycle.
 */
@Test
public void testCompression() throws IOException {
  LOG.info("Test compressing image.");
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
  File baseDir = new File(PathUtils.getTestDir(getClass()), "dfs/");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  // Permissions are irrelevant here and would only complicate mkdirs below.
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);

  DFSTestUtil.formatNameNode(conf);

  // Create an uncompressed image containing a single directory, then save
  // the namespace and shut the NameNode down cleanly.
  LOG.info("Create an uncompressed fsimage");
  NameNode namenode = new NameNode(conf);
  namenode.getNamesystem().mkdirs("/test",
      new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  assertTrue(nnRpc.getFileInfo("/test").isDir());
  nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  nnRpc.saveNamespace();
  namenode.stop();
  namenode.join();

  // Compress the image using the default codec.
  LOG.info("Read an uncompressed image and store it compressed using default codec.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  checkNameSpace(conf);

  // Read the default-codec image and re-compress it with Gzip.
  LOG.info("Read a compressed image and store it using a different codec.");
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      "org.apache.hadoop.io.compress.GzipCodec");
  checkNameSpace(conf);

  // Read the Gzip image and store it uncompressed again.
  LOG.info("Read a compressed image and store it as uncompressed.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
  checkNameSpace(conf);

  // Finally, an uncompressed read/write round trip.
  LOG.info("Read an uncompressed image and store it as uncompressed.");
  checkNameSpace(conf);
}
 
Example 29
Source Project: hadoop   Source File: TestReplicationPolicyConsiderLoad.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * One-time fixture: builds a 6-datanode / 2-rack namesystem with
 * stale-node avoidance and load consideration enabled, then registers
 * every datanode and seeds its storage utilization and heartbeat so
 * placement-under-load tests can run against it.
 */
@BeforeClass
public static void setupCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/rack1",
      "/rack1",
      "/rack1",
      "/rack2",
      "/rack2",
      "/rack2"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  // The behaviors under test: avoid stale nodes and consider load.
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);
  final int blockSize = 1024;

  dnrList = new ArrayList<DatanodeRegistration>();
  dnManager = namenode.getNamesystem().getBlockManager().getDatanodeManager();

  // Register all datanodes. Iterate over dataNodes.length rather than a
  // hard-coded 6 so the racks array above stays the single source of truth.
  for (int i = 0; i < dataNodes.length; i++) {
    DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
        new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
        VersionInfo.getVersion());
    dnrList.add(dnr);
    dnManager.registerDatanode(dnr);
    // Give each node identical capacity headroom so load, not space,
    // drives placement decisions in the tests.
    dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L);
    dataNodes[i].updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
        0L, 0L, 0, 0, null);
  }
}
 
Example 30
Source Project: hadoop   Source File: TestReplicationPolicy.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * One-time fixture: builds a 6-datanode / 3-rack topology (with an extra
 * storage on the last node), starts a NameNode with stale-node avoidance
 * enabled, wires the datanodes into the network topology and heartbeat
 * manager, and resets storage heartbeats for the placement tests.
 */
@BeforeClass
public static void setupCluster() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/d1/r1",
      "/d1/r1",
      "/d1/r2",
      "/d1/r2",
      "/d2/r3",
      "/d2/r3"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);

  // Create an extra storage for dn5 so one node reports two storages.
  DatanodeStorage extraStorage = new DatanodeStorage(
      storages[5].getStorageID() + "-extra", DatanodeStorage.State.NORMAL,
      StorageType.DEFAULT);
  BlockManagerTestUtil.updateStorage(storages[5].getDatanodeDescriptor(),
      extraStorage);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());

  // The behavior under test: avoid stale datanodes for reads and writes.
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // Construct the network topology and register heartbeats.
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
    bm.getDatanodeManager().getHeartbeatManager().addDatanode(
        dataNodes[i]);
  }
  resetHeartbeatForStorages();
}