org.apache.hadoop.test.PathUtils Java Examples

The following examples show how to use org.apache.hadoop.test.PathUtils. Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
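Both helpers exercised below resolve a scratch directory for a test class under the build's test-data area. The following is a minimal sketch of typical usage, assuming the Hadoop test artifacts and commons-io are on the classpath; the wrapper class PathUtilsSketch, the main method, and the final cleanup are illustrative assumptions rather than part of any example on this page.

import java.io.File;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.test.PathUtils;

public class PathUtilsSketch {

  public static void main(String[] args) {
    // Scratch directory for this class as a java.io.File; the exact layout
    // under the test build area depends on the Hadoop version in use.
    File testDir = PathUtils.getTestDir(PathUtilsSketch.class);

    // The same location as a String, which is convenient for building
    // org.apache.hadoop.fs.Path instances, as several examples below do.
    String testDirName = PathUtils.getTestDirName(PathUtilsSketch.class);
    Path workDir = new Path(testDirName, "work-dir");

    System.out.println("test dir:  " + testDir.getAbsolutePath());
    System.out.println("work path: " + workDir);

    // Clean up, mirroring the FileUtils.deleteQuietly() calls in the
    // hadoop-ozone examples below.
    FileUtils.deleteQuietly(testDir);
  }
}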
Example #1
Source File: MapReduceTest.java    From vespa with Apache License 2.0
@BeforeClass
public static void setUp() throws IOException {
    hdfsBaseDir = new File(PathUtils.getTestDir(MapReduceTest.class).getCanonicalPath());

    conf = new HdfsConfiguration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsBaseDir.getAbsolutePath());
    conf.set(VespaConfiguration.DRYRUN, "true");
    conf.set(VespaConfiguration.ENDPOINT, "endpoint-does-not-matter-in-dryrun");

    cluster = new MiniDFSCluster.Builder(conf).build();
    hdfs = FileSystem.get(conf);

    metricsJsonPath = new Path("metrics_json");
    metricsCsvPath = new Path("metrics_csv");
    copyToHdfs("src/test/resources/operations_data.json", metricsJsonPath, "data");
    copyToHdfs("src/test/resources/tabular_data.csv", metricsCsvPath, "data");
}
 
Example #2
Source File: TestDecommission.java    From big-c with Apache License 2.0
@Before
public void setup() throws IOException {
  conf = new HdfsConfiguration();
  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
  hostsFile = new Path(dir, "hosts");
  excludeFile = new Path(dir, "exclude");
  
  // Setup conf
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
  conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, NAMENODE_REPLICATION_INTERVAL);

  writeConfigFile(hostsFile, null);
  writeConfigFile(excludeFile, null);
}
 
Example #3
Source File: TestServerUtils.java    From hadoop-ozone with Apache License 2.0
/**
 * Test {@link ServerUtils#getScmDbDir}.
 */
@Test
public void testGetScmDbDir() {
  final File testDir = PathUtils.getTestDir(TestServerUtils.class);
  final File dbDir = new File(testDir, "scmDbDir");
  final File metaDir = new File(testDir, "metaDir");
  final OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath());
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());

  try {
    assertFalse(metaDir.exists());
    assertFalse(dbDir.exists());
    assertEquals(dbDir, ServerUtils.getScmDbDir(conf));
    assertTrue(dbDir.exists());
    assertFalse(metaDir.exists());
  } finally {
    FileUtils.deleteQuietly(dbDir);
  }
}
 
Example #4
Source File: TestClusterId.java    From big-c with Apache License 2.0
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();

  String baseDir = PathUtils.getTestDirName(getClass());

  hdfsDir = new File(baseDir, "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());

  // as some tests might change these values we reset them to defaults before
  // every test
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);
  
  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
 
Example #5
Source File: TestDecommission.java    From hadoop with Apache License 2.0
@Before
public void setup() throws IOException {
  conf = new HdfsConfiguration();
  // Set up the hosts/exclude files.
  localFileSys = FileSystem.getLocal(conf);
  Path workingDir = localFileSys.getWorkingDirectory();
  dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission");
  hostsFile = new Path(dir, "hosts");
  excludeFile = new Path(dir, "exclude");
  
  // Setup conf
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, false);
  conf.set(DFSConfigKeys.DFS_HOSTS, hostsFile.toUri().getPath());
  conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 2000);
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL_MSEC);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, NAMENODE_REPLICATION_INTERVAL);

  writeConfigFile(hostsFile, null);
  writeConfigFile(excludeFile, null);
}
 
Example #6
Source File: TestClusterId.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws IOException {
  ExitUtil.disableSystemExit();

  String baseDir = PathUtils.getTestDirName(getClass());

  hdfsDir = new File(baseDir, "dfs/name");
  if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
    throw new IOException("Could not delete test directory '" + hdfsDir + "'");
  }
  LOG.info("hdfsdir is " + hdfsDir.getAbsolutePath());

  // as some tests might change these values we reset them to defaults before
  // every test
  StartupOption.FORMAT.setForceFormat(false);
  StartupOption.FORMAT.setInteractiveFormat(true);
  
  config = new Configuration();
  config.set(DFS_NAMENODE_NAME_DIR_KEY, hdfsDir.getPath());
}
 
Example #7
Source File: TestHddsServerUtils.java    From hadoop-ozone with Apache License 2.0
/**
 * Test {@link ServerUtils#getScmDbDir}.
 */
@Test
public void testGetScmDbDir() {
  final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
  final File dbDir = new File(testDir, "scmDbDir");
  final File metaDir = new File(testDir, "metaDir");   // should be ignored.
  final OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath());
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());

  try {
    assertEquals(dbDir, ServerUtils.getScmDbDir(conf));
    assertTrue(dbDir.exists());          // should have been created.
  } finally {
    FileUtils.deleteQuietly(dbDir);
  }
}
 
Example #8
Source File: TestFSInputChecker.java    From hadoop with Apache License 2.0
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // create a file and verify that checksum corruption results in 
  // a checksum exception on LocalFS
  
  String dir = PathUtils.getTestDirName(getClass());
  Path file = new Path(dir + "/corruption-test.dat");
  Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
  
  writeFile(fileSys, file);
  
  int fileLen = (int)fileSys.getFileStatus(file).getLen();
  
  byte [] buf = new byte[fileLen];

  InputStream in = fileSys.open(file);
  IOUtils.readFully(in, buf, 0, buf.length);
  in.close();
  
  // check .crc corruption
  checkFileCorruption(fileSys, file, crcFile);
  fileSys.delete(file, true);
  
  writeFile(fileSys, file);
  
  // check data corruption
  checkFileCorruption(fileSys, file, file);
  
  fileSys.delete(file, true);
}
 
Example #9
Source File: TestFSInputChecker.java    From big-c with Apache License 2.0
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // create a file and verify that checksum corruption results in 
  // a checksum exception on LocalFS
  
  String dir = PathUtils.getTestDirName(getClass());
  Path file = new Path(dir + "/corruption-test.dat");
  Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
  
  writeFile(fileSys, file);
  
  int fileLen = (int)fileSys.getFileStatus(file).getLen();
  
  byte [] buf = new byte[fileLen];

  InputStream in = fileSys.open(file);
  IOUtils.readFully(in, buf, 0, buf.length);
  in.close();
  
  // check .crc corruption
  checkFileCorruption(fileSys, file, crcFile);
  fileSys.delete(file, true);
  
  writeFile(fileSys, file);
  
  // check data corruption
  checkFileCorruption(fileSys, file, file);
  
  fileSys.delete(file, true);
}
 
Example #10
Source File: TestFileAppendRestart.java    From big-c with Apache License 2.0
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This ensures that we can read from
 * such buggy versions correctly, by loading a namesystem image
 * created with 0.23.1-rc2 that exhibits the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
 
Example #11
Source File: TestReplicationPolicyWithNodeGroup.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
  CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  // Set properties to make HDFS aware of NodeGroup.
  CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, 
      BlockPlacementPolicyWithNodeGroup.class.getName());
  CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, 
      NetworkTopologyWithNodeGroup.class.getName());
  
  CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  
  File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
  
  CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  
  DFSTestUtil.formatNameNode(CONF);
  namenode = new NameNode(CONF);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
  }
  setupDataNodeCapacity();
}
 
Example #12
Source File: TestFSImage.java    From big-c with Apache License 2.0
/**
 * In this test case, I have created an image with a file having
 * preferredblockSize = 0. We are trying to read this image (since a file with
 * preferredblockSize = 0 was allowed prior to the 2.1.0-beta release). The
 * namenode in versions after 2.6 will not be able to read this particular file.
 * See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
 
Example #13
Source File: TestCheckpoint.java    From big-c with Apache License 2.0
/**
 * Checks that an IOException in NNStorage.writeTransactionIdFile is handled
 * correctly (by removing the storage directory)
 * See https://issues.apache.org/jira/browse/HDFS-2011
 */
@Test
public void testWriteTransactionIdHandlesIOE() throws Exception {
  LOG.info("Check IOException handled correctly by writeTransactionIdFile");
  ArrayList<URI> fsImageDirs = new ArrayList<URI>();
  ArrayList<URI> editsDirs = new ArrayList<URI>();
  File filePath =
    new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
  assertTrue("Couldn't create directory storageDirToCheck",
             filePath.exists() || filePath.mkdirs());
  fsImageDirs.add(filePath.toURI());
  editsDirs.add(filePath.toURI());
  NNStorage nnStorage = new NNStorage(new HdfsConfiguration(),
    fsImageDirs, editsDirs);
  try {
    assertTrue("List of storage directories didn't have storageDirToCheck.",
               nnStorage.getEditsDirectories().iterator().next().
               toString().indexOf("storageDirToCheck") != -1);
    assertTrue("List of removed storage directories wasn't empty",
               nnStorage.getRemovedStorageDirs().isEmpty());
  } finally {
    // Delete storage directory to cause IOException in writeTransactionIdFile 
    assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(),
               filePath.delete());
  }
  // Just call writeTransactionIdFile using any random number
  nnStorage.writeTransactionIdFileToStorage(1);
  List<StorageDirectory> listRsd = nnStorage.getRemovedStorageDirs();
  assertTrue("Removed directory wasn't what was expected",
             listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
             toString().indexOf("storageDirToCheck") != -1);
  nnStorage.close();
}
 
Example #14
Source File: TestFileAppendRestart.java    From hadoop with Apache License 2.0
/**
 * Earlier versions of HDFS had a bug (HDFS-2991) which caused
 * append(), when called exactly at a block boundary,
 * to not log an OP_ADD. This ensures that we can read from
 * such buggy versions correctly, by loading a namesystem image
 * created with 0.23.1-rc2 that exhibits the issue.
 */
@Test
public void testLoadLogsFromBuggyEarlierVersions() throws IOException {
  final Configuration conf = new HdfsConfiguration();

  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_23_BROKEN_APPEND_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-buggy-append");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);

  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .waitSafeMode(false)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/io_data/test_io_0");
    assertEquals(2*1024*1024, fs.getFileStatus(testPath).getLen());
  } finally {
    cluster.shutdown();
  }
}
 
Example #15
Source File: TestReplicationPolicyWithNodeGroup.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
  CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  // Set properties to make HDFS aware of NodeGroup.
  CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, 
      BlockPlacementPolicyWithNodeGroup.class.getName());
  CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, 
      NetworkTopologyWithNodeGroup.class.getName());
  
  CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  
  File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
  
  CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  
  DFSTestUtil.formatNameNode(CONF);
  namenode = new NameNode(CONF);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
  }
  setupDataNodeCapacity();
}
 
Example #16
Source File: TestHddsServerUtils.java    From hadoop-ozone with Apache License 2.0
/**
 * Test {@link ServerUtils#getScmDbDir} with fallback to OZONE_METADATA_DIRS
 * when OZONE_SCM_DB_DIRS is undefined.
 */
@Test
public void testGetScmDbDirWithFallback() {
  final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
  final File metaDir = new File(testDir, "metaDir");
  final OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
  try {
    assertEquals(metaDir, ServerUtils.getScmDbDir(conf));
    assertTrue(metaDir.exists());        // should have been created.
  } finally {
    FileUtils.deleteQuietly(metaDir);
  }
}
 
Example #17
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
/**
 * Checks that an IOException in NNStorage.writeTransactionIdFile is handled
 * correctly (by removing the storage directory)
 * See https://issues.apache.org/jira/browse/HDFS-2011
 */
@Test
public void testWriteTransactionIdHandlesIOE() throws Exception {
  LOG.info("Check IOException handled correctly by writeTransactionIdFile");
  ArrayList<URI> fsImageDirs = new ArrayList<URI>();
  ArrayList<URI> editsDirs = new ArrayList<URI>();
  File filePath =
    new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
  assertTrue("Couldn't create directory storageDirToCheck",
             filePath.exists() || filePath.mkdirs());
  fsImageDirs.add(filePath.toURI());
  editsDirs.add(filePath.toURI());
  NNStorage nnStorage = new NNStorage(new HdfsConfiguration(),
    fsImageDirs, editsDirs);
  try {
    assertTrue("List of storage directories didn't have storageDirToCheck.",
               nnStorage.getEditsDirectories().iterator().next().
               toString().indexOf("storageDirToCheck") != -1);
    assertTrue("List of removed storage directories wasn't empty",
               nnStorage.getRemovedStorageDirs().isEmpty());
  } finally {
    // Delete storage directory to cause IOException in writeTransactionIdFile 
    assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(),
               filePath.delete());
  }
  // Just call writeTransactionIdFile using any random number
  nnStorage.writeTransactionIdFileToStorage(1);
  List<StorageDirectory> listRsd = nnStorage.getRemovedStorageDirs();
  assertTrue("Removed directory wasn't what was expected",
             listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
             toString().indexOf("storageDirToCheck") != -1);
  nnStorage.close();
}
 
Example #18
Source File: TestEndPoint.java    From hadoop-ozone with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  serverAddress = SCMTestUtils.getReuseableAddress();
  scmServerImpl = new ScmTestMock();
  scmServer = SCMTestUtils.startScmRpcServer(SCMTestUtils.getConf(),
      scmServerImpl, serverAddress, 10);
  testDir = PathUtils.getTestDir(TestEndPoint.class);
  config = SCMTestUtils.getConf();
  config.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
  config.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  config
      .setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
  config.set(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL, "1s");
}
 
Example #19
Source File: TestServerUtils.java    From hadoop-ozone with Apache License 2.0
/**
 * Test {@link ServerUtils#getScmDbDir} with fallback to OZONE_METADATA_DIRS
 * when OZONE_SCM_DB_DIRS is undefined.
 */
@Test
public void testGetScmDbDirWithFallback() {
  final File testDir = PathUtils.getTestDir(TestServerUtils.class);
  final File metaDir = new File(testDir, "metaDir");
  final OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
  try {
    assertFalse(metaDir.exists());
    assertEquals(metaDir, ServerUtils.getScmDbDir(conf));
    assertTrue(metaDir.exists());
  } finally {
    FileUtils.deleteQuietly(metaDir);
  }
}
 
Example #20
Source File: TestServerUtils.java    From hadoop-ozone with Apache License 2.0
@Test
public void ozoneMetadataDirAcceptsSingleItem() {
  final File testDir = PathUtils.getTestDir(TestServerUtils.class);
  final File metaDir = new File(testDir, "metaDir");
  final OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());

  try {
    assertFalse(metaDir.exists());
    assertEquals(metaDir, ServerUtils.getOzoneMetaDirPath(conf));
    assertTrue(metaDir.exists());
  } finally {
    FileUtils.deleteQuietly(metaDir);
  }
}
 
Example #21
Source File: TestFSImage.java    From hadoop with Apache License 2.0
/**
 * In this test case, I have created an image with a file having
 * preferredblockSize = 0. We are trying to read this image (since a file with
 * preferredblockSize = 0 was allowed prior to the 2.1.0-beta release). The
 * namenode in versions after 2.6 will not be able to read this particular file.
 * See HDFS-7788 for more information.
 * @throws Exception
 */
@Test
public void testZeroBlockSize() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-with-zero-block-size");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));
  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, 
      nameDir.getAbsolutePath());
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
      .format(false)
      .manageDataDfsDirs(false)
      .manageNameDfsDirs(false)
      .waitSafeMode(false)
      .startupOption(StartupOption.UPGRADE)
      .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/tmp/zeroBlockFile");
    assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
    assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
  } finally {
    cluster.shutdown();
    //Clean up
    FileUtil.fullyDelete(dfsDir);
  }
}
 
Example #22
Source File: TestEditLogJournalFailures.java    From big-c with Apache License 2.0
@Test
public void testMultipleRedundantFailedEditsDirOnSetReadyToFlush()
    throws IOException {
  // Set up 4 name/edits dirs.
  shutDownMiniCluster();
  Configuration conf = new HdfsConfiguration();
  String[] nameDirs = new String[4];
  for (int i = 0; i < nameDirs.length; i++) {
    File nameDir = new File(PathUtils.getTestDir(getClass()), "name-dir" + i);
    nameDir.mkdirs();
    nameDirs[i] = nameDir.getAbsolutePath();
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      StringUtils.join(nameDirs, ","));
  
  // Keep running unless fewer than 2 edits dirs remain.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, 2);
  setUpMiniCluster(conf, false);
  
  // All journals active.
  assertTrue(doAnEdit());
  // The NN has not terminated (no ExitException thrown)
  
  // Invalidate 1/4 of the redundant journals.
  invalidateEditsDirAtIndex(0, false, false);
  assertTrue(doAnEdit());
  // The NN has not terminated (no ExitException thrown)

  // Invalidate 2/4 of the redundant journals.
  invalidateEditsDirAtIndex(1, false, false);
  assertTrue(doAnEdit());
  // The NN has not terminated (no ExitException thrown)
  
  // Invalidate 3/4 of the redundant journals.
  invalidateEditsDirAtIndex(2, false, false);

  try {
    doAnEdit();
    fail("A failure of more than the minimum number of redundant journals "
        + "should have halted ");
  } catch (RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains(
        "Could not sync enough journals to persistent storage due to " +
        "setReadyToFlush failed for too many journals. " +
        "Unsynced transactions: 1", re);
  }
}
 
Example #23
Source File: TestMiniDFSCluster.java    From big-c with Apache License 2.0
@Before
public void setUp() {
  testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters");
}
 
Example #24
Source File: TestContainerPlacement.java    From hadoop-ozone with Apache License 2.0
/**
 * Test capacity based container placement policy with node reports.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws TimeoutException
 */
@Test
@Ignore
public void testContainerPlacementCapacity() throws IOException,
    InterruptedException, TimeoutException {
  OzoneConfiguration conf = getConf();
  final int nodeCount = 4;
  final long capacity = 10L * OzoneConsts.GB;
  final long used = 2L * OzoneConsts.GB;
  final long remaining = capacity - used;

  final File testDir = PathUtils.getTestDir(
      TestContainerPlacement.class);
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
      testDir.getAbsolutePath());
  conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
      SCMContainerPlacementCapacity.class, PlacementPolicy.class);

  SCMNodeManager nodeManager = createNodeManager(conf);
  SCMContainerManager containerManager =
      createContainerManager(conf, nodeManager);
  List<DatanodeDetails> datanodes =
      TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);
  XceiverClientManager xceiverClientManager = null;
  try {
    for (DatanodeDetails datanodeDetails : datanodes) {
      nodeManager.processHeartbeat(datanodeDetails);
    }

    //TODO: wait for heartbeat to be processed
    Thread.sleep(4 * 1000);
    assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
    assertEquals(capacity * nodeCount,
        (long) nodeManager.getStats().getCapacity().get());
    assertEquals(used * nodeCount,
        (long) nodeManager.getStats().getScmUsed().get());
    assertEquals(remaining * nodeCount,
        (long) nodeManager.getStats().getRemaining().get());

    xceiverClientManager = new XceiverClientManager(conf);

    ContainerInfo container = containerManager
        .allocateContainer(
            SCMTestUtils.getReplicationType(conf),
            SCMTestUtils.getReplicationFactor(conf),
            OzoneConsts.OZONE);
    assertEquals(SCMTestUtils.getReplicationFactor(conf).getNumber(),
        containerManager.getContainerReplicas(
            container.containerID()).size());
  } finally {
    IOUtils.closeQuietly(containerManager);
    IOUtils.closeQuietly(nodeManager);
    if (xceiverClientManager != null) {
      xceiverClientManager.close();
    }
    FileUtil.fullyDelete(testDir);
  }
}
 
Example #25
Source File: TestPersistBlocks.java    From big-c with Apache License 2.0
/**
 * Earlier versions of HDFS didn't persist block allocation to the edit log.
 * This makes sure that we can still load an edit log when the OP_CLOSE
 * is the opcode which adds all of the blocks. This is a regression
 * test for HDFS-2773.
 * This test uses a tarred pseudo-distributed cluster from Hadoop 1.0
 * which has a multi-block file. This is similar to the tests in
 * {@link TestDFSUpgradeFromImage} but none of those images include
 * a multi-block file.
 */
@Test
public void testEarlierVersionEditLog() throws Exception {
  final Configuration conf = new HdfsConfiguration();
      
  String tarFile = System.getProperty("test.cache.data", "build/test/cache")
    + "/" + HADOOP_1_0_MULTIBLOCK_TGZ;
  String testDir = PathUtils.getTestDirName(getClass());
  File dfsDir = new File(testDir, "image-1.0");
  if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
    throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
  }
  FileUtil.unTar(new File(tarFile), new File(testDir));

  File nameDir = new File(dfsDir, "name");
  GenericTestUtils.assertExists(nameDir);
  File dataDir = new File(dfsDir, "data");
  GenericTestUtils.assertExists(dataDir);
  
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
  conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDir.getAbsolutePath());
  
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .format(false)
    .manageDataDfsDirs(false)
    .manageNameDfsDirs(false)
    .numDataNodes(1)
    .startupOption(StartupOption.UPGRADE)
    .build();
  try {
    FileSystem fs = cluster.getFileSystem();
    Path testPath = new Path("/user/todd/4blocks");
    // Read it without caring about the actual data within - we just need
    // to make sure that the block states and locations are OK.
    DFSTestUtil.readFile(fs, testPath);
    
    // Ensure that we can append to it - if the blocks were in some funny
    // state we'd get some kind of issue here. 
    FSDataOutputStream stm = fs.append(testPath);
    try {
      stm.write(1);
    } finally {
      IOUtils.closeStream(stm);
    }
  } finally {
    cluster.shutdown();
  }
}
 
Example #26
Source File: TestSCMNodeManager.java    From hadoop-ozone with Apache License 2.0
@Before
public void setup() {
  testDir = PathUtils.getTestDir(
      TestSCMNodeManager.class);
}
 
Example #27
Source File: TestReplicationPolicy.java    From big-c with Apache License 2.0
@BeforeClass
public static void setupCluster() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/d1/r1",
      "/d1/r1",
      "/d1/r2",
      "/d1/r2",
      "/d2/r3",
      "/d2/r3"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());

  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (int i=0; i < NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
    bm.getDatanodeManager().getHeartbeatManager().addDatanode(
        dataNodes[i]);
  }
  for (int i=0; i < NUM_OF_DATANODES; i++) {
    updateHeartbeatWithUsage(dataNodes[i],
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0L, 0L, 0, 0);
  }    
}
 
Example #28
Source File: TestReplicationPolicyConsiderLoad.java    From big-c with Apache License 2.0
@BeforeClass
public static void setupCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/rack1",
      "/rack1",
      "/rack1",
      "/rack2",
      "/rack2",
      "/rack2"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);
  int blockSize = 1024;

  dnrList = new ArrayList<DatanodeRegistration>();
  dnManager = namenode.getNamesystem().getBlockManager().getDatanodeManager();

  // Register DNs
  for (int i=0; i < 6; i++) {
    DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
        new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
        VersionInfo.getVersion());
    dnrList.add(dnr);
    dnManager.registerDatanode(dnr);
    dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L);
    dataNodes[i].updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
        0L, 0L, 0, 0, null);
  }
}
 
Example #29
Source File: TestStartup.java    From big-c with Apache License 2.0
@Test
public void testCompression() throws IOException {
  LOG.info("Test compressing image.");
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
  File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(base_dir, "name").getPath());
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);

  DFSTestUtil.formatNameNode(conf);

  // create an uncompressed image
  LOG.info("Create an uncompressed fsimage");
  NameNode namenode = new NameNode(conf);
  namenode.getNamesystem().mkdirs("/test",
      new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  assertTrue(nnRpc.getFileInfo("/test").isDir());
  nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  nnRpc.saveNamespace();
  namenode.stop();
  namenode.join();

  // compress image using default codec
  LOG.info("Read an uncomressed image and store it compressed using default codec.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  checkNameSpace(conf);

  // read image compressed using the default and compress it using Gzip codec
  LOG.info("Read a compressed image and store it using a different codec.");
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      "org.apache.hadoop.io.compress.GzipCodec");
  checkNameSpace(conf);

  // read an image compressed in Gzip and store it uncompressed
  LOG.info("Read a compressed image and store it as uncompressed.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
  checkNameSpace(conf);

  // read an uncompressed image and store it uncompressed
  LOG.info("Read an uncompressed image and store it as uncompressed.");
  checkNameSpace(conf);
}
 
Example #30
Source File: TestDFSShell.java    From hadoop with Apache License 2.0
@Test (timeout = 30000)
public void testURIPaths() throws Exception {
  Configuration srcConf = new HdfsConfiguration();
  Configuration dstConf = new HdfsConfiguration();
  MiniDFSCluster srcCluster =  null;
  MiniDFSCluster dstCluster = null;
  File bak = new File(PathUtils.getTestDir(getClass()), "dfs_tmp_uri");
  bak.mkdirs();
  try{
    srcCluster = new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
    dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, bak.getAbsolutePath());
    dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
    FileSystem srcFs = srcCluster.getFileSystem();
    FileSystem dstFs = dstCluster.getFileSystem();
    FsShell shell = new FsShell();
    shell.setConf(srcConf);
    //check for ls
    String[] argv = new String[2];
    argv[0] = "-ls";
    argv[1] = dstFs.getUri().toString() + "/";
    int ret = ToolRunner.run(shell, argv);
    assertEquals("ls works on remote uri ", 0, ret);
    //check for rm -r 
    dstFs.mkdirs(new Path("/hadoopdir"));
    argv = new String[2];
    argv[0] = "-rmr";
    argv[1] = dstFs.getUri().toString() + "/hadoopdir";
    ret = ToolRunner.run(shell, argv);
    assertEquals("-rmr works on remote uri " + argv[1], 0, ret);
    //check du 
    argv[0] = "-du";
    argv[1] = dstFs.getUri().toString() + "/";
    ret = ToolRunner.run(shell, argv);
    assertEquals("du works on remote uri ", 0, ret);
    //check put
    File furi = new File(TEST_ROOT_DIR, "furi");
    createLocalFile(furi);
    argv = new String[3];
    argv[0] = "-put";
    argv[1] = furi.toURI().toString();
    argv[2] = dstFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertEquals(" put is working ", 0, ret);
    //check cp 
    argv[0] = "-cp";
    argv[1] = dstFs.getUri().toString() + "/furi";
    argv[2] = srcFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertEquals(" cp is working ", 0, ret);
    assertTrue(srcFs.exists(new Path("/furi")));
    //check cat 
    argv = new String[2];
    argv[0] = "-cat";
    argv[1] = dstFs.getUri().toString() + "/furi";
    ret = ToolRunner.run(shell, argv);
    assertEquals(" cat is working ", 0, ret);
    //check chown
    dstFs.delete(new Path("/furi"), true);
    dstFs.delete(new Path("/hadoopdir"), true);
    String file = "/tmp/chownTest";
    Path path = new Path(file);
    Path parent = new Path("/tmp");
    Path root = new Path("/");
    TestDFSShell.writeFile(dstFs, path);
    runCmd(shell, "-chgrp", "-R", "herbivores", dstFs.getUri().toString() +"/*");
    confirmOwner(null, "herbivores", dstFs, parent, path);
    runCmd(shell, "-chown", "-R", ":reptiles", dstFs.getUri().toString() + "/");
    confirmOwner(null, "reptiles", dstFs, root, parent, path);
    //check if default hdfs:/// works 
    argv[0] = "-cat";
    argv[1] = "hdfs:///furi";
    ret = ToolRunner.run(shell, argv);
    assertEquals(" default works for cat", 0, ret);
    argv[0] = "-ls";
    argv[1] = "hdfs:///";
    ret = ToolRunner.run(shell, argv);
    assertEquals("default works for ls ", 0, ret);
    argv[0] = "-rmr";
    argv[1] = "hdfs:///furi";
    ret = ToolRunner.run(shell, argv);
    assertEquals("default works for rm/rmr", 0, ret);
  } finally {
    if (null != srcCluster) {
      srcCluster.shutdown();
    }
    if (null != dstCluster) {
      dstCluster.shutdown();
    }
  }
}