Java Code Examples for org.apache.hadoop.test.PathUtils#getTestDir()

The following examples show how to use org.apache.hadoop.test.PathUtils#getTestDir(). They are drawn from open source projects; the source file, project, and license are noted above each example.
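PathUtils#getTestDir(Class) returns a java.io.File that serves as a scratch directory for the calling test class, and the examples below all follow the same pattern: resolve that directory, point configuration keys or subdirectories at it, and delete whatever was created once the test finishes. The minimal JUnit 4 sketch below condenses that pattern; the class name TestPathUtilsUsage, the test method, and the "scratch" subdirectory are hypothetical and chosen only for illustration, and the exact location of the returned directory is left unasserted because it depends on the build's test configuration.

import java.io.File;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.test.PathUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import static org.junit.Assert.assertTrue;

/**
 * Hypothetical test class illustrating the common usage pattern;
 * it is not part of any of the projects referenced below.
 */
public class TestPathUtilsUsage {

  private File testDir;
  private File scratch;

  @Before
  public void setUp() {
    // Per-class test directory provided by the Hadoop test utilities.
    testDir = PathUtils.getTestDir(TestPathUtilsUsage.class);
    // Subdirectory for this particular test; the name is arbitrary.
    scratch = new File(testDir, "scratch");
  }

  @Test
  public void writesIntoTestDir() {
    // Create the scratch area, then exercise code that reads/writes under it.
    assertTrue(scratch.exists() || scratch.mkdirs());
  }

  @After
  public void tearDown() {
    // Same cleanup idiom as the examples below (commons-io).
    FileUtils.deleteQuietly(scratch);
  }
}

Cleaning up in an @After method or a finally block keeps repeated runs from seeing stale state, which is why most of the examples below wrap their assertions in try/finally with FileUtils.deleteQuietly or FileUtil.fullyDelete.
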
Example 1
Source File: TestServerUtils.java    From hadoop-ozone with Apache License 2.0
/**
 * Test {@link ServerUtils#getScmDbDir}.
 */
@Test
public void testGetScmDbDir() {
  final File testDir = PathUtils.getTestDir(TestServerUtils.class);
  final File dbDir = new File(testDir, "scmDbDir");
  final File metaDir = new File(testDir, "metaDir");
  final OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath());
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());

  try {
    assertFalse(metaDir.exists());
    assertFalse(dbDir.exists());
    assertEquals(dbDir, ServerUtils.getScmDbDir(conf));
    assertTrue(dbDir.exists());
    assertFalse(metaDir.exists());
  } finally {
    FileUtils.deleteQuietly(dbDir);
  }
}
 
Example 2
Source File: TestHddsServerUtils.java    From hadoop-ozone with Apache License 2.0
/**
 * Test {@link ServerUtils#getScmDbDir} with fallback to OZONE_METADATA_DIRS
 * when OZONE_SCM_DB_DIRS is undefined.
 */
@Test
public void testGetScmDbDirWithFallback() {
  final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
  final File metaDir = new File(testDir, "metaDir");
  final OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
  try {
    assertEquals(metaDir, ServerUtils.getScmDbDir(conf));
    assertTrue(metaDir.exists());        // should have been created.
  } finally {
    FileUtils.deleteQuietly(metaDir);
  }
}
 
Example 3
Source File: TestReplicationPolicyWithNodeGroup.java    From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
  CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  // Set properties to make HDFS aware of NodeGroup.
  CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, 
      BlockPlacementPolicyWithNodeGroup.class.getName());
  CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, 
      NetworkTopologyWithNodeGroup.class.getName());
  
  CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  
  File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
  
  CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  
  DFSTestUtil.formatNameNode(CONF);
  namenode = new NameNode(CONF);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
  }
  setupDataNodeCapacity();
}
 
Example 4
Source File: TestReplicationPolicyWithNodeGroup.java    From hadoop with Apache License 2.0
@Before
public void setUp() throws Exception {
  FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
  CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  // Set properties to make HDFS aware of NodeGroup.
  CONF.set(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY, 
      BlockPlacementPolicyWithNodeGroup.class.getName());
  CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, 
      NetworkTopologyWithNodeGroup.class.getName());
  
  CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  
  File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
  
  CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  
  DFSTestUtil.formatNameNode(CONF);
  namenode = new NameNode(CONF);
  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for(int i=0; i<NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
  }
  setupDataNodeCapacity();
}
 
Example 5
Source File: TestCheckpoint.java    From hadoop with Apache License 2.0
/**
 * Checks that an IOException in NNStorage.writeTransactionIdFile is handled
 * correctly (by removing the storage directory)
 * See https://issues.apache.org/jira/browse/HDFS-2011
 */
@Test
public void testWriteTransactionIdHandlesIOE() throws Exception {
  LOG.info("Check IOException handled correctly by writeTransactionIdFile");
  ArrayList<URI> fsImageDirs = new ArrayList<URI>();
  ArrayList<URI> editsDirs = new ArrayList<URI>();
  File filePath =
    new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
  assertTrue("Couldn't create directory storageDirToCheck",
             filePath.exists() || filePath.mkdirs());
  fsImageDirs.add(filePath.toURI());
  editsDirs.add(filePath.toURI());
  NNStorage nnStorage = new NNStorage(new HdfsConfiguration(),
    fsImageDirs, editsDirs);
  try {
    assertTrue("List of storage directories didn't have storageDirToCheck.",
               nnStorage.getEditsDirectories().iterator().next().
               toString().indexOf("storageDirToCheck") != -1);
    assertTrue("List of removed storage directories wasn't empty",
               nnStorage.getRemovedStorageDirs().isEmpty());
  } finally {
    // Delete storage directory to cause IOException in writeTransactionIdFile 
    assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(),
               filePath.delete());
  }
  // Just call writeTransactionIdFile using any random number
  nnStorage.writeTransactionIdFileToStorage(1);
  List<StorageDirectory> listRsd = nnStorage.getRemovedStorageDirs();
  assertTrue("Removed directory wasn't what was expected",
             listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
             toString().indexOf("storageDirToCheck") != -1);
  nnStorage.close();
}
 
Example 6
Source File: TestCheckpoint.java    From big-c with Apache License 2.0
/**
 * Checks that an IOException in NNStorage.writeTransactionIdFile is handled
 * correctly (by removing the storage directory)
 * See https://issues.apache.org/jira/browse/HDFS-2011
 */
@Test
public void testWriteTransactionIdHandlesIOE() throws Exception {
  LOG.info("Check IOException handled correctly by writeTransactionIdFile");
  ArrayList<URI> fsImageDirs = new ArrayList<URI>();
  ArrayList<URI> editsDirs = new ArrayList<URI>();
  File filePath =
    new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
  assertTrue("Couldn't create directory storageDirToCheck",
             filePath.exists() || filePath.mkdirs());
  fsImageDirs.add(filePath.toURI());
  editsDirs.add(filePath.toURI());
  NNStorage nnStorage = new NNStorage(new HdfsConfiguration(),
    fsImageDirs, editsDirs);
  try {
    assertTrue("List of storage directories didn't have storageDirToCheck.",
               nnStorage.getEditsDirectories().iterator().next().
               toString().indexOf("storageDirToCheck") != -1);
    assertTrue("List of removed storage directories wasn't empty",
               nnStorage.getRemovedStorageDirs().isEmpty());
  } finally {
    // Delete storage directory to cause IOException in writeTransactionIdFile 
    assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(),
               filePath.delete());
  }
  // Just call writeTransactionIdFile using any random number
  nnStorage.writeTransactionIdFileToStorage(1);
  List<StorageDirectory> listRsd = nnStorage.getRemovedStorageDirs();
  assertTrue("Removed directory wasn't what was expected",
             listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
             toString().indexOf("storageDirToCheck") != -1);
  nnStorage.close();
}
 
Example 7
Source File: TestServerUtils.java    From hadoop-ozone with Apache License 2.0
@Test
public void ozoneMetadataDirAcceptsSingleItem() {
  final File testDir = PathUtils.getTestDir(TestServerUtils.class);
  final File metaDir = new File(testDir, "metaDir");
  final OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());

  try {
    assertFalse(metaDir.exists());
    assertEquals(metaDir, ServerUtils.getOzoneMetaDirPath(conf));
    assertTrue(metaDir.exists());
  } finally {
    FileUtils.deleteQuietly(metaDir);
  }
}
 
Example 8
Source File: TestEndPoint.java    From hadoop-ozone with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  serverAddress = SCMTestUtils.getReuseableAddress();
  scmServerImpl = new ScmTestMock();
  scmServer = SCMTestUtils.startScmRpcServer(SCMTestUtils.getConf(),
      scmServerImpl, serverAddress, 10);
  testDir = PathUtils.getTestDir(TestEndPoint.class);
  config = SCMTestUtils.getConf();
  config.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
  config.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  config
      .setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
  config.set(HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL, "1s");
}
 
Example 9
Source File: TestMiniDFSCluster.java    From big-c with Apache License 2.0
@Before
public void setUp() {
  testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters");
}
 
Example 10
Source File: TestReplicationPolicyConsiderLoad.java    From big-c with Apache License 2.0
@BeforeClass
public static void setupCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/rack1",
      "/rack1",
      "/rack1",
      "/rack2",
      "/rack2",
      "/rack2"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);
  int blockSize = 1024;

  dnrList = new ArrayList<DatanodeRegistration>();
  dnManager = namenode.getNamesystem().getBlockManager().getDatanodeManager();

  // Register DNs
  for (int i=0; i < 6; i++) {
    DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
        new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
        VersionInfo.getVersion());
    dnrList.add(dnr);
    dnManager.registerDatanode(dnr);
    dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L);
    dataNodes[i].updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
        0L, 0L, 0, 0, null);
  }
}
 
Example 11
Source File: TestBlockStoragePolicy.java    From hadoop with Apache License 2.0
@Test
public void testChooseTargetWithTopology() throws Exception {
  BlockStoragePolicy policy1 = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});
  BlockStoragePolicy policy2 = new BlockStoragePolicy((byte) 11, "TEST2",
      new StorageType[]{StorageType.DISK, StorageType.SSD,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r2", "/d1/r2"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] types = {StorageType.DISK, StorageType.SSD,
      StorageType.ARCHIVE};

  final DatanodeStorageInfo[] storages = DFSTestUtil
      .createDatanodeStorageInfos(3, racks, hosts, types);
  final DatanodeDescriptor[] dataNodes = DFSTestUtil
      .toDatanodeDescriptor(storages);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy1);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
  targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy2);
  System.out.println(Arrays.asList(targets));
  Assert.assertEquals(3, targets.length);
}
 
Example 12
Source File: TestBlockStoragePolicy.java    From hadoop with Apache License 2.0
@Test
public void testChooseSsdOverDisk() throws Exception {
  BlockStoragePolicy policy = new BlockStoragePolicy((byte) 9, "TEST1",
      new StorageType[]{StorageType.SSD, StorageType.DISK,
          StorageType.ARCHIVE}, new StorageType[]{}, new StorageType[]{});

  final String[] racks = {"/d1/r1", "/d1/r1", "/d1/r1"};
  final String[] hosts = {"host1", "host2", "host3"};
  final StorageType[] disks = {StorageType.DISK, StorageType.DISK, StorageType.DISK};

  final DatanodeStorageInfo[] diskStorages
      = DFSTestUtil.createDatanodeStorageInfos(3, racks, hosts, disks);
  final DatanodeDescriptor[] dataNodes
      = DFSTestUtil.toDatanodeDescriptor(diskStorages);
  for(int i = 0; i < dataNodes.length; i++) {
    BlockManagerTestUtil.updateStorage(dataNodes[i],
        new DatanodeStorage("ssd" + i, DatanodeStorage.State.NORMAL,
            StorageType.SSD));
  }

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  DFSTestUtil.formatNameNode(conf);
  NameNode namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  BlockPlacementPolicy replicator = bm.getBlockPlacementPolicy();
  NetworkTopology cluster = bm.getDatanodeManager().getNetworkTopology();
  for (DatanodeDescriptor datanode : dataNodes) {
    cluster.add(datanode);
  }

  DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
      dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
      new HashSet<Node>(), 0, policy);
  System.out.println(policy.getName() + ": " + Arrays.asList(targets));
  Assert.assertEquals(2, targets.length);
  Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());
  Assert.assertEquals(StorageType.DISK, targets[1].getStorageType());
}
 
Example 13
Source File: TestSCMNodeManager.java    From hadoop-ozone with Apache License 2.0
@Before
public void setup() {
  testDir = PathUtils.getTestDir(TestSCMNodeManager.class);
}
 
Example 14
Source File: TestStartup.java    From hadoop with Apache License 2.0
@Test
public void testCompression() throws IOException {
  LOG.info("Test compressing image.");
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
  File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(base_dir, "name").getPath());
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);

  DFSTestUtil.formatNameNode(conf);

  // create an uncompressed image
  LOG.info("Create an uncompressed fsimage");
  NameNode namenode = new NameNode(conf);
  namenode.getNamesystem().mkdirs("/test",
      new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  assertTrue(nnRpc.getFileInfo("/test").isDir());
  nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  nnRpc.saveNamespace();
  namenode.stop();
  namenode.join();

  // compress image using default codec
  LOG.info("Read an uncomressed image and store it compressed using default codec.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  checkNameSpace(conf);

  // read image compressed using the default and compress it using Gzip codec
  LOG.info("Read a compressed image and store it using a different codec.");
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      "org.apache.hadoop.io.compress.GzipCodec");
  checkNameSpace(conf);

  // read an image compressed in Gzip and store it uncompressed
  LOG.info("Read a compressed image and store it as uncompressed.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
  checkNameSpace(conf);

  // read an uncompressed image and store it uncompressed
  LOG.info("Read an uncompressed image and store it as uncompressed.");
  checkNameSpace(conf);
}
 
Example 15
Source File: TestReplicationPolicyConsiderLoad.java    From hadoop with Apache License 2.0
@BeforeClass
public static void setupCluster() throws IOException {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/rack1",
      "/rack1",
      "/rack1",
      "/rack2",
      "/rack2",
      "/rack2"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);
  int blockSize = 1024;

  dnrList = new ArrayList<DatanodeRegistration>();
  dnManager = namenode.getNamesystem().getBlockManager().getDatanodeManager();

  // Register DNs
  for (int i=0; i < 6; i++) {
    DatanodeRegistration dnr = new DatanodeRegistration(dataNodes[i],
        new StorageInfo(NodeType.DATA_NODE), new ExportedBlockKeys(),
        VersionInfo.getVersion());
    dnrList.add(dnr);
    dnManager.registerDatanode(dnr);
    dataNodes[i].getStorageInfos()[0].setUtilizationForTesting(
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L,
        2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*blockSize, 0L);
    dataNodes[i].updateHeartbeat(
        BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[i]),
        0L, 0L, 0, 0, null);
  }
}
 
Example 16
Source File: TestContainerPlacement.java    From hadoop-ozone with Apache License 2.0
/**
 * Test capacity based container placement policy with node reports.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws TimeoutException
 */
@Test
@Ignore
public void testContainerPlacementCapacity() throws IOException,
    InterruptedException, TimeoutException {
  OzoneConfiguration conf = getConf();
  final int nodeCount = 4;
  final long capacity = 10L * OzoneConsts.GB;
  final long used = 2L * OzoneConsts.GB;
  final long remaining = capacity - used;

  final File testDir = PathUtils.getTestDir(
      TestContainerPlacement.class);
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
      testDir.getAbsolutePath());
  conf.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
      SCMContainerPlacementCapacity.class, PlacementPolicy.class);

  SCMNodeManager nodeManager = createNodeManager(conf);
  SCMContainerManager containerManager =
      createContainerManager(conf, nodeManager);
  List<DatanodeDetails> datanodes =
      TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);
  XceiverClientManager xceiverClientManager = null;
  try {
    for (DatanodeDetails datanodeDetails : datanodes) {
      nodeManager.processHeartbeat(datanodeDetails);
    }

    //TODO: wait for heartbeat to be processed
    Thread.sleep(4 * 1000);
    assertEquals(nodeCount, nodeManager.getNodeCount(HEALTHY));
    assertEquals(capacity * nodeCount,
        (long) nodeManager.getStats().getCapacity().get());
    assertEquals(used * nodeCount,
        (long) nodeManager.getStats().getScmUsed().get());
    assertEquals(remaining * nodeCount,
        (long) nodeManager.getStats().getRemaining().get());

    xceiverClientManager = new XceiverClientManager(conf);

    ContainerInfo container = containerManager
        .allocateContainer(
            SCMTestUtils.getReplicationType(conf),
            SCMTestUtils.getReplicationFactor(conf),
            OzoneConsts.OZONE);
    assertEquals(SCMTestUtils.getReplicationFactor(conf).getNumber(),
        containerManager.getContainerReplicas(
            container.containerID()).size());
  } finally {
    IOUtils.closeQuietly(containerManager);
    IOUtils.closeQuietly(nodeManager);
    if (xceiverClientManager != null) {
      xceiverClientManager.close();
    }
    FileUtil.fullyDelete(testDir);
  }
}
 
Example 17
Source File: TestReplicationPolicy.java    From hadoop with Apache License 2.0
@BeforeClass
public static void setupCluster() throws Exception {
  Configuration conf = new HdfsConfiguration();
  final String[] racks = {
      "/d1/r1",
      "/d1/r1",
      "/d1/r2",
      "/d1/r2",
      "/d2/r3",
      "/d2/r3"};
  storages = DFSTestUtil.createDatanodeStorageInfos(racks);
  dataNodes = DFSTestUtil.toDatanodeDescriptor(storages);

  // create an extra storage for dn5.
  DatanodeStorage extraStorage = new DatanodeStorage(
      storages[5].getStorageID() + "-extra", DatanodeStorage.State.NORMAL,
      StorageType.DEFAULT);
  BlockManagerTestUtil.updateStorage(storages[5].getDatanodeDescriptor(),
      extraStorage);

  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
  File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class);
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(baseDir, "name").getPath());

  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
  conf.setBoolean(
      DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
  DFSTestUtil.formatNameNode(conf);
  namenode = new NameNode(conf);

  final BlockManager bm = namenode.getNamesystem().getBlockManager();
  replicator = bm.getBlockPlacementPolicy();
  cluster = bm.getDatanodeManager().getNetworkTopology();
  // construct network topology
  for (int i = 0; i < NUM_OF_DATANODES; i++) {
    cluster.add(dataNodes[i]);
    bm.getDatanodeManager().getHeartbeatManager().addDatanode(
        dataNodes[i]);
  }
  resetHeartbeatForStorages();
}
 
Example 18
Source File: TestMiniDFSCluster.java    From hadoop with Apache License 2.0
@Before
public void setUp() {
  testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters");
}
 
Example 19
Source File: TestEditLogJournalFailures.java    From big-c with Apache License 2.0
@Test
public void testMultipleRedundantFailedEditsDirOnSetReadyToFlush()
    throws IOException {
  // Set up 4 name/edits dirs.
  shutDownMiniCluster();
  Configuration conf = new HdfsConfiguration();
  String[] nameDirs = new String[4];
  for (int i = 0; i < nameDirs.length; i++) {
    File nameDir = new File(PathUtils.getTestDir(getClass()), "name-dir" + i);
    nameDir.mkdirs();
    nameDirs[i] = nameDir.getAbsolutePath();
  }
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      StringUtils.join(nameDirs, ","));
  
  // Keep running unless there are less than 2 edits dirs remaining.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, 2);
  setUpMiniCluster(conf, false);
  
  // All journals active.
  assertTrue(doAnEdit());
  // The NN has not terminated (no ExitException thrown)
  
  // Invalidate 1/4 of the redundant journals.
  invalidateEditsDirAtIndex(0, false, false);
  assertTrue(doAnEdit());
  // The NN has not terminated (no ExitException thrown)

  // Invalidate 2/4 of the redundant journals.
  invalidateEditsDirAtIndex(1, false, false);
  assertTrue(doAnEdit());
  // The NN has not terminated (no ExitException thrown)
  
  // Invalidate 3/4 of the redundant journals.
  invalidateEditsDirAtIndex(2, false, false);

  try {
    doAnEdit();
    fail("A failure of more than the minimum number of redundant journals "
        + "should have halted ");
  } catch (RemoteException re) {
    assertTrue(re.getClassName().contains("ExitException"));
    GenericTestUtils.assertExceptionContains(
        "Could not sync enough journals to persistent storage due to " +
        "setReadyToFlush failed for too many journals. " +
        "Unsynced transactions: 1", re);
  }
}
 
Example 20
Source File: TestStartup.java    From big-c with Apache License 2.0
@Test
public void testCompression() throws IOException {
  LOG.info("Test compressing image.");
  Configuration conf = new Configuration();
  FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
  File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/");
  conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
      new File(base_dir, "name").getPath());
  conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);

  DFSTestUtil.formatNameNode(conf);

  // create an uncompressed image
  LOG.info("Create an uncompressed fsimage");
  NameNode namenode = new NameNode(conf);
  namenode.getNamesystem().mkdirs("/test",
      new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
  NamenodeProtocols nnRpc = namenode.getRpcServer();
  assertTrue(nnRpc.getFileInfo("/test").isDir());
  nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  nnRpc.saveNamespace();
  namenode.stop();
  namenode.join();

  // compress image using default codec
  LOG.info("Read an uncomressed image and store it compressed using default codec.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  checkNameSpace(conf);

  // read image compressed using the default and compress it using Gzip codec
  LOG.info("Read a compressed image and store it using a different codec.");
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      "org.apache.hadoop.io.compress.GzipCodec");
  checkNameSpace(conf);

  // read an image compressed in Gzip and store it uncompressed
  LOG.info("Read a compressed image and store it as uncompressed.");
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
  checkNameSpace(conf);

  // read an uncompressed image and store it uncompressed
  LOG.info("Read an uncompressed image and store it as uncompressed.");
  checkNameSpace(conf);
}