Java Code Examples for org.apache.hadoop.test.GenericTestUtils#getTestDir()

The following examples show how to use org.apache.hadoop.test.GenericTestUtils#getTestDir(). You can go to the original project or source file by following the links above each example.
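Before the project examples, here is a minimal, self-contained sketch of the usual pattern: resolve a directory named after the test class with getTestDir(String), create it during setup, and delete it during teardown. The class name MyFeatureTest and the commons-io based cleanup are illustrative assumptions, not taken from any project below.

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.commons.io.FileUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import static org.junit.Assert.assertTrue;

public class MyFeatureTest {

  private File testDir;

  @Before
  public void setUp() throws IOException {
    // Resolve a per-class directory under the shared test data root
    // (typically controlled by the "test.build.data" system property).
    testDir = GenericTestUtils.getTestDir(MyFeatureTest.class.getSimpleName());
    if (!testDir.exists() && !testDir.mkdirs()) {
      throw new IOException("Unable to create test directory " + testDir);
    }
  }

  @Test
  public void writesIntoIsolatedDirectory() throws IOException {
    File data = new File(testDir, "data.txt");
    FileUtils.writeStringToFile(data, "hello", StandardCharsets.UTF_8);
    assertTrue(data.exists());
  }

  @After
  public void tearDown() throws IOException {
    // Remove the directory so repeated runs start from a clean state.
    FileUtils.deleteDirectory(testDir);
  }
}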
Example 1
Source File: TestSCMPipelineManager.java    From hadoop-ozone with Apache License 2.0
@Before
public void setUp() throws Exception {
  conf = new OzoneConfiguration();
  conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1);
  testDir = GenericTestUtils
      .getTestDir(TestSCMPipelineManager.class.getSimpleName());
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
  boolean folderExisted = testDir.exists() || testDir.mkdirs();
  if (!folderExisted) {
    throw new IOException("Unable to create test directory path");
  }
  nodeManager = new MockNodeManager(true, 20);

  scmMetadataStore = new SCMMetadataStoreImpl(conf);
}
 
Example 2
Source File: TestMetadataStore.java    From hadoop-ozone with Apache License 2.0
@Test
public void testDestroyDB() throws IOException {
  // create a new DB to test db destroy
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);

  File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
      + "-" + storeImpl.toLowerCase() + "-toDestroy");
  MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
      .setConf(conf)
      .setCreateIfMissing(true)
      .setDbFile(dbDir)
      .build();

  dbStore.put(getBytes("key1"), getBytes("value1"));
  dbStore.put(getBytes("key2"), getBytes("value2"));

  assertFalse(dbStore.isEmpty());
  assertTrue(dbDir.exists());
  assertTrue(dbDir.listFiles().length > 0);

  dbStore.destroy();

  assertFalse(dbDir.exists());
}
 
Example 3
Source File: TestMetadataStore.java    From hadoop-ozone with Apache License 2.0
@Test
public void testdbTypeNotSet() throws IOException {

  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
  GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
  GenericTestUtils.LogCapturer logCapturer =
      GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);

  File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
      + "-" + storeImpl.toLowerCase() + "-test");
  MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf)
      .setCreateIfMissing(true).setDbFile(dbDir).build();
  assertTrue(logCapturer.getOutput().contains("dbType is null, using dbType" +
      " " + storeImpl));
  dbStore.close();
  dbStore.destroy();
  FileUtils.deleteDirectory(dbDir);

}
 
Example 4
Source File: TestMetadataStore.java    From hadoop-ozone with Apache License 2.0
@Test
public void testMetaStoreConfigDifferentFromType() throws IOException {

  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
  String dbType;
  GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
  GenericTestUtils.LogCapturer logCapturer =
      GenericTestUtils.LogCapturer.captureLogs(MetadataStoreBuilder.LOG);
  if (storeImpl.equals(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB)) {
    dbType = "RocksDB";
  } else {
    dbType = "LevelDB";
  }

  File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
      + "-" + dbType.toLowerCase() + "-test");
  MetadataStore dbStore = MetadataStoreBuilder.newBuilder().setConf(conf)
      .setCreateIfMissing(true).setDbFile(dbDir).setDBType(dbType).build();
  assertTrue(logCapturer.getOutput().contains("Using dbType " + dbType + "" +
      " for metastore"));
  dbStore.close();
  dbStore.destroy();
  FileUtils.deleteDirectory(dbDir);

}
 
Example 5
Source File: TestRocksDBStoreMBean.java    From hadoop-ozone with Apache License 2.0
private RocksDBStore getTestRocksDBStoreWithData() throws IOException {
  File testDir =
      GenericTestUtils.getTestDir(getClass().getSimpleName() + "-withstat");

  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS, "ALL");

  RocksDBStore metadataStore =
      (RocksDBStore) MetadataStoreBuilder.newBuilder().setConf(conf)
          .setCreateIfMissing(true).setDbFile(testDir).build();

  for (int i = 0; i < 10; i++) {
    metadataStore.put("key".getBytes(UTF_8), "value".getBytes(UTF_8));
  }

  return metadataStore;
}
 
Example 6
Source File: TestRocksDBStoreMBean.java    From hadoop-ozone with Apache License 2.0
@Test()
public void testDisabledStat() throws Exception {
  File testDir = GenericTestUtils
      .getTestDir(getClass().getSimpleName() + "-withoutstat");

  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
      OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF);

  RocksDBStore metadataStore =
      (RocksDBStore) MetadataStoreBuilder.newBuilder().setConf(conf)
          .setCreateIfMissing(true).setDbFile(testDir).build();

  Assert.assertNull(metadataStore.getStatMBeanName());
}
 
Example 7
Source File: TestCloseContainerEventHandler.java    From hadoop-ozone with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  configuration = SCMTestUtils.getConf();
  size = (long)configuration.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
      OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
  testDir = GenericTestUtils
      .getTestDir(TestCloseContainerEventHandler.class.getSimpleName());
  configuration
      .set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  configuration.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 16);
  nodeManager = new MockNodeManager(true, 10);
  eventQueue = new EventQueue();
  scmMetadataStore = new SCMMetadataStoreImpl(configuration);

  pipelineManager =
      new SCMPipelineManager(configuration, nodeManager,
          scmMetadataStore.getPipelineTable(), eventQueue);
  pipelineManager.allowPipelineCreation();
  PipelineProvider mockRatisProvider =
      new MockRatisPipelineProvider(nodeManager,
          pipelineManager.getStateManager(), configuration, eventQueue);
  pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
      mockRatisProvider);
  containerManager = new SCMContainerManager(
          configuration,
          scmMetadataStore.getContainerTable(),
          scmMetadataStore.getStore(),
          pipelineManager);
  pipelineManager.triggerPipelineCreation();
  eventQueue.addHandler(CLOSE_CONTAINER,
      new CloseContainerEventHandler(
              pipelineManager,
              containerManager));
  eventQueue.addHandler(DATANODE_COMMAND, nodeManager);
  // Move all pipelines created by background from ALLOCATED to OPEN state
  Thread.sleep(2000);
  TestUtils.openAllRatisPipelines(pipelineManager);
}
 
Example 8
Source File: TestMetadataStore.java    From hadoop-ozone with Apache License 2.0
@Before
public void init() throws IOException {
  if (OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(storeImpl)) {
    // The initialization of RocksDB fails on Windows
    assumeNotWindows();
  }

  testDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
      + "-" + storeImpl.toLowerCase());

  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);

  store = MetadataStoreBuilder.newBuilder()
      .setConf(conf)
      .setCreateIfMissing(true)
      .setDbFile(testDir)
      .build();

  // Add 20 entries.
  // {a0 : a-value0} to {a9 : a-value9}
  // {b0 : b-value0} to {b9 : b-value9}
  for (int i = 0; i < 10; i++) {
    store.put(getBytes("a" + i), getBytes("a-value" + i));
    store.put(getBytes("b" + i), getBytes("b-value" + i));
  }
}
 
Example 9
Source File: TestSCMContainerManager.java    From hadoop-ozone with Apache License 2.0
@BeforeClass
public static void setUp() throws Exception {
  OzoneConfiguration conf = SCMTestUtils.getConf();

  testDir = GenericTestUtils
      .getTestDir(TestSCMContainerManager.class.getSimpleName());
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
      testDir.getAbsolutePath());
  boolean folderExisted = testDir.exists() || testDir.mkdirs();
  if (!folderExisted) {
    throw new IOException("Unable to create test directory path");
  }
  nodeManager = new MockNodeManager(true, 10);
  SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(conf);
  pipelineManager =
      new SCMPipelineManager(conf, nodeManager,
          scmMetadataStore.getPipelineTable(), new EventQueue());
  pipelineManager.allowPipelineCreation();
  containerManager = new SCMContainerManager(conf,
      scmMetadataStore.getContainerTable(),
      scmMetadataStore.getStore(),
      pipelineManager);
  xceiverClientManager = new XceiverClientManager(conf);
  replicationFactor = SCMTestUtils.getReplicationFactor(conf);
  replicationType = SCMTestUtils.getReplicationType(conf);
  random = new Random();
}
 
Example 10
Source File: TestDeletedBlockLog.java    From hadoop-ozone with Apache License 2.0
@Before
public void setup() throws Exception {
  testDir = GenericTestUtils.getTestDir(
      TestDeletedBlockLog.class.getSimpleName());
  conf = new OzoneConfiguration();
  conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  scm = TestUtils.getScm(conf);
  containerManager = Mockito.mock(SCMContainerManager.class);
  deletedBlockLog = new DeletedBlockLogImpl(conf, containerManager,
      scm.getScmMetadataStore());
  dnList = new ArrayList<>(3);
  setupContainerManager();
}
 
Example 11
Source File: TestBlockDeletingService.java    From hadoop-ozone with Apache License 2.0
@BeforeClass
public static void init() throws IOException {
  testRoot = GenericTestUtils
      .getTestDir(TestBlockDeletingService.class.getSimpleName());
  if (testRoot.exists()) {
    FileUtils.cleanDirectory(testRoot);
  }
  scmId = UUID.randomUUID().toString();
  clusterID = UUID.randomUUID().toString();
}
 
Example 12
Source File: TestSecureOzoneCluster.java    From hadoop-ozone with Apache License 2.0
@Before
public void init() {
  try {
    conf = new OzoneConfiguration();
    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");

    conf.setInt(OZONE_SCM_CLIENT_PORT_KEY,
        getPort(OZONE_SCM_CLIENT_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_DATANODE_PORT_KEY,
        getPort(OZONE_SCM_DATANODE_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
        getPort(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
        getPort(OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100));

    DefaultMetricsSystem.setMiniClusterMode(true);
    final String path = folder.newFolder().toString();
    Path metaDirPath = Paths.get(path, "om-meta");
    conf.set(OZONE_METADATA_DIRS, metaDirPath.toString());
    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
    conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());

    workDir = GenericTestUtils.getTestDir(getClass().getSimpleName());

    startMiniKdc();
    setSecureConfig();
    createCredentialsInKDC();
    generateKeyPair();
    // OzoneManager.setTestSecureOmFlag(true);
  } catch (Exception e) {
    LOG.error("Failed to initialize TestSecureOzoneCluster", e);
  }
}
 
Example 13
Source File: TestDelegationToken.java    From hadoop-ozone with Apache License 2.0
@Before
public void init() {
  try {
    conf = new OzoneConfiguration();
    conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost");

    conf.setInt(OZONE_SCM_CLIENT_PORT_KEY,
        getPort(OZONE_SCM_CLIENT_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_DATANODE_PORT_KEY,
        getPort(OZONE_SCM_DATANODE_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_BLOCK_CLIENT_PORT_KEY,
        getPort(OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, 100));
    conf.setInt(OZONE_SCM_SECURITY_SERVICE_PORT_KEY,
        getPort(OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT, 100));

    DefaultMetricsSystem.setMiniClusterMode(true);
    final String path = folder.newFolder().toString();
    Path metaDirPath = Paths.get(path, "om-meta");
    conf.set(OZONE_METADATA_DIRS, metaDirPath.toString());
    conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
    conf.set(HADOOP_SECURITY_AUTHENTICATION, KERBEROS.name());

    workDir = GenericTestUtils.getTestDir(getClass().getSimpleName());

    startMiniKdc();
    setSecureConfig();
    createCredentialsInKDC();
    generateKeyPair();
    //      OzoneManager.setTestSecureOmFlag(true);
  } catch (Exception e) {
    LOG.error("Failed to initialize TestSecureOzoneCluster", e);
  }
}
 
Example 14
Source File: TestMetadataStore.java    From hadoop-ozone with Apache License 2.0
@Test
public void testBatchWrite() throws IOException {
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);

  File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
      + "-" + storeImpl.toLowerCase() + "-batchWrite");
  MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
      .setConf(conf)
      .setCreateIfMissing(true)
      .setDbFile(dbDir)
      .build();

  List<String> expectedResult = Lists.newArrayList();
  for (int i = 0; i < 10; i++) {
    dbStore.put(getBytes("batch-" + i), getBytes("batch-value-" + i));
    expectedResult.add("batch-" + i);
  }

  BatchOperation batch = new BatchOperation();
  batch.delete(getBytes("batch-2"));
  batch.delete(getBytes("batch-3"));
  batch.delete(getBytes("batch-4"));
  batch.put(getBytes("batch-new-2"), getBytes("batch-new-value-2"));

  expectedResult.remove("batch-2");
  expectedResult.remove("batch-3");
  expectedResult.remove("batch-4");
  expectedResult.add("batch-new-2");

  dbStore.writeBatch(batch);

  Iterator<String> it = expectedResult.iterator();
  AtomicInteger count = new AtomicInteger(0);
  dbStore.iterate(null, (key, value) -> {
    count.incrementAndGet();
    return it.hasNext() && it.next().equals(getString(key));
  });

  assertEquals(8, count.get());
}
 
Example 15
Source File: TestOzoneAtRestEncryption.java    From hadoop-ozone with Apache License 2.0
/**
 * Create a MiniOzoneCluster for testing.
 * <p>
 * Ozone is made active by setting OZONE_ENABLED = true
 *
 * @throws IOException
 */
@BeforeClass
public static void init() throws Exception {
  testDir = GenericTestUtils.getTestDir(
      TestSecureOzoneRpcClient.class.getSimpleName());

  File kmsDir = new File(testDir, UUID.randomUUID().toString());
  Assert.assertTrue(kmsDir.mkdirs());
  MiniKMS.Builder miniKMSBuilder = new MiniKMS.Builder();
  miniKMS = miniKMSBuilder.setKmsConfDir(kmsDir).build();
  miniKMS.start();

  OzoneManager.setTestSecureOmFlag(true);
  conf = new OzoneConfiguration();
  conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
      getKeyProviderURI(miniKMS));
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, true);
  conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  CertificateClientTestImpl certificateClientTest =
      new CertificateClientTestImpl(conf);
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(10)
      .setScmId(SCM_ID)
      .setCertificateClient(certificateClientTest)
      .build();
  cluster.getOzoneManager().startSecretManager();
  cluster.waitForClusterToBeReady();
  ozClient = OzoneClientFactory.getRpcClient(conf);
  store = ozClient.getObjectStore();
  storageContainerLocationClient =
      cluster.getStorageContainerLocationClient();
  ozoneManager = cluster.getOzoneManager();
  TestOzoneRpcClient.setCluster(cluster);
  TestOzoneRpcClient.setOzClient(ozClient);
  TestOzoneRpcClient.setOzoneManager(ozoneManager);
  TestOzoneRpcClient.setStorageContainerLocationClient(
      storageContainerLocationClient);
  TestOzoneRpcClient.setStore(store);
  TestOzoneRpcClient.setScmId(SCM_ID);

  // create test key
  createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf);
}
 
Example 16
Source File: TestSecureOzoneRpcClient.java    From hadoop-ozone with Apache License 2.0
/**
 * Create a MiniOzoneCluster for testing.
 * <p>
 * Ozone is made active by setting OZONE_ENABLED = true
 *
 * @throws IOException
 */
@BeforeClass
public static void init() throws Exception {
  testDir = GenericTestUtils.getTestDir(
      TestSecureOzoneRpcClient.class.getSimpleName());
  OzoneManager.setTestSecureOmFlag(true);
  conf = new OzoneConfiguration();
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1);
  conf.setBoolean(HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED, true);
  conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  CertificateClientTestImpl certificateClientTest =
      new CertificateClientTestImpl(conf);
  cluster = MiniOzoneCluster.newBuilder(conf)
      .setNumDatanodes(10)
      .setScmId(SCM_ID)
      .setCertificateClient(certificateClientTest)
      .build();
  String user = UserGroupInformation.getCurrentUser().getShortUserName();
  secretManager = new OzoneBlockTokenSecretManager(new SecurityConfig(conf),
      60 * 60, certificateClientTest.getCertificate()
          .getSerialNumber().toString());
  secretManager.start(certificateClientTest);
  Token<OzoneBlockTokenIdentifier> token = secretManager.generateToken(
      user, EnumSet.allOf(AccessModeProto.class), 60 * 60);
  UserGroupInformation.getCurrentUser().addToken(token);
  cluster.getOzoneManager().startSecretManager();
  cluster.waitForClusterToBeReady();
  ozClient = OzoneClientFactory.getRpcClient(conf);
  store = ozClient.getObjectStore();
  String volumeName = HddsClientUtils.getS3VolumeName(conf);
  store.createVolume(volumeName);
  storageContainerLocationClient =
      cluster.getStorageContainerLocationClient();
  ozoneManager = cluster.getOzoneManager();
  TestOzoneRpcClient.setCluster(cluster);
  TestOzoneRpcClient.setOzClient(ozClient);
  TestOzoneRpcClient.setOzoneManager(ozoneManager);
  TestOzoneRpcClient.setStorageContainerLocationClient(
      storageContainerLocationClient);
  TestOzoneRpcClient.setStore(store);
  TestOzoneRpcClient.setScmId(SCM_ID);
}
 
Example 17
Source File: TestGenerateOzoneRequiredConfigurations.java    From hadoop-ozone with Apache License 2.0
/**
 * Creates output directory which will be used by the test-cases.
 * If a test-case needs a separate directory, it has to create a random
 * directory inside {@code outputBaseDir}.
 *
 * @throws Exception In case of exception while creating output directory.
 */
@BeforeClass
public static void init() throws Exception {
  outputBaseDir = GenericTestUtils.getTestDir();
  FileUtils.forceMkdir(outputBaseDir);
  genconfTool = new GenerateOzoneRequiredConfigurations();
}