Java Code Examples for org.apache.hadoop.hbase.HBaseTestingUtility#cleanupTestDir()

The following examples show how to use org.apache.hadoop.hbase.HBaseTestingUtility#cleanupTestDir(). You can vote up the examples you like or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: TestCacheConfig.java    From hbase with Apache License 2.0 5 votes vote down vote up
@Test
public void testFileBucketCacheConfig() throws IOException {
  // Fresh utility instance so the backing file lives under its own test data dir.
  HBaseTestingUtility util = new HBaseTestingUtility(this.conf);
  try {
    // Create an empty file to serve as the bucket cache's backing store.
    Path cacheFile = new Path(util.getDataTestDir(), "bc.txt");
    FileSystem fileSystem = FileSystem.get(this.conf);
    fileSystem.create(cacheFile).close();
    // Point the bucket cache IO engine at that file and run the shared config check.
    this.conf.set(HConstants.BUCKET_CACHE_IOENGINE_KEY, "file:" + cacheFile);
    doBucketCacheConfigTest();
  } finally {
    // Remove the test directory whether the check passed or failed.
    util.cleanupTestDir();
  }
}
 
Example 2
Source File: TestVerifyBucketCacheFile.java    From hbase with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that BucketCache starts normally after its cache file has been tampered with.
 * Flow: start a BucketCache, add blocks, shut it down so state is persisted to file; then
 * corrupt the cache file and restart. The restore must fail, the stale cache/persistence
 * files are discarded, and the cache must come up empty.
 * @throws Exception the exception
 */
@Test
public void testModifiedBucketCacheFileData() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Path testDir = testUtil.getDataTestDir();
  testUtil.getTestFileSystem().mkdirs(testDir);

  // Shared paths for both cache instances.
  String cacheFilePath = testDir + "/bucket.cache";
  String persistencePath = testDir + "/bucket.persistence";

  // First instance: starts empty, gets filled with generated blocks.
  BucketCache cache = new BucketCache("file:" + cacheFilePath, capacitySize,
      constructedBlockSize, constructedBlockSizes, writeThreads, writerQLen,
      persistencePath);
  assertEquals(0, cache.getAllocator().getUsedSize());

  CacheTestUtils.HFileBlockPair[] pairs =
      CacheTestUtils.generateHFileBlocks(constructedBlockSize, 1);
  for (CacheTestUtils.HFileBlockPair pair : pairs) {
    cacheAndWaitUntilFlushedToBucket(cache, pair.getBlockName(), pair.getBlock());
  }
  assertNotEquals(0, cache.getAllocator().getUsedSize());
  // Shutdown persists the cache contents to the persistence file.
  cache.shutdown();

  // Overwrite the cache file on disk so the persisted state no longer matches it.
  try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(
      new FileOutputStream(cacheFilePath, false)))) {
    writer.write("test bucket cache");
  }

  // Second instance: restore must fail and the cache must come up empty.
  cache = new BucketCache("file:" + cacheFilePath, capacitySize, constructedBlockSize,
      constructedBlockSizes, writeThreads, writerQLen, persistencePath);
  assertEquals(0, cache.getAllocator().getUsedSize());
  assertEquals(0, cache.backingMap.size());

  testUtil.cleanupTestDir();
}
 
Example 3
Source File: TestRSKilledWhenInitializing.java    From hbase with Apache License 2.0 4 votes vote down vote up
/**
 * Verifies that a RegionServer which goes down after registering with the Master (but
 * before creating its ephemeral znode) is eventually removed from the Master's online
 * servers list. The busy-wait loops below have no internal timeout — the test relies on
 * the surrounding test-runner timeout to fail if cleanup never happens.
 * @throws Exception on any cluster setup or interaction failure
 */
@Test
public void testRSTerminationAfterRegisteringToMasterBeforeCreatingEphemeralNode()
throws Exception {
  // Create config to use for this cluster
  Configuration conf = HBaseConfiguration.create();
  // Let the Master proceed once a single RegionServer has checked in.
  conf.setInt(ServerManager.WAIT_ON_REGIONSERVERS_MINTOSTART, 1);
  // Start the cluster: DFS and ZK via the test utility, HBase via LocalHBaseCluster so we
  // can plug in RegisterAndDieRegionServer (a RS that dies right after registering).
  final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
  TEST_UTIL.startMiniDFSCluster(3);
  TEST_UTIL.startMiniZKCluster();
  TEST_UTIL.createRootDir();
  final LocalHBaseCluster cluster = new LocalHBaseCluster(conf, NUM_MASTERS, NUM_RS,
      HMaster.class, RegisterAndDieRegionServer.class);
  final MasterThread master = startMaster(cluster.getMasters().get(0));
  try {
    // Master is up waiting on RegionServers to check in. Now start RegionServers.
    for (int i = 0; i < NUM_RS; i++) {
      cluster.getRegionServers().get(i).start();
    }
    // Expected total regionservers depends on whether Master can host regions or not.
    int expectedTotalRegionServers = NUM_RS + (LoadBalancer.isTablesOnMaster(conf)? 1: 0);
    List<ServerName> onlineServersList = null;
    // Spin until every expected server has registered with the Master.
    do {
      onlineServersList = master.getMaster().getServerManager().getOnlineServersList();
    } while (onlineServersList.size() < expectedTotalRegionServers);
    // Wait until killedRS is set. Means RegionServer is starting to go down.
    while (killedRS.get() == null) {
      Threads.sleep(1);
    }
    // Wait on the RegionServer to fully die.
    while (cluster.getLiveRegionServers().size() >= expectedTotalRegionServers) {
      Threads.sleep(1);
    }
    // Make sure Master is fully up before progressing. Could take a while if regions
    // being reassigned.
    while (!master.getMaster().isInitialized()) {
      Threads.sleep(1);
    }

    // Now in steady state. How many regions open? Master should have too many regionservers
    // showing still. The downed RegionServer should still be showing as registered.
    assertTrue(master.getMaster().getServerManager().isServerOnline(killedRS.get()));
    // Find non-meta region (namespace?) and assign to the killed server. That'll trigger cleanup.
    Map<RegionInfo, ServerName> assignments = null;
    // Spin until at least two regions are assigned so a non-meta region exists.
    do {
      assignments = master.getMaster().getAssignmentManager().getRegionStates().getRegionAssignments();
    } while (assignments == null || assignments.size() < 2);
    RegionInfo hri = null;
    // Pick the first non-meta region found.
    for (Map.Entry<RegionInfo, ServerName> e: assignments.entrySet()) {
      if (e.getKey().isMetaRegion()) continue;
      hri = e.getKey();
      break;
    }
    // Try moving region to the killed server. It will fail. As by-product, we will
    // remove the RS from Master online list because no corresponding znode.
    assertEquals(expectedTotalRegionServers,
      master.getMaster().getServerManager().getOnlineServersList().size());
    LOG.info("Move " + hri.getEncodedName() + " to " + killedRS.get());
    master.getMaster().move(hri.getEncodedNameAsBytes(),
        Bytes.toBytes(killedRS.get().toString()));

    // TODO: This test could do more to verify fix. It could create a table
    // and do round-robin assign. It should fail if zombie RS. HBASE-19515.

    // Wait until the RS no longer shows as registered in Master.
    while (onlineServersList.size() > (NUM_RS + 1)) {
      Thread.sleep(100);
      onlineServersList = master.getMaster().getServerManager().getOnlineServersList();
    }
  } finally {
    // Shutdown is messy with complaints about fs being closed. Why? TODO.
    cluster.shutdown();
    cluster.join();
    TEST_UTIL.shutdownMiniDFSCluster();
    TEST_UTIL.shutdownMiniZKCluster();
    TEST_UTIL.cleanupTestDir();
  }
}
 
Example 4
Source File: RunLocalTest.java    From hadoop-arch-book with Apache License 2.0 2 votes vote down vote up
public static void main(String[] args) throws Exception{

    // Stand up a single-master, single-RS HBase mini cluster for a local end-to-end run.
    HBaseTestingUtility htu = HBaseTestingUtility.createLocalHTU();
    Configuration config = htu.getConfiguration();

    // Start from a clean slate: wipe any leftover test dir before booting ZK + HBase.
    htu.cleanupTestDir();
    htu.startMiniZKCluster();
    htu.startMiniHBaseCluster(1, 1);

    // Recreate the schema fresh on every run.
    RemoveTables.executeDeleteTables(config);

    CreateTables.executeCreateTables(config);



    //Start up servers
    Server flumeTestServer = startTestFlumeServer(4243);

    List<String> flumePorts = new ArrayList<String>();
    flumePorts.add("127.0.0.1:4243");
    EventReviewServer server = new EventReviewServer(4242, config, flumePorts, false);
    server.startServer();

    EventClient client = new EventClient("127.0.0.1", 4242);
    client.startClient();

    HConnection connection = HConnectionManager.createConnection(config);

    //populate initial data
    populateUserProfileData(connection);
    populateValidationRules(connection);

    //populate user events
    UserEvent userEvent = new UserEvent("101", System.currentTimeMillis(),
            "127.0.0.1", "1", "55555",
            "42", 100.0, "101", true);

    client.submitUserEvent(userEvent);

    //shut down servers
    client.closeClient();
    server.closeServer();
    stopTestFlumeServer(flumeTestServer);
    // Close the HBase connection before tearing down the cluster; the original
    // leaked it, leaving its internal ZK watcher and thread pool running.
    connection.close();
    htu.shutdownMiniCluster();

  }