Java Code Examples for org.apache.hadoop.hbase.HBaseTestingUtility#shutdownMiniCluster()

The following examples show how to use org.apache.hadoop.hbase.HBaseTestingUtility#shutdownMiniCluster(). Each example is taken from an open-source project; the source file and project are noted above each snippet.
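Most of the examples follow the same lifecycle: start a mini cluster (startMiniCluster() or startMiniDFSCluster(...)), run the test body, and call shutdownMiniCluster() in a finally block so the cluster is torn down even when the test fails. A minimal sketch of that pattern, assuming JUnit 4 (the class and method names below are illustrative, not taken from any of the projects listed):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.junit.Test;

public class MiniClusterLifecycleSketch {

  @Test
  public void startAndStopMiniCluster() throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Starts an in-process ZooKeeper, HDFS and HBase cluster.
    util.startMiniCluster();
    try {
      // ... exercise the cluster, e.g. util.createTable(...) or util.getAdmin() ...
    } finally {
      // Always shut the mini cluster down, even if the test body throws.
      util.shutdownMiniCluster();
    }
  }
}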
Example 1
Source File: TestFileLink.java    From hbase with Apache License 2.0
/**
 * Test, on HDFS, that the FileLink is still readable
 * even when the current file gets renamed.
 */
@Test
public void testHDFSLinkReadDuringRename() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
 
Example 2
Source File: HbaseTestUtil.java    From kafka-connect-hbase with Apache License 2.0
/**
 * stops the mini cluster
 */
public static void stopMiniCluster() {
    HBaseTestingUtility testingUtility = getUtility();
    if (testingUtility != null && status.compareAndSet(true, false)) {
        try {
            testingUtility.shutdownMiniCluster();
        } catch (Exception e) {
            status.set(true);
            throw new RuntimeException("Unable to shutdown MiniCluster", e);
        }
    }
}
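Example 2 references a getUtility() accessor and an AtomicBoolean status flag that are declared elsewhere in HbaseTestUtil. A self-contained sketch of how such a helper might be structured, assuming a companion start method (the class name, field names, and startMiniCluster() helper below are assumptions based on the snippet, not the actual kafka-connect-hbase source):

import java.util.concurrent.atomic.AtomicBoolean;

import org.apache.hadoop.hbase.HBaseTestingUtility;

public final class HbaseTestUtilSketch {

    // Tracks whether the mini cluster is currently running; compareAndSet guards
    // against concurrent double-starts and double-shutdowns.
    private static final AtomicBoolean status = new AtomicBoolean(false);
    private static volatile HBaseTestingUtility utility;

    public static HBaseTestingUtility getUtility() {
        return utility;
    }

    // Assumed counterpart to stopMiniCluster(): starts the cluster at most once.
    public static void startMiniCluster() {
        if (status.compareAndSet(false, true)) {
            try {
                utility = new HBaseTestingUtility();
                utility.startMiniCluster();
            } catch (Exception e) {
                status.set(false);
                throw new RuntimeException("Unable to start MiniCluster", e);
            }
        }
    }

    public static void stopMiniCluster() {
        HBaseTestingUtility testingUtility = getUtility();
        if (testingUtility != null && status.compareAndSet(true, false)) {
            try {
                testingUtility.shutdownMiniCluster();
            } catch (Exception e) {
                status.set(true); // restore the flag so shutdown can be retried
                throw new RuntimeException("Unable to shutdown MiniCluster", e);
            }
        }
    }
}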
 
Example 3
Source File: TestWithHBaseCoprocessor.java    From eagle with Apache License 2.0
@BeforeClass
public static void setUpHBase() throws IOException {
    System.setProperty("config.resource", "/application-co.conf");
    Configuration conf = HBaseConfiguration.create();
    conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY, AggregateProtocolEndPoint.class.getName());
    conf.set("zookeeper.znode.parent", getZkZnodeParent());
    conf.setInt("hbase.master.info.port", -1);//avoid port clobbering
    conf.setInt("hbase.regionserver.info.port", -1);//avoid port clobbering

    int attempts = 0;
    hbase = new HBaseTestingUtility(conf);
    boolean successToStart = false;
    while (attempts < 3) {
        try {
            attempts++;
            hbase.startMiniCluster();
            successToStart = true;
            break;
        } catch (Exception e) {
            LOG.error("Failed to start mini cluster (tried {} times): {}", attempts, e.getMessage(), e);
            try {
                hbase.shutdownMiniCluster();
            } catch (Exception e1) {
                LOG.warn(e1.getMessage(), e1);
            }
        }
    }

    Assert.assertTrue("Failed to start mini cluster in " + attempts + " attempts", successToStart);

    HTable table = hbase.createTable("unittest", "f");
    HTableDescriptor descriptor = new HTableDescriptor(table.getTableDescriptor());
    descriptor.addCoprocessor(AggregateProtocolEndPoint.class.getName());
    hbase.getHBaseAdmin().modifyTable("unittest", descriptor);

    System.setProperty("storage.hbase.autoCreateTable","false");
    System.setProperty("storage.hbase.coprocessorEnabled", String.valueOf(true));
    System.setProperty("storage.hbase.zookeeperZnodeParent", getZkZnodeParent());
    System.setProperty("storage.hbase.zookeeperPropertyClientPort", String.valueOf(hbase.getZkCluster().getClientPort()));
}
 
Example 4
Source File: SyncReplicationTestBase.java    From hbase with Apache License 2.0
private static void shutdown(HBaseTestingUtility util) throws Exception {
  if (util.getHBaseCluster() == null) {
    return;
  }
  Admin admin = util.getAdmin();
  if (!admin.listReplicationPeers(Pattern.compile(PEER_ID)).isEmpty()) {
    if (admin
      .getReplicationPeerSyncReplicationState(PEER_ID) != SyncReplicationState.DOWNGRADE_ACTIVE) {
      admin.transitReplicationPeerSyncReplicationState(PEER_ID,
        SyncReplicationState.DOWNGRADE_ACTIVE);
    }
    admin.removeReplicationPeer(PEER_ID);
  }
  util.shutdownMiniCluster();
}
 
Example 5
Source File: TestMasterFailoverBalancerPersistence.java    From hbase with Apache License 2.0
/**
 * Test that if the master fails, the load balancer maintains its
 * state (running or not) when the next master takes over
 *
 * @throws Exception
 */
@Test
public void testMasterFailoverBalancerPersistence() throws Exception {
  // Start the cluster
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(3).build();
  TEST_UTIL.startMiniCluster(option);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();

  assertTrue(cluster.waitForActiveAndReadyMaster());
  HMaster active = cluster.getMaster();
  // check that the balancer is on by default for the active master
  ClusterMetrics clusterStatus = active.getClusterMetrics();
  assertTrue(clusterStatus.getBalancerOn());

  active = killActiveAndWaitForNewActive(cluster);

  // ensure the load balancer is still running on new master
  clusterStatus = active.getClusterMetrics();
  assertTrue(clusterStatus.getBalancerOn());

  // turn off the load balancer
  active.balanceSwitch(false);

  // once more, kill active master and wait for new active master to show up
  active = killActiveAndWaitForNewActive(cluster);

  // ensure the load balancer is not running on the new master
  clusterStatus = active.getClusterMetrics();
  assertFalse(clusterStatus.getBalancerOn());

  // Stop the cluster
  TEST_UTIL.shutdownMiniCluster();
}
 
Example 6
Source File: IndexAsyncThresholdIT.java    From phoenix with Apache License 2.0
@AfterParam
public static synchronized void tearDownMiniCluster() throws Exception {
    destroyDriver(driver);
    try {
        HBaseTestingUtility u = new HBaseTestingUtility();
        u.shutdownMiniCluster();
    } catch (Throwable t) {
        logger.error("Exception caught when shutting down mini cluster", t);
    } finally {
        ConnectionFactory.shutdown();
    }
}
 
Example 7
Source File: FailForUnsupportedHBaseVersionsIT.java    From phoenix with Apache License 2.0
/**
 * Test that we correctly abort a RegionServer when we run tests with an unsupported HBase
 * version. The 'completeness' of this test requires running it against both a version of HBase
 * that wouldn't be supported with WAL compression and one that would. Currently, the default
 * version (0.94.4) is unsupported, so just running 'mvn test' will run the full test. However,
 * this test will not fail when running against a version of HBase with WAL compression enabled.
 * Therefore, to fully test this functionality, we need to run the test against both a supported
 * and an unsupported version of HBase (as long as we want to support a version of HBase that
 * doesn't support custom WAL codecs).
 * @throws Exception on failure
 */
@Test(timeout = 300000 /* 5 mins */)
public void testDoesNotStartRegionServerForUnsupportedCompressionAndVersion() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    IndexTestingUtils.setupConfig(conf);
    // enable WAL Compression
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);

    // check the version to see if it isn't supported
    String version = VersionInfo.getVersion();
    boolean supported = false;
    if (Indexer.validateVersion(version, conf) == null) {
        supported = true;
    }

    // start the minicluster
    HBaseTestingUtility util = new HBaseTestingUtility(conf);
    // set replication required parameter
    ConfigUtil.setReplicationConfigIfAbsent(conf);
    try {
        util.startMiniCluster();

        // setup the primary table
        @SuppressWarnings("deprecation")
        HTableDescriptor desc = new HTableDescriptor(
                "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion");
        byte[] family = Bytes.toBytes("f");
        desc.addFamily(new HColumnDescriptor(family));

        // enable indexing to a non-existent index table
        String indexTableName = "INDEX_TABLE";
        ColumnGroup fam1 = new ColumnGroup(indexTableName);
        fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
        CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
        builder.addIndexGroup(fam1);
        builder.build(desc);

        // get a reference to the regionserver, so we can ensure it aborts
        HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);

        // create the primary table
        HBaseAdmin admin = util.getHBaseAdmin();
        if (supported) {
            admin.createTable(desc);
            assertFalse("Hosting regeion server failed, even the HBase version (" + version
                    + ") supports WAL Compression.", server.isAborted());
        } else {
            admin.createTableAsync(desc, null);

            // wait for the regionserver to abort - if this doesn't occur in the timeout, assume it's
            // broken.
            while (!server.isAborted()) {
                LOG.debug("Waiting on regionserver to abort..");
            }
        }
    } finally {
        // cleanup
        util.shutdownMiniCluster();
    }
}
 
Example 8
Source File: TestMasterRestartAfterDisablingTable.java    From hbase with Apache License 2.0
@Test
public void testForCheckingIfEnableAndDisableWorksFineAfterSwitch()
    throws Exception {
  final int NUM_MASTERS = 2;
  final int NUM_REGIONS_TO_CREATE = 4;

  // Start the cluster
  log("Starting cluster");
  Configuration conf = HBaseConfiguration.create();
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility(conf);
  StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(NUM_MASTERS).build();
  TEST_UTIL.startMiniCluster(option);
  MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
  log("Waiting for active/ready master");
  cluster.waitForActiveAndReadyMaster();

  // Create a table with regions
  final TableName tableName = TableName.valueOf(name.getMethodName());
  byte[] family = Bytes.toBytes("family");
  log("Creating table with " + NUM_REGIONS_TO_CREATE + " regions");
  Table ht = TEST_UTIL.createMultiRegionTable(tableName, family, NUM_REGIONS_TO_CREATE);
  int numRegions = -1;
  try (RegionLocator r = TEST_UTIL.getConnection().getRegionLocator(tableName)) {
    numRegions = r.getStartKeys().length;
  }
  numRegions += 1; // catalogs
  log("Waiting for no more RIT\n");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  log("Disabling table\n");
  TEST_UTIL.getAdmin().disableTable(tableName);

  NavigableSet<String> regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertEquals("The number of regions for the table tableRestart should be 0 and only" +
    "the catalog table should be present.", 1, regions.size());

  List<MasterThread> masterThreads = cluster.getMasterThreads();
  MasterThread activeMaster = null;
  if (masterThreads.get(0).getMaster().isActiveMaster()) {
    activeMaster = masterThreads.get(0);
  } else {
    activeMaster = masterThreads.get(1);
  }
  activeMaster.getMaster().stop(
      "stopping the active master so that the backup can become active");
  cluster.hbaseCluster.waitOnMaster(activeMaster);
  cluster.waitForActiveAndReadyMaster();

  assertTrue("The table should not be in enabled state",
      cluster.getMaster().getTableStateManager().isTableState(
      TableName.valueOf(name.getMethodName()), TableState.State.DISABLED,
      TableState.State.DISABLING));
  log("Enabling table\n");
  // Need a new Admin, the previous one is on the old master
  Admin admin = TEST_UTIL.getAdmin();
  admin.enableTable(tableName);
  admin.close();
  log("Waiting for no more RIT\n");
  TEST_UTIL.waitUntilNoRegionsInTransition(60000);
  log("Verifying there are " + numRegions + " assigned on cluster\n");
  regions = HBaseTestingUtility.getAllOnlineRegions(cluster);
  assertEquals("The assigned regions were not onlined after master" +
    " switch except for the catalog table.", 5, regions.size());
  assertTrue("The table should be in enabled state", cluster.getMaster().getTableStateManager()
    .isTableState(TableName.valueOf(name.getMethodName()), TableState.State.ENABLED));
  ht.close();
  TEST_UTIL.shutdownMiniCluster();
}
 
Example 9
Source File: TestMasterFailover.java    From hbase with Apache License 2.0
/**
 * Test meta in transition when the master fails over.
 * This test used to manipulate region state up in zk. That is no longer allowed in hbase2,
 * so that manipulation was removed, which makes this test anemic.
 */
@Test
public void testMetaInTransitionWhenMasterFailover() throws Exception {
  // Start the cluster
  HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
  TEST_UTIL.startMiniCluster();
  try {
    MiniHBaseCluster cluster = TEST_UTIL.getHBaseCluster();
    LOG.info("Cluster started");

    HMaster activeMaster = cluster.getMaster();
    ServerName metaServerName = cluster.getServerHoldingMeta();
    HRegionServer hrs = cluster.getRegionServer(metaServerName);

    // Now kill master, meta should remain on rs, where we placed it before.
    LOG.info("Aborting master");
    activeMaster.abort("test-kill");
    cluster.waitForMasterToStop(activeMaster.getServerName(), 30000);
    LOG.info("Master has aborted");

    // meta should remain where it was
    RegionState metaState = MetaTableLocator.getMetaRegionState(hrs.getZooKeeper());
    assertEquals("hbase:meta should be online on RS",
        metaState.getServerName(), metaServerName);
    assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState());

    // Start up a new master
    LOG.info("Starting up a new master");
    activeMaster = cluster.startMaster().getMaster();
    LOG.info("Waiting for master to be ready");
    cluster.waitForActiveAndReadyMaster();
    LOG.info("Master is ready");

    // ensure meta is still deployed on RS
    metaState = MetaTableLocator.getMetaRegionState(activeMaster.getZooKeeper());
    assertEquals("hbase:meta should be online on RS",
        metaState.getServerName(), metaServerName);
    assertEquals("hbase:meta should be online on RS", State.OPEN, metaState.getState());

    // Done, shutdown the cluster
  } finally {
    TEST_UTIL.shutdownMiniCluster();
  }
}
 
Example 10
Source File: TestMasterShutdown.java    From hbase with Apache License 2.0
/**
 * Simple test of shutdown.
 * <p>
 * Starts with three masters.  Tells the active master to shutdown the cluster.
 * Verifies that all masters are properly shutdown.
 */
@Test
public void testMasterShutdown() throws Exception {
  // Create config to use for this cluster
  Configuration conf = HBaseConfiguration.create();

  // Start the cluster
  try {
    htu = new HBaseTestingUtility(conf);
    StartMiniClusterOption option = StartMiniClusterOption.builder()
      .numMasters(3)
      .numRegionServers(1)
      .numDataNodes(1)
      .build();
    final MiniHBaseCluster cluster = htu.startMiniCluster(option);

    // wait for all master threads to spawn and start their run loops.
    final long thirtySeconds = TimeUnit.SECONDS.toMillis(30);
    final long oneSecond = TimeUnit.SECONDS.toMillis(1);
    assertNotEquals(-1, htu.waitFor(thirtySeconds, oneSecond, () -> {
      final List<MasterThread> masterThreads = cluster.getMasterThreads();
      return masterThreads != null
        && masterThreads.size() >= 3
        && masterThreads.stream().allMatch(Thread::isAlive);
    }));

    // find the active master
    final HMaster active = cluster.getMaster();
    assertNotNull(active);

    // make sure the other two are backup masters
    ClusterMetrics status = active.getClusterMetrics();
    assertEquals(2, status.getBackupMasterNames().size());

    // tell the active master to shutdown the cluster
    active.shutdown();
    assertNotEquals(-1, htu.waitFor(thirtySeconds, oneSecond,
      () -> CollectionUtils.isEmpty(cluster.getLiveMasterThreads())));
    assertNotEquals(-1, htu.waitFor(thirtySeconds, oneSecond,
      () -> CollectionUtils.isEmpty(cluster.getLiveRegionServerThreads())));
  } finally {
    if (htu != null) {
      htu.shutdownMiniCluster();
      htu = null;
    }
  }
}
 
Example 11
Source File: TestFileLink.java    From hbase with Apache License 2.0
/**
 * Test that link is still readable even when the current file gets deleted.
 *
 * NOTE: This test is valid only on HDFS.
 * When a file is deleted from a local file-system, it is simply 'unlinked'.
 * The inode, which contains the file's data, is not deleted until all
 * processes have finished with it.
 * In HDFS, when a request exceeds the cached block locations,
 * a query to the namenode is performed using the filename,
 * and the deleted file no longer exists (FileNotFoundException).
 */
@Test
public void testHDFSLinkReadDuringDelete() throws Exception {
  HBaseTestingUtility testUtil = new HBaseTestingUtility();
  Configuration conf = testUtil.getConfiguration();
  conf.setInt("dfs.blocksize", 1024 * 1024);
  conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

  testUtil.startMiniDFSCluster(1);
  MiniDFSCluster cluster = testUtil.getDFSCluster();
  FileSystem fs = cluster.getFileSystem();
  assertEquals("hdfs", fs.getUri().getScheme());

  try {
    List<Path> files = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      Path path = new Path(String.format("test-data-%d", i));
      writeSomeData(fs, path, 1 << 20, (byte)i);
      files.add(path);
    }

    FileLink link = new FileLink(files);
    FSDataInputStream in = link.open(fs);
    try {
      byte[] data = new byte[8192];
      int n;

      // Switch to file 1
      n = in.read(data);
      dataVerify(data, n, (byte)0);
      fs.delete(files.get(0), true);
      skipBuffer(in, (byte)0);

      // Switch to file 2
      n = in.read(data);
      dataVerify(data, n, (byte)1);
      fs.delete(files.get(1), true);
      skipBuffer(in, (byte)1);

      // Switch to file 3
      n = in.read(data);
      dataVerify(data, n, (byte)2);
      fs.delete(files.get(2), true);
      skipBuffer(in, (byte)2);

      // No more files available
      try {
        n = in.read(data);
        assert(n <= 0);
      } catch (FileNotFoundException e) {
        assertTrue(true);
      }
    } finally {
      in.close();
    }
  } finally {
    testUtil.shutdownMiniCluster();
  }
}
 
Example 12
Source File: FailForUnsupportedHBaseVersionsIT.java    From phoenix with Apache License 2.0
/**
 * Test that we correctly abort a RegionServer when we run tests with an unsupported HBase
 * version. The 'completeness' of this test requires running it against both a version of HBase
 * that wouldn't be supported with WAL compression and one that would. Currently, the default
 * version (0.94.4) is unsupported, so just running 'mvn test' will run the full test. However,
 * this test will not fail when running against a version of HBase with WAL compression enabled.
 * Therefore, to fully test this functionality, we need to run the test against both a supported
 * and an unsupported version of HBase (as long as we want to support a version of HBase that
 * doesn't support custom WAL codecs).
 * @throws Exception on failure
 */
@Test(timeout = 300000 /* 5 mins */)
public void testDoesNotStartRegionServerForUnsupportedCompressionAndVersion() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    IndexTestingUtils.setupConfig(conf);
    // enable WAL Compression
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);

    // check the version to see if it isn't supported
    String version = VersionInfo.getVersion();
    boolean supported = false;
    if (Indexer.validateVersion(version, conf) == null) {
        supported = true;
    }

    // start the minicluster
    HBaseTestingUtility util = new HBaseTestingUtility(conf);
    util.startMiniCluster();

    try {
        // setup the primary table
        TableDescriptorBuilder descBuilder = TableDescriptorBuilder.newBuilder(TableName.valueOf(
                "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion"));
        byte[] family = Bytes.toBytes("f");
        
        descBuilder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
        TableDescriptor desc = descBuilder.build();
        // enable indexing to a non-existent index table
        String indexTableName = "INDEX_TABLE";
        ColumnGroup fam1 = new ColumnGroup(indexTableName);
        fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
        CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
        builder.addIndexGroup(fam1);
        builder.build(desc);

        // get a reference to the regionserver, so we can ensure it aborts
        HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);

        // create the primary table
        Admin admin = util.getAdmin();
        if (supported) {
            admin.createTable(desc);
            assertFalse("Hosting regeion server failed, even the HBase version (" + version
                    + ") supports WAL Compression.", server.isAborted());
        } else {
            admin.createTableAsync(desc, null);

            // wait for the regionserver to abort - if this doesn't occur in the timeout, assume it's
            // broken.
            while (!server.isAborted()) {
                LOGGER.debug("Waiting on regionserver to abort..");
            }
        }

    } finally {
        // cleanup
        util.shutdownMiniCluster();
    }
}
 
Example 13
Source File: TestFailForUnsupportedHBaseVersions.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Test that we correctly abort a RegionServer when we run tests with an unsupported HBase
 * version. The 'completeness' of this test requires running it against both a version of HBase
 * that wouldn't be supported with WAL compression and one that would. Currently, the default
 * version (0.94.4) is unsupported, so just running 'mvn test' will run the full test. However,
 * this test will not fail when running against a version of HBase with WAL compression enabled.
 * Therefore, to fully test this functionality, we need to run the test against both a supported
 * and an unsupported version of HBase (as long as we want to support a version of HBase that
 * doesn't support custom WAL codecs).
 * @throws Exception on failure
 */
@Test(timeout = 300000 /* 5 mins */)
public void testDoesNotStartRegionServerForUnsupportedCompressionAndVersion() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  IndexTestingUtils.setupConfig(conf);
  // enable WAL Compression
  conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, true);

  // check the version to see if it isn't supported
  String version = VersionInfo.getVersion();
  boolean supported = false;
  if (Indexer.validateVersion(version, conf) == null) {
    supported = true;
  }

  // start the minicluster
  HBaseTestingUtility util = new HBaseTestingUtility(conf);
  util.startMiniCluster();

  // setup the primary table
  HTableDescriptor desc = new HTableDescriptor(
      "testDoesNotStartRegionServerForUnsupportedCompressionAndVersion");
  byte[] family = Bytes.toBytes("f");
  desc.addFamily(new HColumnDescriptor(family));

  // enable indexing to a non-existent index table
  String indexTableName = "INDEX_TABLE";
  ColumnGroup fam1 = new ColumnGroup(indexTableName);
  fam1.add(new CoveredColumn(family, CoveredColumn.ALL_QUALIFIERS));
  CoveredColumnIndexSpecifierBuilder builder = new CoveredColumnIndexSpecifierBuilder();
  builder.addIndexGroup(fam1);
  builder.build(desc);

  // get a reference to the regionserver, so we can ensure it aborts
  HRegionServer server = util.getMiniHBaseCluster().getRegionServer(0);

  // create the primary table
  HBaseAdmin admin = util.getHBaseAdmin();
  if (supported) {
    admin.createTable(desc);
    assertFalse("Hosting regeion server failed, even the HBase version (" + version
        + ") supports WAL Compression.", server.isAborted());
  } else {
    admin.createTableAsync(desc, null);

    // wait for the regionserver to abort - if this doesn't occur in the timeout, assume it's
    // broken.
    while (!server.isAborted()) {
      LOG.debug("Waiting on regionserver to abort..");
    }
  }

  // cleanup
  util.shutdownMiniCluster();
}
 
Example 14
Source File: RunLocalTest.java    From hadoop-arch-book with Apache License 2.0
public static void main(String[] args) throws Exception {

    HBaseTestingUtility htu = HBaseTestingUtility.createLocalHTU();
    Configuration config = htu.getConfiguration();

    htu.cleanupTestDir();
    htu.startMiniZKCluster();
    htu.startMiniHBaseCluster(1, 1);

    RemoveTables.executeDeleteTables(config);

    CreateTables.executeCreateTables(config);

    //Start up servers
    Server flumeTestServer = startTestFlumeServer(4243);

    List<String> flumePorts = new ArrayList<String>();
    flumePorts.add("127.0.0.1:4243");
    EventReviewServer server = new EventReviewServer(4242, config, flumePorts, false);
    server.startServer();

    EventClient client = new EventClient("127.0.0.1", 4242);
    client.startClient();

    HConnection connection = HConnectionManager.createConnection(config);

    //populate initial data
    populateUserProfileData(connection);
    populateValidationRules(connection);

    //populate user events
    UserEvent userEvent = new UserEvent("101", System.currentTimeMillis(),
            "127.0.0.1", "1", "55555",
            "42", 100.0, "101", true);

    client.submitUserEvent(userEvent);

    //shut down servers
    client.closeClient();
    server.closeServer();
    stopTestFlumeServer(flumeTestServer);
    htu.shutdownMiniCluster();

}