Java Code Examples for org.apache.hadoop.hbase.HBaseTestingUtility#shutdownMiniHBaseCluster()

The following examples show how to use org.apache.hadoop.hbase.HBaseTestingUtility#shutdownMiniHBaseCluster(). Each example is drawn from an open-source project; the source file and license are noted above it.
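For context, every example below follows the same lifecycle: start a mini cluster, optionally stop only its HBase portion partway through the test, then tear everything down. Here is a minimal sketch of that lifecycle (the wrapper class and its body are illustrative, not taken from the projects above):

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterLifecycle {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Spins up an in-process ZooKeeper quorum, mini-DFS, and HBase cluster.
    util.startMiniCluster();
    try {
      // ... exercise the cluster, e.g. through util.getAdmin() or util.getConnection()
    } finally {
      // Stops only the HBase master and region servers; ZooKeeper and the
      // mini-DFS keep running, so files written so far remain readable.
      util.shutdownMiniHBaseCluster();
      // Tears down the remaining ZooKeeper and DFS processes as well.
      util.shutdownMiniCluster();
    }
  }
}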
Example 1
Source File: TestTableSnapshotScanner.java    From hbase with Apache License 2.0
private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions,
    boolean shutdownCluster) throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testScanner");
  try {
    createTableAndSnapshot(util, tableName, snapshotName, numRegions);

    if (shutdownCluster) {
      util.shutdownMiniHBaseCluster();
    }

    Path restoreDir = util.getDataTestDirOnTestFS(snapshotName);
    Scan scan = new Scan().withStartRow(bbb).withStopRow(yyy); // limit the scan

    TableSnapshotScanner scanner = new TableSnapshotScanner(util.getConfiguration(), restoreDir,
      snapshotName, scan);

    verifyScanner(scanner, bbb, yyy);
    scanner.close();
  } finally {
    if (!shutdownCluster) {
      util.getAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
      tearDownCluster();
    }
  }
}
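Shutting the cluster down before opening the TableSnapshotScanner is the point of the shutdownCluster branch: the scanner restores the snapshot under restoreDir and reads the HFiles straight from the filesystem, so no live master or region servers are needed. The cleanup in the finally block correspondingly runs only while the cluster is still up, because deleting the snapshot and table requires a working Admin.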
 
Example 2
Source File: TestTableSnapshotInputFormat.java    From hbase with Apache License 2.0
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception {

  // create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }

  try {
    // create the job
    JobConf jobConf = new JobConf(util.getConfiguration());

    jobConf.setJarByClass(util.getClass());
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(jobConf,
      TestTableSnapshotInputFormat.class);

    if (numSplitsPerRegion > 1) {
      TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
              TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, jobConf, true, tableDir, new RegionSplitter.UniformSplit(),
              numSplitsPerRegion);
    } else {
      TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
              TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, jobConf, true, tableDir);
    }

    jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    jobConf.setNumReduceTasks(1);
    jobConf.setOutputFormat(NullOutputFormat.class);

    RunningJob job = JobClient.runJob(jobConf);
    Assert.assertTrue(job.isSuccessful());
  } finally {
    if (!shutdownCluster) {
      util.getAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
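This variant drives the job through the legacy org.apache.hadoop.mapred API (JobConf, JobClient, RunningJob). Note the fully qualified call to the mapreduce flavor of TableMapReduceUtil for adding dependency jars; the unqualified TableMapReduceUtil here is the mapred one, whose initTableSnapshotMapJob configures the old-style map job. Example 3 below is the same test written against the newer org.apache.hadoop.mapreduce API.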
 
Example 3
Source File: TestTableSnapshotInputFormat.java    From hbase with Apache License 2.0
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception {

  LOG.info("testing with MapReduce");

  LOG.info("create the table and snapshot");
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

  if (shutdownCluster) {
    LOG.info("shutting down hbase cluster.");
    util.shutdownMiniHBaseCluster();
  }

  try {
    // create the job
    Job job = Job.getInstance(util.getConfiguration());
    Scan scan = new Scan().withStartRow(startRow).withStopRow(endRow); // limit the scan

    job.setJarByClass(util.getClass());
    TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
            TestTableSnapshotInputFormat.class);

    if (numSplitsPerRegion > 1) {
      TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
              scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, job, true, tableDir, new RegionSplitter.UniformSplit(),
              numSplitsPerRegion);
    } else {
      TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
              scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, job, true, tableDir);
    }

    job.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    job.setNumReduceTasks(1);
    job.setOutputFormatClass(NullOutputFormat.class);

    Assert.assertTrue(job.waitForCompletion(true));
  } finally {
    if (!shutdownCluster) {
      util.getAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
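As in Example 2, the snapshot and table are deleted only when the cluster is still running; once shutdownMiniHBaseCluster() has been called there is no Admin to issue the deletes, so tests that pass shutdownCluster = true rely on full cluster teardown to reclaim everything.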