Java Code Examples for org.apache.hadoop.hbase.HBaseTestingUtility#deleteTable()

The following examples show how to use org.apache.hadoop.hbase.HBaseTestingUtility#deleteTable(). The examples are drawn from open-source projects; you can go to the original project or source file by following the links above each example.
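As a quick orientation before the project examples, here is a minimal sketch of the usual deleteTable() pattern in a mini-cluster test. The table name "demo_table" and column family "cf" are placeholders invented for this sketch, not names taken from any of the projects below.

HBaseTestingUtility util = new HBaseTestingUtility();
util.startMiniCluster();
try {
    TableName tableName = TableName.valueOf("demo_table"); // placeholder name
    util.createTable(tableName, Bytes.toBytes("cf"));      // placeholder family

    // deleteTable() disables the table if it is still enabled, then drops it.
    util.deleteTable(tableName);
} finally {
    util.shutdownMiniCluster();
}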
Example 1
Source File: HBaseMiniCluster.java    From yuzhouwan with Apache License 2.0
private HBaseTestingUtility hbaseOperation() throws Exception {

    HBaseTestingUtility hbaseTestingUtility = new HBaseTestingUtility();
    /*
     * If the fsOwner's name is "Benedict Jin", the space in the name makes the call
     * throw: Illegal character in path at index 42
     * hbaseTestingUtility.getTestFileSystem().setOwner(new Path(BASE_PATH.concat("/owner")), "Benedict Jin", "supergroup");
     */
    MiniHBaseCluster hbaseCluster = hbaseTestingUtility.startMiniCluster();

    // Create a table with a single "context" column family, then drop it again.
    hbaseTestingUtility.createTable(Bytes.toBytes(TABLE_NAME), Bytes.toBytes("context"));
    hbaseTestingUtility.deleteTable(Bytes.toBytes(TABLE_NAME));

    // Recreate the table through the admin API.
    Configuration config = hbaseCluster.getConf();
    Connection conn = ConnectionFactory.createConnection(config);
    HBaseAdmin hbaseAdmin = new HBaseAdmin(conn);

    HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
    hbaseAdmin.createTable(desc);
    return hbaseTestingUtility;
}
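Note that HBaseAdmin and HTableDescriptor, as used above, are deprecated in HBase 2.x. A rough sketch of the same recreate step against the newer builder API (assuming HBase 2.x; this is not part of the original example, and it adds a column family the original descriptor lacks) might look like:

Admin admin = conn.getAdmin();
TableDescriptor tableDesc = TableDescriptorBuilder.newBuilder(TableName.valueOf(TABLE_NAME))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("context")) // give the table a family
        .build();
admin.createTable(tableDesc);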
 
Example 2
Source File: TestTableSnapshotScanner.java    From hbase with Apache License 2.0
public static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName,
    String snapshotName, int numRegions)
    throws Exception {
  try {
    util.deleteTable(tableName);
  } catch (Exception ex) {
    // ignore
  }

  if (numRegions > 1) {
    util.createTable(tableName, FAMILIES, 1, bbb, yyy, numRegions);
  } else {
    util.createTable(tableName, FAMILIES);
  }
  Admin admin = util.getAdmin();

  // put some stuff in the table
  Table table = util.getConnection().getTable(tableName);
  util.loadTable(table, FAMILIES);

  Path rootDir = CommonFSUtils.getRootDir(util.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(util.getConfiguration());

  SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
      Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);

  // load different values
  byte[] value = Bytes.toBytes("after_snapshot_value");
  util.loadTable(table, FAMILIES, value);

  // cause flush to create new files in the region
  admin.flush(tableName);
  table.close();
}
 
Example 3
Source File: TestTableSnapshotScanner.java    From hbase with Apache License 2.0
private void testScanner(HBaseTestingUtility util, String snapshotName, int numRegions,
    boolean shutdownCluster) throws Exception {
  setupCluster();
  TableName tableName = TableName.valueOf("testScanner");
  try {
    createTableAndSnapshot(util, tableName, snapshotName, numRegions);

    if (shutdownCluster) {
      util.shutdownMiniHBaseCluster();
    }

    Path restoreDir = util.getDataTestDirOnTestFS(snapshotName);
    Scan scan = new Scan().withStartRow(bbb).withStopRow(yyy); // limit the scan

    TableSnapshotScanner scanner = new TableSnapshotScanner(UTIL.getConfiguration(), restoreDir,
      snapshotName, scan);

    verifyScanner(scanner, bbb, yyy);
    scanner.close();
  } finally {
    if (!shutdownCluster) {
      util.getAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
      tearDownCluster();
    }
  }
}
 
Example 4
Source File: TestTableSnapshotInputFormat.java    From hbase with Apache License 2.0
@Override
protected void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
    int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo)
    throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    createTableAndSnapshot(
      util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);

    JobConf job = new JobConf(util.getConfiguration());
    // setLocalityEnabledTo is ignored no matter what is specified, so as to test the case that
    // SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY is not explicitly specified
    // and the default value is taken.
    Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName);

    if (numSplitsPerRegion > 1) {
      TableMapReduceUtil.initTableSnapshotMapJob(snapshotName,
              COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(),
              numSplitsPerRegion);
    } else {
      TableMapReduceUtil.initTableSnapshotMapJob(snapshotName,
              COLUMNS, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, job, false, tmpTableDir);
    }

    // the mapred API doesn't seem to support start and end keys
    verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow());

  } finally {
    util.getAdmin().deleteSnapshot(snapshotName);
    util.deleteTable(tableName);
  }
}
 
Example 5
Source File: TestTableSnapshotInputFormat.java    From hbase with Apache License 2.0
@Override
public void testWithMockedMapReduce(HBaseTestingUtility util, String snapshotName,
    int numRegions, int numSplitsPerRegion, int expectedNumSplits, boolean setLocalityEnabledTo)
    throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  try {
    createTableAndSnapshot(
      util, tableName, snapshotName, getStartRow(), getEndRow(), numRegions);

    Configuration conf = util.getConfiguration();
    conf.setBoolean(SNAPSHOT_INPUTFORMAT_LOCALITY_ENABLED_KEY, setLocalityEnabledTo);
    Job job = new Job(conf);
    Path tmpTableDir = util.getDataTestDirOnTestFS(snapshotName);
    Scan scan = new Scan().withStartRow(getStartRow()).withStopRow(getEndRow()); // limit the scan

    if (numSplitsPerRegion > 1) {
      TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
              scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, job, false, tmpTableDir, new RegionSplitter.UniformSplit(),
              numSplitsPerRegion);
    } else {
      TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
              scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, job, false, tmpTableDir);
    }

    verifyWithMockedMapReduce(job, numRegions, expectedNumSplits, getStartRow(), getEndRow());

  } finally {
    util.getAdmin().deleteSnapshot(snapshotName);
    util.deleteTable(tableName);
  }
}
 
Example 6
Source File: TableSnapshotInputFormatTestBase.java    From hbase with Apache License 2.0
protected static void createTableAndSnapshot(HBaseTestingUtility util, TableName tableName,
  String snapshotName, byte[] startRow, byte[] endRow, int numRegions)
  throws Exception {
  try {
    LOG.debug("Ensuring table doesn't exist.");
    util.deleteTable(tableName);
  } catch (Exception ex) {
    // ignore
  }

  LOG.info("creating table '" + tableName + "'");
  if (numRegions > 1) {
    util.createTable(tableName, FAMILIES, 1, startRow, endRow, numRegions);
  } else {
    util.createTable(tableName, FAMILIES);
  }
  Admin admin = util.getAdmin();

  LOG.info("put some stuff in the table");
  Table table = util.getConnection().getTable(tableName);
  util.loadTable(table, FAMILIES);

  Path rootDir = CommonFSUtils.getRootDir(util.getConfiguration());
  FileSystem fs = rootDir.getFileSystem(util.getConfiguration());

  LOG.info("snapshot");
  SnapshotTestingUtils.createSnapshotAndValidate(admin, tableName,
    Arrays.asList(FAMILIES), null, snapshotName, rootDir, fs, true);

  LOG.info("load different values");
  byte[] value = Bytes.toBytes("after_snapshot_value");
  util.loadTable(table, FAMILIES, value);

  LOG.info("cause flush to create new files in the region");
  admin.flush(tableName);
  table.close();
}
 
Example 7
Source File: TestTableSnapshotInputFormat.java    From hbase with Apache License 2.0
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception {

  // create the table and snapshot
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

  if (shutdownCluster) {
    util.shutdownMiniHBaseCluster();
  }

  try {
    // create the job
    JobConf jobConf = new JobConf(util.getConfiguration());

    jobConf.setJarByClass(util.getClass());
    org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.addDependencyJarsForClasses(jobConf,
      TestTableSnapshotInputFormat.class);

    if (numSplitsPerRegion > 1) {
      TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
              TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, jobConf, true, tableDir, new RegionSplitter.UniformSplit(),
              numSplitsPerRegion);
    } else {
      TableMapReduceUtil.initTableSnapshotMapJob(snapshotName, COLUMNS,
              TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, jobConf, true, tableDir);
    }

    jobConf.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    jobConf.setNumReduceTasks(1);
    jobConf.setOutputFormat(NullOutputFormat.class);

    RunningJob job = JobClient.runJob(jobConf);
    Assert.assertTrue(job.isSuccessful());
  } finally {
    if (!shutdownCluster) {
      util.getAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
 
Example 8
Source File: TestTableSnapshotInputFormat.java    From hbase with Apache License 2.0
public static void doTestWithMapReduce(HBaseTestingUtility util, TableName tableName,
    String snapshotName, byte[] startRow, byte[] endRow, Path tableDir, int numRegions,
    int numSplitsPerRegion, int expectedNumSplits, boolean shutdownCluster) throws Exception {

  LOG.info("testing with MapReduce");

  LOG.info("create the table and snapshot");
  createTableAndSnapshot(util, tableName, snapshotName, startRow, endRow, numRegions);

  if (shutdownCluster) {
    LOG.info("shutting down hbase cluster.");
    util.shutdownMiniHBaseCluster();
  }

  try {
    // create the job
    Job job = new Job(util.getConfiguration());
    Scan scan = new Scan().withStartRow(startRow).withStopRow(endRow); // limit the scan

    job.setJarByClass(util.getClass());
    TableMapReduceUtil.addDependencyJarsForClasses(job.getConfiguration(),
            TestTableSnapshotInputFormat.class);

    if (numSplitsPerRegion > 1) {
      TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
              scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, job, true, tableDir, new RegionSplitter.UniformSplit(),
              numSplitsPerRegion);
    } else {
      TableMapReduceUtil.initTableSnapshotMapperJob(snapshotName,
              scan, TestTableSnapshotMapper.class, ImmutableBytesWritable.class,
              NullWritable.class, job, true, tableDir);
    }

    job.setReducerClass(TestTableSnapshotInputFormat.TestTableSnapshotReducer.class);
    job.setNumReduceTasks(1);
    job.setOutputFormatClass(NullOutputFormat.class);

    Assert.assertTrue(job.waitForCompletion(true));
  } finally {
    if (!shutdownCluster) {
      util.getAdmin().deleteSnapshot(snapshotName);
      util.deleteTable(tableName);
    }
  }
}
 
Example 9
Source File: TestJobSubmission.java    From spork with Apache License 2.0
@Test
public void testReducerNumEstimation() throws Exception {
    // Skip this test for Tez, which uses a different mechanism;
    // the equivalent test is in TestTezAutoParallelism.
    Assume.assumeTrue("Skip this test for TEZ",
            Util.isMapredExecType(cluster.getExecType()));
    // use the estimation
    Configuration conf = HBaseConfiguration.create(new Configuration());
    HBaseTestingUtility util = new HBaseTestingUtility(conf);
    int clientPort = util.startMiniZKCluster().getClientPort();
    util.startMiniHBaseCluster(1, 1);

    String query = "a = load '/passwd';" +
            "b = group a by $0;" +
            "store b into 'output';";
    PigServer ps = new PigServer(cluster.getExecType(), cluster.getProperties());
    PhysicalPlan pp = Util.buildPp(ps, query);
    MROperPlan mrPlan = Util.buildMRPlan(pp, pc);

    pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
    pc.getConf().setProperty("pig.exec.reducers.max", "10");
    pc.getConf().setProperty(HConstants.ZOOKEEPER_CLIENT_PORT, Integer.toString(clientPort));
    ConfigurationValidator.validatePigProperties(pc.getProperties());
    conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    JobControlCompiler jcc = new JobControlCompiler(pc, conf);
    JobControl jc = jcc.compile(mrPlan, "Test");
    Job job = jc.getWaitingJobs().get(0);
    long reducer = Math.min(
            (long) Math.ceil(new File("test/org/apache/pig/test/data/passwd").length() / 100.0), 10);

    Util.assertParallelValues(-1, -1, reducer, reducer, job.getJobConf());

    // use the PARALLEL key word, it will override the estimated reducer number
    query = "a = load '/passwd';" +
            "b = group a by $0 PARALLEL 2;" +
            "store b into 'output';";
    pp = Util.buildPp(ps, query);
    mrPlan = Util.buildMRPlan(pp, pc);

    pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
    pc.getConf().setProperty("pig.exec.reducers.max", "10");
    ConfigurationValidator.validatePigProperties(pc.getProperties());
    conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    jcc = new JobControlCompiler(pc, conf);
    jc = jcc.compile(mrPlan, "Test");
    job = jc.getWaitingJobs().get(0);

    Util.assertParallelValues(-1, 2, -1, 2, job.getJobConf());

    final byte[] COLUMNFAMILY = Bytes.toBytes("pig");
    util.createTable(Bytes.toBytesBinary("test_table"), COLUMNFAMILY);

    // the estimation won't take effect when it applies to non-DFS input or when the files don't exist, as with HBase
    query = "a = load 'hbase://test_table' using org.apache.pig.backend.hadoop.hbase.HBaseStorage('c:f1 c:f2');" +
            "b = group a by $0 ;" +
            "store b into 'output';";
    pp = Util.buildPp(ps, query);
    mrPlan = Util.buildMRPlan(pp, pc);

    pc.getConf().setProperty("pig.exec.reducers.bytes.per.reducer", "100");
    pc.getConf().setProperty("pig.exec.reducers.max", "10");

    ConfigurationValidator.validatePigProperties(pc.getProperties());
    conf = ConfigurationUtil.toConfiguration(pc.getProperties());
    jcc = new JobControlCompiler(pc, conf);
    jc = jcc.compile(mrPlan, "Test");
    job = jc.getWaitingJobs().get(0);

    Util.assertParallelValues(-1, -1, -1, 1, job.getJobConf());

    util.deleteTable(Bytes.toBytesBinary("test_table"));
    // In HBase 0.90.1 and above we can use util.shutdownMiniHBaseCluster()
    // here instead.
    MiniHBaseCluster hbc = util.getHBaseCluster();
    if (hbc != null) {
        hbc.shutdown();
        hbc.join();
    }
    util.shutdownMiniZKCluster();
}