Java Code Examples for org.apache.hadoop.hbase.client.Table#getName()

The following examples show how to use org.apache.hadoop.hbase.client.Table#getName(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: CubeVisitServiceTest.java    From kylin-on-parquet-v2 with Apache License 2.0 6 votes vote down vote up
public static void prepareTestData() throws Exception {
    // Drop any leftover table from a previous run; a missing table is expected
    // on a fresh cluster, so the failure is intentionally swallowed.
    try {
        util.getHBaseAdmin().disableTable(TABLE);
        util.getHBaseAdmin().deleteTable(TABLE);
    } catch (Exception ignored) {
        // ignore table not found
    }

    // Recreate the table and back it with a local region for direct puts.
    Table hTable = util.createTable(TABLE, FAM);
    region = util.createLocalHRegion(new HRegionInfo(hTable.getName()), hTable.getTableDescriptor());

    // Scan the in-memory grid table and mirror every record into the region.
    gtInfo = newInfo();
    GridTable gridTable = newTable(gtInfo);
    IGTScanner records = gridTable.scan(new GTScanRequestBuilder().setInfo(gtInfo)
            .setRanges(null).setDimensions(null).setFilterPushDown(null).createGTScanRequest());
    for (GTRecord rec : records) {
        byte[] pk = rec.exportColumns(gtInfo.getPrimaryKey()).toBytes();
        // Row key layout: [shard id][cuboid id][primary key bytes].
        byte[] rowKey = new byte[RowConstants.ROWKEY_SHARD_AND_CUBOID_LEN + pk.length];
        System.arraycopy(Bytes.toBytes(baseCuboid), 0, rowKey, RowConstants.ROWKEY_SHARDID_LEN,
                RowConstants.ROWKEY_CUBOIDID_LEN);
        System.arraycopy(pk, 0, rowKey, RowConstants.ROWKEY_SHARD_AND_CUBOID_LEN, pk.length);
        Put rowPut = new Put(rowKey);
        rowPut.addColumn(FAM, COL_M, rec.exportColumns(gtInfo.getColumnBlock(1)).toBytes());
        region.put(rowPut);
    }
}
 
Example 2
Source File: TestReplicationSyncUpToolWithBulkLoadedData.java    From hbase with Apache License 2.0 6 votes vote down vote up
private void loadAndValidateHFileReplication(String testName, byte[] row, byte[] fam,
    Table source, byte[][][] hfileRanges, int numOfRows) throws Exception {
  // Stage HFiles under <test-dir>/<family> on cluster 1's test filesystem.
  FileSystem fs = UTIL1.getTestFileSystem();
  Path stagingDir = UTIL1.getDataTestDirOnTestFS(testName).makeQualified(fs);
  Path familyDir = new Path(stagingDir, Bytes.toString(fam));

  // Write one HFile per [from, to) key range.
  int fileNo = 0;
  for (byte[][] range : hfileRanges) {
    HFileTestUtil.createHFile(UTIL1.getConfiguration(), fs,
      new Path(familyDir, "hfile_" + fileNo++), fam, row, range[0], range[1], numOfRows);
  }

  // Bulk load the staged files into the source table on cluster 1.
  final TableName targetTable = source.getName();
  BulkLoadHFiles.create(UTIL1.getConfiguration()).bulkLoad(targetTable, stagingDir);
}
 
Example 3
Source File: TestReplicationSyncUpToolWithBulkLoadedData.java    From hbase with Apache License 2.0 6 votes vote down vote up
/**
 * Stages HFiles on the second cluster's (UTIL2) HDFS and bulk-loads them into the
 * given source table, exercising a bulk load whose files live on a different HDFS.
 *
 * @param testName     name used for the staging directory on UTIL2's test FS
 * @param row          row prefix passed to the HFile generator
 * @param fam          column family the HFiles are written for
 * @param source       table to bulk load into; only its name is used
 * @param hfileRanges  one {from, to} key range per HFile to create
 * @param numOfRows    number of rows to write into each HFile
 */
private void loadFromOtherHDFSAndValidateHFileReplication(String testName, byte[] row, byte[] fam,
    Table source, byte[][][] hfileRanges, int numOfRows) throws Exception {
  // All file creation happens on cluster 2's filesystem.
  Path dir = UTIL2.getDataTestDirOnTestFS(testName);
  FileSystem fs = UTIL2.getTestFileSystem();
  dir = dir.makeQualified(fs);
  Path familyDir = new Path(dir, Bytes.toString(fam));

  // One HFile per [from, to) key range.
  int hfileIdx = 0;
  for (byte[][] range : hfileRanges) {
    byte[] from = range[0];
    byte[] to = range[1];
    HFileTestUtil.createHFile(UTIL2.getConfiguration(), fs,
        new Path(familyDir, "hfile_" + hfileIdx++), fam, row, from, to, numOfRows);
  }

  final TableName tableName = source.getName();
  // NOTE(review): uses UTIL1's configuration even though the files sit on UTIL2's
  // HDFS — presumably deliberate ("from other HDFS" in the method name suggests a
  // cross-HDFS bulk load into cluster 1), but worth confirming it is not a typo.
  BulkLoadHFiles loader = BulkLoadHFiles.create(UTIL1.getConfiguration());
  loader.bulkLoad(tableName, dir);
}
 
Example 4
Source File: CubeVisitServiceTest.java    From kylin with Apache License 2.0 5 votes vote down vote up
public static void prepareTestData() throws Exception {
    // Drop any leftover table from a previous run; a missing table is expected
    // on a fresh cluster, so the failure is intentionally swallowed.
    try {
        util.getHBaseAdmin().disableTable(TABLE);
        util.getHBaseAdmin().deleteTable(TABLE);
    } catch (Exception ignored) {
        // ignore table not found
    }

    // Recreate the table and back it with a local region for direct puts.
    Table hTable = util.createTable(TABLE, FAM);
    region = util.createLocalHRegion(new HRegionInfo(hTable.getName()), hTable.getTableDescriptor());

    // Scan the in-memory grid table and mirror every record into the region.
    gtInfo = newInfo();
    GridTable gridTable = newTable(gtInfo);
    IGTScanner records = gridTable.scan(new GTScanRequestBuilder().setInfo(gtInfo)
            .setRanges(null).setDimensions(null).setFilterPushDown(null).createGTScanRequest());
    for (GTRecord rec : records) {
        byte[] pk = rec.exportColumns(gtInfo.getPrimaryKey()).toBytes();
        // Row key layout: [shard id][cuboid id][primary key bytes].
        byte[] rowKey = new byte[RowConstants.ROWKEY_SHARD_AND_CUBOID_LEN + pk.length];
        System.arraycopy(Bytes.toBytes(baseCuboid), 0, rowKey, RowConstants.ROWKEY_SHARDID_LEN,
                RowConstants.ROWKEY_CUBOIDID_LEN);
        System.arraycopy(pk, 0, rowKey, RowConstants.ROWKEY_SHARD_AND_CUBOID_LEN, pk.length);
        // This variant writes one measure column into each of the two families.
        Put rowPut = new Put(rowKey);
        rowPut.addColumn(FAM[0], COL_M, rec.exportColumns(gtInfo.getColumnBlock(1)).toBytes());
        rowPut.addColumn(FAM[1], COL_M, rec.exportColumns(gtInfo.getColumnBlock(2)).toBytes());
        region.put(rowPut);
    }
}
 
Example 5
Source File: TestMasterReplication.java    From hbase with Apache License 2.0 5 votes vote down vote up
private void loadAndValidateHFileReplication(String testName, int masterNumber,
    int[] slaveNumbers, byte[] row, byte[] fam, Table[] tables, byte[][][] hfileRanges,
    int numOfRows, int[] expectedCounts, boolean toValidate) throws Exception {
  HBaseTestingUtility masterUtil = utilities[masterNumber];

  // Stage HFiles under <test-dir>/<family> on the master cluster's test FS.
  FileSystem fs = masterUtil.getTestFileSystem();
  Path stagingDir = masterUtil.getDataTestDirOnTestFS(testName)
      .makeQualified(fs.getUri(), fs.getWorkingDirectory());
  Path familyDir = new Path(stagingDir, Bytes.toString(fam));

  // Write one HFile per [from, to) key range.
  int fileNo = 0;
  for (byte[][] range : hfileRanges) {
    HFileTestUtil.createHFile(masterUtil.getConfiguration(), fs,
      new Path(familyDir, "hfile_" + fileNo++), fam, row, range[0], range[1], numOfRows);
  }

  // Bulk load into the master cluster's table.
  Table source = tables[masterNumber];
  BulkLoadHFiles.create(masterUtil.getConfiguration()).bulkLoad(source.getName(), stagingDir);

  // Optionally block until each slave cluster has replicated the expected rows.
  if (toValidate) {
    for (int slave : slaveNumbers) {
      wait(slave, tables[slave], expectedCounts[slave]);
    }
  }
}
 
Example 6
Source File: TestEndToEndSplitTransaction.java    From hbase with Apache License 2.0 5 votes vote down vote up
RegionSplitter(Table table) throws IOException {
  // Cache the handles this splitter needs from the shared test fixture.
  this.table = table;
  this.tableName = table.getName();
  // Split against the table's first column family.
  this.family = table.getDescriptor().getColumnFamilies()[0].getName();
  this.connection = TEST_UTIL.getConnection();
  this.admin = TEST_UTIL.getAdmin();
  this.rs = TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
}