Java Code Examples for org.apache.hadoop.hbase.client.HTable

The following are top-voted examples showing how to use org.apache.hadoop.hbase.client.HTable. These examples are extracted from open-source projects. You can vote up the examples you like; your votes help us surface additional high-quality examples.
Example 1
Project: ditb   File: OfflineMetaRebuildTestCore.java   Source Code and License 11 votes vote down vote up
/**
 * Corrupts hbase:meta for test purposes: deletes every meta row that
 * describes a non-system table region, unassigning each affected region
 * as it goes. System-namespace rows are left intact.
 */
protected void wipeOutMeta() throws IOException {
  // Mess it up by blowing up meta.
  Admin hbaseAdmin = TEST_UTIL.getHBaseAdmin();
  Table metaTable = new HTable(conf, TableName.META_TABLE_NAME);
  ResultScanner metaRows = metaTable.getScanner(new Scan());
  List<Delete> pendingDeletes = new ArrayList<Delete>();
  for (Result row : metaRows) {
    HRegionInfo regionInfo = HRegionInfo.getHRegionInfo(row);
    if (regionInfo == null) {
      continue;
    }
    String namespace = regionInfo.getTable().getNamespaceAsString();
    if (namespace.equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
      continue; // leave system tables alone
    }
    pendingDeletes.add(new Delete(row.getRow()));
    hbaseAdmin.unassign(row.getRow(), true);
  }
  metaTable.delete(pendingDeletes);
  metaRows.close();
  metaTable.close();
}
 
Example 2
Project: ditb   File: TestQuotaThrottle.java   Source Code and License 7 votes vote down vote up
// Spins up a single-node mini cluster with the quota subsystem enabled,
// waits for the quota table, then creates one HTable per TABLE_NAMES entry.
// Statement order matters: all configuration must be set before
// startMiniCluster().
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // Enable quotas and tighten intervals/retries so throttle tests run fast.
  TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  TEST_UTIL.startMiniCluster(1);
  // Quota state lives in this table; it must be up before tests touch quotas.
  TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
  QuotaCache.setTEST_FORCE_REFRESH(true);

  tables = new HTable[TABLE_NAMES.length];
  for (int i = 0; i < TABLE_NAMES.length; ++i) {
    tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY);
  }
}
 
Example 3
Project: ditb   File: TableInputFormatBase.java   Source Code and License 7 votes vote down vote up
/**
 * Allows subclasses to set the {@link HTable}.
 *
 * Will attempt to reuse the underlying Connection for our own needs, including
 * retrieving an Admin interface to the HBase cluster.
 *
 * @param table  The table to get the data from.
 * @throws IOException 
 * @deprecated Use {@link #initializeTable(Connection, TableName)} instead.
 */
@Deprecated
protected void setHTable(HTable table) throws IOException {
  this.table = table;
  this.connection = table.getConnection();
  try {
    // Happy path: the table's own connection is unmanaged and can hand
    // out an Admin.
    this.regionLocator = table.getRegionLocator();
    this.admin = this.connection.getAdmin();
  } catch (NeedUnmanagedConnectionException exception) {
    // NOTE(review): "initalizeTable" below is a typo in the user-facing
    // message, left untouched here to preserve runtime output exactly.
    LOG.warn("You are using an HTable instance that relies on an HBase-managed Connection. " +
        "This is usually due to directly creating an HTable, which is deprecated. Instead, you " +
        "should create a Connection object and then request a Table instance from it. If you " +
        "don't need the Table instance for your own use, you should instead use the " +
        "TableInputFormatBase.initalizeTable method directly.");
    LOG.info("Creating an additional unmanaged connection because user provided one can't be " +
        "used for administrative actions. We'll close it when we close out the table.");
    LOG.debug("Details about our failure to request an administrative interface.", exception);
    // Do we need a "copy the settings from this Connection" method? are things like the User
    // properly maintained by just looking again at the Configuration?
    this.connection = ConnectionFactory.createConnection(this.connection.getConfiguration());
    this.regionLocator = this.connection.getRegionLocator(table.getName());
    this.admin = this.connection.getAdmin();
  }
}
 
Example 4
Project: ditb   File: TestZKBasedOpenCloseRegion.java   Source Code and License 7 votes vote down vote up
// Brings up a 2-node mini cluster with ZK-based assignment enabled, creates
// a multi-region table, records its region count, and seeds one row per
// region before any test runs.
@BeforeClass public static void beforeAllTests() throws Exception {
  Configuration c = TEST_UTIL.getConfiguration();
  // ZK-based assignment is the behavior this test class exercises.
  c.setBoolean("hbase.assignment.usezk", true);
  c.setBoolean("dfs.support.append", true);
  // Port 0 = pick an ephemeral info port to avoid clashes between runs.
  c.setInt("hbase.regionserver.info.port", 0);
  TEST_UTIL.startMiniCluster(2);
  TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILIES);
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  countOfRegions = -1;
  try (RegionLocator r = t.getRegionLocator()) {
    // One start key per region.
    countOfRegions = r.getStartKeys().length;
  }
  waitUntilAllRegionsAssigned();
  addToEachStartKey(countOfRegions);
  t.close();
  TEST_UTIL.getHBaseCluster().getMaster().assignmentManager.initializeHandlerTrackers();
}
 
Example 5
Project: SparkDemo   File: MyClass.java   Source Code and License 6 votes vote down vote up
/**
 * Scans {@code tableName} for rows whose column1 value equals "aaa" and
 * prints each matching row key and its cells to stdout.
 *
 * @param tableName name of the HBase table to query
 */
public static void QueryByCondition2(String tableName) {

        HTable table = null;
        ResultScanner rs = null;
        try {
            HTablePool pool = new HTablePool(configuration, 1000);
            table = (HTable) pool.getTable(tableName);
            // Match rows where column1 equals "aaa"; a null qualifier
            // matches any qualifier in the family.
            Filter filter = new SingleColumnValueFilter(Bytes
                    .toBytes("column1"), null, CompareOp.EQUAL, Bytes
                    .toBytes("aaa"));
            Scan s = new Scan();
            s.setFilter(filter);
            rs = table.getScanner(s);
            for (Result r : rs) {
                System.out.println("获得到rowkey:" + new String(r.getRow()));
                for (KeyValue keyValue : r.raw()) {
                    System.out.println("列:" + new String(keyValue.getFamily())
                            + "====值:" + new String(keyValue.getValue()));
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            // FIX: the original leaked both the scanner and the pooled table.
            if (rs != null) {
                rs.close();
            }
            if (table != null) {
                try {
                    table.close();
                } catch (Exception ignored) {
                    // best-effort cleanup; original swallowed all errors too
                }
            }
        }

    }
 
Example 6
Project: ditb   File: TestHBaseFsck.java   Source Code and License 6 votes vote down vote up
/**
 * Setup a clean table with a certain region_replica count.
 *
 * It will set tbl (a class field) which needs to be closed after the test.
 *
 * @param tablename name of the table to create
 * @param replicaCount region replication factor for the new table
 * @throws Exception
 */
void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(tablename);
  desc.setRegionReplication(replicaCount);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
  desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
  createTable(TEST_UTIL, desc, SPLITS);

  tbl = (HTable) connection.getTable(tablename, tableExecutorService);
  // Seed one cell per ROWKEYS entry so fsck has data to inspect.
  List<Put> puts = new ArrayList<Put>();
  for (byte[] row : ROWKEYS) {
    Put p = new Put(row);
    p.add(FAM, Bytes.toBytes("val"), row);
    puts.add(p);
  }
  tbl.put(puts);
  // Force the buffered writes out so they are visible immediately.
  tbl.flushCommits();
}
 
Example 7
Project: HBase-High-Performance-Cookbook   File: HBaseRegularClient.java   Source Code and License 6 votes vote down vote up
/**
 * Prints every cell of every row in the given table to stdout:
 * row key, family:qualifier, timestamp and value.
 *
 * @param myHbaseBtableName name of the HBase table to scan
 */
@SuppressWarnings({ "deprecation", "resource" })
public static void getAllRecord(String myHbaseBtableName) {
  HTable hbaseBtable = null;
  ResultScanner hbaseBSs = null;
  try {
    hbaseBtable = new HTable(hbaseBconf, myHbaseBtableName);
    Scan hbaseBScan = new Scan();
    hbaseBSs = hbaseBtable.getScanner(hbaseBScan);
    for (Result r : hbaseBSs) {
      for (KeyValue hbaseBkv : r.raw()) {
        System.out.print(new String(hbaseBkv.getRow()) + " ");
        System.out.print(new String(hbaseBkv.getFamily()) + ":");
        System.out.print(new String(hbaseBkv.getQualifier()) + " ");
        System.out.print(hbaseBkv.getTimestamp() + " ");
        System.out.println(new String(hbaseBkv.getValue()));
      }
    }
  } catch (IOException eio) {
    // BUG FIX: the original called eip.printStackTrace() on an undeclared
    // variable "eip", which does not compile; the caught exception is eio.
    eio.printStackTrace();
  } finally {
    if (hbaseBSs != null) hbaseBSs.close();
    // FIX: also close the table itself; the original leaked it.
    if (hbaseBtable != null) {
      try {
        hbaseBtable.close();
      } catch (IOException ignored) {
        // best-effort cleanup
      }
    }
  }
}
 
Example 8
Project: ditb   File: IntegrationTestBigLinkedList.java   Source Code and License 6 votes vote down vote up
/**
 * Deletes the node identified by args[0] (a binary-encoded row key) from
 * the linked-list table. Prints usage when called with the wrong number
 * of arguments. Always returns 0.
 */
@Override
public int run(String[] args) throws Exception {
  if (args.length != 1) {
    System.out.println("Usage : " + Delete.class.getSimpleName() + " <node to delete>");
    return 0;
  }
  byte[] rowKey = Bytes.toBytesBinary(args[0]);
  org.apache.hadoop.hbase.client.Delete rowDelete =
      new org.apache.hadoop.hbase.client.Delete(rowKey);
  Table table = new HTable(getConf(), getTableName(getConf()));
  table.delete(rowDelete);
  table.close();
  System.out.println("Delete successful");
  return 0;
}
 
Example 9
Project: ditb   File: TestMultiTableInputFormat.java   Source Code and License 6 votes vote down vote up
// Starts a 3-node mini cluster and creates three multi-region tables
// (TABLE_NAME + "0".."2"), each pre-loaded with test data, for the
// multi-table input format tests.
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // switch TIF to log at DEBUG level
  TEST_UTIL.enableDebug(MultiTableInputFormat.class);
  TEST_UTIL.enableDebug(MultiTableInputFormatBase.class);
  TEST_UTIL.setJobWithoutMRCluster();
  // start mini hbase cluster
  TEST_UTIL.startMiniCluster(3);
  // create and fill table
  for (int i = 0; i < 3; i++) {
    // try-with-resources closes each table after loading it.
    try (HTable table =
        TEST_UTIL.createMultiRegionTable(TableName.valueOf(TABLE_NAME + String.valueOf(i)),
          INPUT_FAMILY, 4)) {
      TEST_UTIL.loadTable(table, INPUT_FAMILY, false);
    }
  }
}
 
Example 10
Project: Transwarp-Sample-Code   File: LobUtil.java   Source Code and License 6 votes vote down vote up
/**
 * Uploads an object into a LOB (large object) table: the file name goes
 * into family1:f1_q1 and the file bytes into family2:f2_q1 of one row.
 *
 * @param tableName Hyperbase table name
 * @param row row key as a String (converted to bytes)
 * @param filename file name to store
 * @param fileData raw file contents
 */
public void putLob(String tableName, String row, String filename, byte[] fileData){
    byte[] rowkey = Bytes.toBytes(row);
    try {
        HTable htable = new HTable(conf, tableName);
        Put put = new Put(rowkey);
        put.add(Bytes.toBytes(family1), Bytes.toBytes(f1_q1), Bytes.toBytes(filename));
        put.add(Bytes.toBytes(family2), Bytes.toBytes(f2_q1), fileData);
        htable.put(put);
        htable.flushCommits();
        htable.close();
    } catch (IOException e1) {
        // NOTE(review): the failure is swallowed after printing; the caller
        // has no way to detect an unsuccessful upload.
        e1.printStackTrace();
    }
}
 
Example 11
Project: Transwarp-Sample-Code   File: udtfCheck.java   Source Code and License 6 votes vote down vote up
// Hive UDTF body. record[0] is expected to be "<rowkey>,<v1 v2 ...>".
// Rows whose key is NOT already present in HBase table "bi" are forwarded
// as (rowkey, value) pairs, one per space-separated value.
@Override
public void process(Object[] record) throws HiveException {
    final String document = (String) stringOI.getPrimitiveJavaObject(record[0]);

    if (document == null) {
        return;
    }

    // tokens[0] = row key, tokens[1] = space-separated value list.
    // NOTE(review): assumes the input always contains a comma — confirm,
    // otherwise tokens[1] throws ArrayIndexOutOfBoundsException.
    String[] tokens = document.split(",");
    String[] results = tokens[1].split(" ");

    try {
        // NOTE(review): hTable is a field reopened on every call and never
        // closed here — confirm its lifecycle is managed elsewhere.
        hTable = new HTable(conf, "bi");
        Get get = new Get(Bytes.toBytes(tokens[0]));
        result = hTable.exists(get);
    } catch (Exception e) {
        // NOTE(review): on failure the stale value of the 'result' field is
        // reused below; consider resetting it explicitly.
        e.printStackTrace();
    }

    // Only forward rows that do not already exist in HBase.
    if (!result) {
        for (String r : results) {
            forward(new Object[]{tokens[0], r});
        }
    }
}
 
Example 12
Project: ditb   File: TestLoadIncrementalHFiles.java   Source Code and License 6 votes vote down vote up
/**
 * Creates HFiles covering {@code hfileRanges}, bulk-loads them into the
 * table described by {@code htd} — once via a managed HTable and once via
 * an unmanaged Connection — verifies the row count in the managed case,
 * and checks that the secure-bulk-load staging folder was cleaned up.
 */
private void runTest(String testName, HTableDescriptor htd, BloomType bloomType,
    boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges) throws Exception {

  for (boolean managed : new boolean[] { true, false }) {
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(FAMILY));

    // One HFile per range, 1000 rows each.
    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
      byte[] from = range[0];
      byte[] to = range[1];
      HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
          + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
    }
    int expectedRows = hfileIdx * 1000;

    if (preCreateTable) {
      util.getHBaseAdmin().createTable(htd, tableSplitKeys);
    }

    final TableName tableName = htd.getTableName();
    if (!util.getHBaseAdmin().tableExists(tableName)) {
      util.getHBaseAdmin().createTable(htd);
    }
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());

    if (managed) {
      try (HTable table = new HTable(util.getConfiguration(), tableName)) {
        loader.doBulkLoad(dir, table);
        assertEquals(expectedRows, util.countRows(table));
      }
    } else {
      try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
          HTable table = (HTable) conn.getTable(tableName)) {
        loader.doBulkLoad(dir, table);
      }
    }

    // verify staging folder has been cleaned up
    Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
    if (fs.exists(stagingBasePath)) {
      FileStatus[] files = fs.listStatus(stagingBasePath);
      for (FileStatus file : files) {
        // BUG FIX: the original compared Strings with '!=' (reference
        // inequality), so the assertion was effectively always true.
        assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
            !file.getPath().getName().equals("DONOTERASE"));
      }
    }

    util.deleteTable(tableName);
  }
}
 
Example 13
Project: ditb   File: TestScannersWithLabels.java   Source Code and License 6 votes vote down vote up
/**
 * Inserts nine rows ("row0".."row8") into {@code tableName} under the
 * family:qualifier parsed from {@code column}, each carrying a zeroed
 * 3-byte value and the visibility expression (SECRET|CONFIDENTIAL)&!TOPSECRET.
 * The {@code prob} parameter is accepted but unused.
 *
 * @return the number of Puts written (always 9)
 */
private static int insertData(TableName tableName, String column, double prob) throws IOException {
  byte[] zeroValue = new byte[3];
  byte[][] familyAndQualifier = KeyValue.parseColumn(Bytes.toBytes(column));
  String visibility = "(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!" + TOPSECRET;

  List<Put> batch = new ArrayList<>();
  for (int rowIndex = 0; rowIndex < 9; rowIndex++) {
    Put put = new Put(Bytes.toBytes("row" + rowIndex));
    put.setDurability(Durability.SKIP_WAL);
    put.add(familyAndQualifier[0], familyAndQualifier[1], zeroValue);
    put.setCellVisibility(new CellVisibility(visibility));
    batch.add(put);
  }
  try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName)) {
    table.put(batch);
  }
  return batch.size();
}
 
Example 14
Project: aliyun-maxcompute-data-collectors   File: HBaseBulkImportJob.java   Source Code and License 6 votes vote down vote up
/**
 * Prepares the MapReduce job for HBase bulk import: validates that a
 * destination directory is set, wires in dependency jars, sets the output
 * path, and configures incremental HFile load for the target table.
 *
 * @throws ImportException if no destination directory was supplied
 */
@Override
protected void jobSetup(Job job) throws IOException, ImportException {
  super.jobSetup(job);

  // we shouldn't have gotten here if bulk load dir is not set
  // so let's throw a ImportException
  if(getContext().getDestination() == null){
    throw new ImportException("Can't run HBaseBulkImportJob without a " +
        "valid destination directory.");
  }

  TableMapReduceUtil.addDependencyJars(job.getConfiguration(), Preconditions.class);
  FileOutputFormat.setOutputPath(job, getContext().getDestination());
  // NOTE(review): this HTable is never closed — presumably acceptable in a
  // short-lived job-setup context, but worth confirming.
  HTable hTable = new HTable(job.getConfiguration(), options.getHBaseTable());
  HFileOutputFormat.configureIncrementalLoad(job, hTable);
}
 
Example 15
Project: ditb   File: TestHFileOutputFormat.java   Source Code and License 6 votes vote down vote up
/**
 * Runs a local MR job that writes randomly generated data as HFiles into
 * {@code outDir}, configured for incremental load into {@code table}.
 * Asserts one reduce task per table region and that the job succeeds.
 */
private void runIncrementalPELoad(
    Configuration conf, HTable table, Path outDir)
throws Exception {
  Job job = new Job(conf, "testLocalMRIncrementalLoad");
  job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
  // Register the serializations HFileOutputFormat2 needs for its map output.
  job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName(),
      KeyValueSerialization.class.getName());
  setupRandomGeneratorMapper(job);
  HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(),
      table.getRegionLocator());
  FileOutputFormat.setOutputPath(job, outDir);

  // The output dir must not pre-exist, or the job would fail during setup.
  Assert.assertFalse( util.getTestFileSystem().exists(outDir)) ;

  // configureIncrementalLoad sets one reducer per table region.
  assertEquals(table.getRegionLocator().getAllRegionLocations().size(), job.getNumReduceTasks());

  assertTrue(job.waitForCompletion(true));
}
 
Example 16
Project: ditb   File: TestLogRollPeriod.java   Source Code and License 6 votes vote down vote up
/**
 * Tests that the LogRoller performs a roll even when the WAL receives
 * no edits at all.
 */
@Test
public void testNoEdits() throws Exception {
  TableName tableName = TableName.valueOf("TestLogRollPeriodNoEdits");
  TEST_UTIL.createTable(tableName, "cf");
  try {
    Table table = new HTable(TEST_UTIL.getConfiguration(), tableName);
    try {
      HRegionServer regionServer = TEST_UTIL.getRSForFirstRegionInTable(tableName);
      WAL wal = regionServer.getWAL(null);
      // Expect at least 5 rolls despite no edits being written.
      checkMinLogRolls(wal, 5);
    } finally {
      table.close();
    }
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
 
Example 17
Project: ditb   File: TestCoprocessorEndpoint.java   Source Code and License 6 votes vote down vote up
// Starts a 2-node mini cluster with the coprocessor endpoints under test
// loaded on every region and on the master, creates TEST_TABLE pre-split
// into three regions, and seeds it with ROWSIZE rows.
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // set configure to indicate which cp should be loaded
  Configuration conf = util.getConfiguration();
  conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000);
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
      ProtobufCoprocessorService.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      ProtobufCoprocessorService.class.getName());
  util.startMiniCluster(2);

  Admin admin = util.getHBaseAdmin();
  HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  // Two split points => three regions.
  admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]});
  util.waitUntilAllRegionsAssigned(TEST_TABLE);

  // Seed one cell per row so the endpoints have data to aggregate.
  Table table = new HTable(conf, TEST_TABLE);
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
  table.close();
}
 
Example 18
Project: ditb   File: TestMultiRowRangeFilter.java   Source Code and License 6 votes vote down vote up
/**
 * Verifies MultiRowRangeFilter over three disjoint row ranges: a single
 * filtered scan must return exactly as many results as the three
 * independent per-range scans combined.
 */
@Test
public void testMultiRowRangeFilterWithoutRangeOverlap() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithoutRangeOverlap");
  HTable table = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, table, family, qf, value);

  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
  ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
  ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false));

  Scan scan = new Scan();
  scan.setMaxVersions();
  scan.setFilter(new MultiRowRangeFilter(ranges));
  int resultsSize = getResultsSize(table, scan);
  LOG.info("found " + resultsSize + " results");

  List<Cell> range1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(20), table);
  List<Cell> range2 = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), table);
  List<Cell> range3 = getScanResult(Bytes.toBytes(60), Bytes.toBytes(70), table);
  assertEquals(range1.size() + range2.size() + range3.size(), resultsSize);

  table.close();
}
 
Example 19
Project: ditb   File: TestOpenTableInCoprocessor.java   Source Code and License 6 votes vote down vote up
// Installs the given observer coprocessor on the primary table, writes one
// row to it, and verifies the coprocessor propagated an update into the
// other (remote) table.
private void runCoprocessorConnectionToRemoteTable(Class<? extends BaseRegionObserver> clazz,
    boolean[] completeCheck) throws Throwable {
  HTableDescriptor primary = new HTableDescriptor(primaryTable);
  primary.addFamily(new HColumnDescriptor(family));
  // add our coprocessor
  primary.addCoprocessor(clazz.getName());

  HTableDescriptor other = new HTableDescriptor(otherTable);
  other.addFamily(new HColumnDescriptor(family));


  Admin admin = UTIL.getHBaseAdmin();
  admin.createTable(primary);
  admin.createTable(other);

  // NOTE(review): "primary" is hard-coded here — presumably it matches the
  // primaryTable name used above; confirm they stay in sync.
  Table table = new HTable(UTIL.getConfiguration(), TableName.valueOf("primary"));
  Put p = new Put(new byte[] { 'a' });
  p.add(family, null, new byte[] { 'a' });
  table.put(p);
  table.close();

  // The coprocessor should have mirrored the write into the other table.
  Table target = new HTable(UTIL.getConfiguration(), otherTable);
  assertTrue("Didn't complete update to target table!", completeCheck[0]);
  assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
  target.close();
}
 
Example 20
Project: ditb   File: TestMultiRowRangeFilter.java   Source Code and License 6 votes vote down vote up
/**
 * Verifies MultiRowRangeFilter when the first range starts at the empty
 * (minimum) row key: the filtered scan's result count must equal the sum
 * of the two independent per-range scans.
 */
@Test
public void testMultiRowRangeFilterWithEmptyStartRow() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithEmptyStartRow");
  HTable table = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, table, family, qf, value);

  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(10), false));
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));

  Scan scan = new Scan();
  scan.setMaxVersions();
  scan.setFilter(new MultiRowRangeFilter(ranges));
  int resultsSize = getResultsSize(table, scan);

  List<Cell> range1 = getScanResult(Bytes.toBytes(""), Bytes.toBytes(10), table);
  List<Cell> range2 = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), table);
  assertEquals(range1.size() + range2.size(), resultsSize);

  table.close();
}
 
Example 21
Project: ditb   File: TestNamespaceUpgrade.java   Source Code and License 6 votes vote down vote up
/**
 * Clones each per-table snapshot and verifies that every clone contains
 * exactly the expected row keys, in order.
 */
@Test (timeout=300000)
public void testSnapshots() throws IOException, InterruptedException {
  String snapshots[][] = {snapshot1Keys, snapshot2Keys};
  for (int i = 1; i <= snapshots.length; i++) {
    for (TableName table : tables) {
      TEST_UTIL.getHBaseAdmin().cloneSnapshot(table + "_snapshot" + i,
          TableName.valueOf(table + "_clone" + i));
      FSUtils.logFileSystemState(FileSystem.get(TEST_UTIL.getConfiguration()),
          FSUtils.getRootDir(TEST_UTIL.getConfiguration()),
          LOG);
      int count = 0;
      // FIX: the original created the clone's HTable and scanner inline in
      // the for-each header and never closed either; use try-with-resources.
      try (Table clone = new HTable(TEST_UTIL.getConfiguration(), table + "_clone" + i);
          ResultScanner scanner = clone.getScanner(new Scan())) {
        for (Result res : scanner) {
          assertEquals(snapshots[i - 1][count++], Bytes.toString(res.getRow()));
        }
      }
      Assert.assertEquals(table + "_snapshot" + i, snapshots[i - 1].length, count);
    }
  }
}
 
Example 22
Project: ditb   File: IndexChooser.java   Source Code and License 6 votes vote down vote up
/**
 * Builds a per-index-table map of region lists for the given IndexTable.
 * Requires every underlying Table to be an HTable so region locations can
 * be fetched directly.
 *
 * @param indexTable source of the index-table mappings
 * @throws IOException if any mapped table is not an HTable instance
 */
public IndexChooser(final IndexTable indexTable) throws IOException {
  this.indexTable = indexTable;
  indexRegionMaps = new TreeMap<byte[], List<HRegionInfo>>(Bytes.BYTES_COMPARATOR);

  for (Map.Entry<byte[], Table> entry : indexTable.getIndexTableMaps().entrySet()) {
    // Region-location lookup needs the concrete HTable API.
    if (!(entry.getValue() instanceof HTable)) {
      throw new IOException(
          "table is not an instance of HTable, it is " + entry.getValue().getClass().getName());
    }
    HTable htable = (HTable) entry.getValue();
    ArrayList<HRegionInfo> list =
        new ArrayList<HRegionInfo>(htable.getRegionLocations().keySet());
    indexRegionMaps.put(entry.getKey(), list);
  }

  speedTimes = DEFAULT_SPEED_TIMES;
}
 
Example 23
Project: ditb   File: TestBulkDeleteProtocol.java   Source Code and License 6 votes vote down vote up
/**
 * Bulk-deletes column family FAMILY1 from 100 rows via the bulk delete
 * endpoint and verifies that FAMILY2 survives intact on every row.
 */
public void testBulkDeleteFamily() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteFamily");
  HTableDescriptor descriptor = new HTableDescriptor(tableName);
  descriptor.addFamily(new HColumnDescriptor(FAMILY1));
  descriptor.addFamily(new HColumnDescriptor(FAMILY2));
  TEST_UTIL.getHBaseAdmin().createTable(descriptor, Bytes.toBytes(0), Bytes.toBytes(120), 5);
  Table table = new HTable(TEST_UTIL.getConfiguration(), tableName);

  // Seed 100 rows, each with one cell in FAMILY1 and one in FAMILY2.
  List<Put> batch = new ArrayList<Put>(100);
  for (int row = 0; row < 100; row++) {
    Put put = new Put(Bytes.toBytes(row));
    put.add(FAMILY1, QUALIFIER1, "v1".getBytes());
    put.add(FAMILY2, QUALIFIER2, "v2".getBytes());
    batch.add(put);
  }
  table.put(batch);

  // Bulk-delete everything in FAMILY1.
  Scan familyScan = new Scan();
  familyScan.addFamily(FAMILY1);
  long deletedRowCount =
      invokeBulkDeleteProtocol(tableName, familyScan, 500, DeleteType.FAMILY, null);
  assertEquals(100, deletedRowCount);

  // Every row must have lost FAMILY1 but kept its single FAMILY2 cell.
  int survivingRows = 0;
  for (Result result : table.getScanner(new Scan())) {
    assertTrue(result.getFamilyMap(FAMILY1).isEmpty());
    assertEquals(1, result.getColumnCells(FAMILY2, QUALIFIER2).size());
    survivingRows++;
  }
  assertEquals(100, survivingRows);
  table.close();
}
 
Example 24
Project: ditb   File: TestHBaseFsck.java   Source Code and License 5 votes vote down vote up
// Creates a single-region table, deletes its region from HDFS to fake a
// hole, checks fsck reports NOT_IN_HDFS, then runs fsck in fix mode and
// verifies the hole is repaired.
@Test (timeout=180000)
public void testTableWithNoRegions() throws Exception {
  // We might end up with empty regions in a table
  // see also testNoHdfsTable()
  TableName table =
      TableName.valueOf(name.getMethodName());
  try {
    // create table with one region
    HTableDescriptor desc = new HTableDescriptor(table);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
    desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
    createTable(TEST_UTIL, desc, null);
    tbl = (HTable) connection.getTable(table, tableExecutorService);

    // Mess it up by leaving a hole in the assignment, meta, and hdfs data
    deleteRegion(conf, tbl.getTableDescriptor(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW, false, false, true);

    // Read-only fsck must flag the missing HDFS data.
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_HDFS });

    doFsck(conf, true);

    // fix hole
    // NOTE(review): fix-mode fsck is run twice in a row — presumably the
    // second pass completes repairs started by the first; confirm.
    doFsck(conf, true);

    // check that hole fixed
    assertNoErrors(doFsck(conf, false));
  } finally {
    cleanupTable(table);
  }

}
 
Example 25
Project: ditb   File: TestRowCountEndpoint.java   Source Code and License 5 votes vote down vote up
// Inserts five rows, then invokes the RowCountService coprocessor endpoint
// across the whole table and asserts a single region reports a count of 5.
public void testEndpoint() throws Throwable {
  Table table = new HTable(CONF, TEST_TABLE);

  // insert some test rows
  for (int i=0; i<5; i++) {
    byte[] iBytes = Bytes.toBytes(i);
    Put p = new Put(iBytes);
    p.add(TEST_FAMILY, TEST_COLUMN, iBytes);
    table.put(p);
  }

  // Invoke the endpoint on every region covering the (null, null) = full
  // key range; results are keyed by region start key.
  final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
  Map<byte[],Long> results = table.coprocessorService(ExampleProtos.RowCountService.class,
      null, null,
      new Batch.Call<ExampleProtos.RowCountService,Long>() {
        public Long call(ExampleProtos.RowCountService counter) throws IOException {
          ServerRpcController controller = new ServerRpcController();
          BlockingRpcCallback<ExampleProtos.CountResponse> rpcCallback =
              new BlockingRpcCallback<ExampleProtos.CountResponse>();
          counter.getRowCount(controller, request, rpcCallback);
          ExampleProtos.CountResponse response = rpcCallback.get();
          // Surface any server-side failure instead of returning a count.
          if (controller.failedOnException()) {
            throw controller.getFailedOn();
          }
          return (response != null && response.hasCount()) ? response.getCount() : 0;
        }
      });
  // should be one region with results
  assertEquals(1, results.size());
  Iterator<Long> iter = results.values().iterator();
  Long val = iter.next();
  assertNotNull(val);
  assertEquals(5l, val.longValue());
  // NOTE(review): 'table' is never closed in this test — consider
  // try-with-resources.
}
 
Example 26
Project: flume-release-1.7.0   File: TestAsyncHBaseSink.java   Source Code and License 5 votes vote down vote up
// Pushes three events through the async HBase sink in one channel
// transaction, then verifies all three payloads landed in the table and
// the counter column reads 3.
@Test
public void testThreeEvents() throws Exception {
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  deleteTable = true;
  AsyncHBaseSink sink = new AsyncHBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  Assert.assertFalse(sink.isConfNull());
  sink.process();
  sink.stop();
  // NOTE(review): this HTable is never closed — harmless in a short-lived
  // test, but worth tidying.
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 3);
  byte[] out;
  int found = 0;
  // Order of delivery is not guaranteed; match each expected payload
  // against any of the three value slots.
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
        found++;
        break;
      }
    }
  }
  Assert.assertEquals(3, found);
  // NOTE(review): results[3] presumably holds the sink's increment/counter
  // column — confirm against the getResults() helper's contract.
  out = results[3];
  Assert.assertArrayEquals(Longs.toByteArray(3), out);
}
 
Example 27
Project: ditb   File: TestServerCustomProtocol.java   Source Code and License 5 votes vote down vote up
/**
 * Calling hello() with an unknown name must yield a null result for every
 * region covering rows A, B and C.
 */
@Test
public void testNullReturn() throws Throwable {
  try (HTable table = new HTable(util.getConfiguration(), TEST_TABLE)) {
    RegionLocator regionLocator = table.getRegionLocator();
    Map<byte[], String> greetings = hello(table, "nobody", ROW_A, ROW_C);
    for (byte[] row : new byte[][] { ROW_A, ROW_B, ROW_C }) {
      verifyRegionResults(regionLocator, greetings, null, row);
    }
  }
}
 
Example 28
Project: ditb   File: TestChangingEncoding.java   Source Code and License 5 votes vote down vote up
/**
 * Verifies one batch of test data: for each row in the batch, every
 * column must hold the value generated for (batchId, row, col).
 *
 * @param conf cluster configuration used to open the table
 * @param tableName table holding the batch
 * @param batchId which batch of rows to verify
 * @throws Exception if the table cannot be read or a cell mismatches
 */
static void verifyTestDataBatch(Configuration conf, TableName tableName,
    int batchId) throws Exception {
  LOG.debug("Verifying test data batch " + batchId);
  // FIX: try-with-resources — the original leaked the table whenever an
  // assertion threw before the trailing close().
  try (Table table = new HTable(conf, tableName)) {
    for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
      Get get = new Get(getRowKey(batchId, i));
      Result result = table.get(get);
      for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
        Cell kv = result.getColumnLatestCell(CF_BYTES, getQualifier(j));
        assertTrue(CellUtil.matchingValue(kv, getValue(batchId, i, j)));
      }
    }
  }
}
 
Example 29
Project: ditb   File: HBaseTestingUtility.java   Source Code and License 5 votes vote down vote up
/**
 * Truncate a table using the admin command.
 * Effectively disables, deletes, and recreates the table.
 * @param tableName table which must exist.
 * @param preserveRegions keep the existing split points
 * @return HTable for the new table; the caller is responsible for closing it
 */
public HTable truncateTable(final TableName tableName, final boolean preserveRegions)
    throws IOException {
  Admin admin = getHBaseAdmin();
  // truncateTable requires the table to be disabled first.
  if (!admin.isTableDisabled(tableName)) {
    admin.disableTable(tableName);
  }
  admin.truncateTable(tableName, preserveRegions);
  return new HTable(getConfiguration(), tableName);
}
 
Example 30
Project: ditb   File: TestServerCustomProtocol.java   Source Code and License 5 votes vote down vote up
/**
 * A compound hello+ping call must return "Hello, pong" for each region
 * covering rows A, B and C.
 */
@Test
public void testCompoundCall() throws Throwable {
  try (HTable table = new HTable(util.getConfiguration(), TEST_TABLE)) {
    RegionLocator regionLocator = table.getRegionLocator();
    Map<byte[], String> responses = compoundOfHelloAndPing(table, ROW_A, ROW_C);
    for (byte[] row : new byte[][] { ROW_A, ROW_B, ROW_C }) {
      verifyRegionResults(regionLocator, responses, "Hello, pong", row);
    }
  }
}
 
Example 31
Project: flume-release-1.7.0   File: TestHBaseSink.java   Source Code and License 5 votes vote down vote up
// Verifies the sink's transaction handling when the serializer throws:
// the first process() must surface a FlumeException, and after the fault
// is cleared the same event must still be delivered (i.e. the failed
// transaction was rolled back, not lost).
@Test
public void testTransactionStateOnSerializationException() throws Exception {
  initContextForSimpleHbaseEventSerializer();
  ctx.put("batchSize", "1");
  ctx.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER,
          "org.apache.flume.sink.hbase.MockSimpleHbaseEventSerializer");

  HBaseSink sink = new HBaseSink(conf);
  Configurables.configure(sink, ctx);
  // Reset the context to a higher batchSize
  ctx.put("batchSize", "100");
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + 0));
  channel.put(e);
  tx.commit();
  tx.close();
  try {
    // Arm the mock serializer to fail on the next event.
    MockSimpleHbaseEventSerializer.throwException = true;
    sink.process();
    Assert.fail("FlumeException expected from serilazer");
  } catch (FlumeException ex) {
    Assert.assertEquals("Exception for testing", ex.getMessage());
  }
  // Clear the fault and retry: the rolled-back event must now go through.
  MockSimpleHbaseEventSerializer.throwException = false;
  sink.process();
  sink.stop();
  // NOTE(review): this HTable is never closed — harmless in a short-lived
  // test, but worth tidying.
  HTable table = new HTable(conf, tableName);
  byte[][] results = getResults(table, 1);
  byte[] out = results[0];
  Assert.assertArrayEquals(e.getBody(), out);
  // NOTE(review): results[1] presumably holds the sink's counter column —
  // confirm against the getResults() helper's contract.
  out = results[1];
  Assert.assertArrayEquals(Longs.toByteArray(1), out);
}
 
Example 32
Project: ditb   File: TestHFileOutputFormat2.java   Source Code and License 5 votes vote down vote up
/**
 * Test for {@link HFileOutputFormat2#configureDataBlockEncoding(HTableDescriptor, Configuration)}
 * and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}.
 * Tests that the data block encoding map is correctly serialized into
 * and deserialized from configuration.
 *
 * @throws IOException
 */
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException {
  // Exercise 0..3 column families to cover the empty and multi-family cases.
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, DataBlockEncoding> familyToDataBlockEncoding =
        getMockColumnFamiliesForDataBlockEncoding(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForDataBlockEncoding(table,
        familyToDataBlockEncoding);
    HTableDescriptor tableDescriptor = table.getTableDescriptor();
    // Round-trip: serialize the per-family encodings into conf...
    HFileOutputFormat2.configureDataBlockEncoding(tableDescriptor, conf);

    // read back family specific data block encoding settings from the
    // configuration
    Map<byte[], DataBlockEncoding> retrievedFamilyToDataBlockEncodingMap =
        HFileOutputFormat2
        .createFamilyDataBlockEncodingMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
      assertEquals("DataBlockEncoding configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes()));
    }
  }
}
 
Example 33
Project: Transwarp-Sample-Code   File: udfCheck.java   Source Code and License 5 votes vote down vote up
/**
 * Returns true if a row with the given key exists in the "bi" table,
 * false on a miss or on any error (errors are logged, not propagated).
 * NOTE(review): hTable/result/conf are fields — presumably shared; confirm
 * no concurrent callers rely on the field values after this returns.
 */
public static boolean evaluate(String rowkey) {
    try {
        hTable = new HTable(conf, "bi");
        try {
            Get get = new Get(Bytes.toBytes(rowkey));
            result = hTable.exists(get);
            return result;
        } finally {
            // Previously leaked one HTable (and its connection) per call.
            hTable.close();
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return false;
}
 
Example 34
Project: QDrill   File: HBaseGroupScan.java   Source Code and License 5 votes vote down vote up
/**
 * Resolves region locations for the scan range, records per-region size
 * estimates, and validates the requested columns.
 */
private void init() {
  logger.debug("Getting region locations");
  // try-with-resources: the original leaked the HTable when any call
  // between construction and the explicit close() threw.
  try (HTable table =
      new HTable(storagePluginConfig.getHBaseConf(), hbaseScanSpec.getTableName())) {
    this.hTableDesc = table.getTableDescriptor();
    NavigableMap<HRegionInfo, ServerName> regionsMap = table.getRegionLocations();
    statsCalculator = new TableStatsCalculator(table, hbaseScanSpec,
        storagePlugin.getContext().getConfig(), storagePluginConfig);

    // Keep only the regions overlapping [startRow, stopRow].
    boolean foundStartRegion = false;
    regionsToScan = new TreeMap<HRegionInfo, ServerName>();
    for (Entry<HRegionInfo, ServerName> mapEntry : regionsMap.entrySet()) {
      HRegionInfo regionInfo = mapEntry.getKey();
      // Skip leading regions until the one containing the start row.
      if (!foundStartRegion && hbaseScanSpec.getStartRow() != null
          && hbaseScanSpec.getStartRow().length != 0
          && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
        continue;
      }
      foundStartRegion = true;
      regionsToScan.put(regionInfo, mapEntry.getValue());
      scanSizeInBytes += statsCalculator.getRegionSizeInBytes(regionInfo.getRegionName());
      // Stop after the region containing the stop row.
      if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0
          && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
        break;
      }
    }
  } catch (IOException e) {
    throw new DrillRuntimeException("Error getting region info for table: "
        + hbaseScanSpec.getTableName(), e);
  }
  verifyColumns();
}
 
Example 35
Project: ditb   File: TestServerCustomProtocol.java   Source Code and License 5 votes vote down vote up
/**
 * A void-returning coprocessor endpoint should yield one null entry per
 * contacted region.
 */
@Test
public void testEmptyReturnType() throws Throwable {
  try (HTable table = new HTable(util.getConfiguration(), TEST_TABLE)) {
    Map<byte[], String> regionResults = noop(table, ROW_A, ROW_C);
    assertEquals("Should have results from three regions", 3, regionResults.size());
    // Every region's entry must be null for a void endpoint.
    for (Object regionValue : regionResults.values()) {
      assertNull(regionValue);
    }
  }
}
 
Example 36
Project: QDrill   File: TestTableGenerator.java   Source Code and License 5 votes vote down vote up
/**
 * Creates (or recreates) a table whose row keys are a 4-byte big-endian int
 * followed by an 8-byte big-endian counter, and fills it with dummy cells.
 */
public static void generateHBaseDatasetCompositeKeyInt(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation of the table.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    admin.createTable(tableDesc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDesc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);
  long rowCounter = 0;
  for (int value = 0; value < 1000; value += 47, rowCounter++) {
    // putInt + putLong emit the same big-endian bytes the original
    // produced with manual shifts.
    byte[] rowKey = ByteBuffer.allocate(12).putInt(value).putLong(rowCounter).array();
    Put put = new Put(rowKey);
    put.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();
}
 
Example 37
Project: QDrill   File: TestTableGenerator.java   Source Code and License 5 votes vote down vote up
/**
 * Creates (or recreates) a table whose row keys are OrderedBytes-encoded
 * doubles in ascending order, then flushes it.
 */
public static void generateHBaseDatasetDoubleOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation of the table.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    admin.createTable(tableDesc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDesc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);
  for (double value = 0.5; value <= 100.00; value += 0.75) {
    // OrderedBytes float64 needs 9 bytes: 1 header + 8 payload.
    byte[] encoded = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(encoded, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(range, value,
        org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put put = new Put(encoded);
    put.add(FAMILY_F, COLUMN_C, String.format("value %03f", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Example 38
Project: ditb   File: TestGetLastFlushedSequenceId.java   Source Code and License 5 votes vote down vote up
// Verifies the master tracks last-flushed sequence ids: NO_SEQNUM before any
// flush, and a larger, store-consistent id after an explicit flush.
@Test
public void test() throws IOException, InterruptedException {
  testUtil.getHBaseAdmin().createNamespace(
    NamespaceDescriptor.create(tableName.getNamespaceAsString()).build());
  HTable table = testUtil.createTable(tableName, families);
  table.put(new Put(Bytes.toBytes("k")).add(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
  table.flushCommits();
  MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  // Find any online region of the table (single-region table expected).
  Region region = null;
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (Region r : hrs.getOnlineRegions(tableName)) {
      region = r;
      break;
    }
  }
  assertNotNull(region);
  // Give the region server time to report sequence ids to the master.
  Thread.sleep(2000);
  RegionStoreSequenceIds ids =
      testUtil.getHBaseCluster().getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  // Nothing flushed yet, so no last-flushed sequence id is known.
  assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId());
  // This will be the sequenceid just before that of the earliest edit in memstore.
  long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId();
  assertTrue(storeSequenceId > 0);
  testUtil.getHBaseAdmin().flush(tableName);
  // Wait for the post-flush report to reach the master.
  Thread.sleep(2000);
  ids =
      testUtil.getHBaseCluster().getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  // The flush must advance the last-flushed id past the pre-flush store id,
  // and region-level and store-level ids must now agree.
  assertTrue(ids.getLastFlushedSequenceId() + " > " + storeSequenceId,
    ids.getLastFlushedSequenceId() > storeSequenceId);
  assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId());
  table.close();
}
 
Example 39
Project: QDrill   File: TestTableGenerator.java   Source Code and License 5 votes vote down vote up
/**
 * Creates (or recreates) a table whose row keys are OrderedBytes-encoded
 * ints in ascending order, then flushes it.
 */
public static void generateHBaseDatasetIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation of the table.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    admin.createTable(tableDesc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDesc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);
  for (int value = -49; value <= 100; value++) {
    // OrderedBytes int32 needs 5 bytes: 1 header + 4 payload.
    byte[] encoded = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(encoded, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(range, value,
        org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put put = new Put(encoded);
    put.add(FAMILY_F, COLUMN_C, String.format("value %d", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Example 40
Project: QDrill   File: TestTableGenerator.java   Source Code and License 5 votes vote down vote up
/**
 * Creates (or recreates) a table whose row keys are OrderedBytes-encoded
 * doubles in DESCENDING order, then flushes it.
 */
public static void generateHBaseDatasetDoubleOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation of the table.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    admin.createTable(tableDesc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDesc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);
  for (double value = 0.5; value <= 100.00; value += 0.75) {
    // OrderedBytes float64 needs 9 bytes: 1 header + 8 payload.
    byte[] encoded = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(encoded, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(range, value,
        org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put put = new Put(encoded);
    put.add(FAMILY_F, COLUMN_C, String.format("value %03f", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Example 41
Project: QDrill   File: TestTableGenerator.java   Source Code and License 5 votes vote down vote up
/**
 * Creates (or recreates) a table whose row keys are OrderedBytes-encoded
 * floats in DESCENDING order, then flushes it.
 */
public static void generateHBaseDatasetFloatOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation of the table.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    admin.createTable(tableDesc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDesc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);
  for (float value = (float) 0.5; value <= 100.00; value += 0.75) {
    // OrderedBytes float32 needs 5 bytes: 1 header + 4 payload.
    byte[] encoded = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(encoded, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat32(range, value,
        org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put put = new Put(encoded);
    put.add(FAMILY_F, COLUMN_C, String.format("value %03f", value).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Example 42
Project: QDrill   File: TestTableGenerator.java   Source Code and License 5 votes vote down vote up
/**
 * Creates (or recreates) a table whose row keys are OrderedBytes-encoded
 * longs (epoch-millisecond timestamps) in DESCENDING order, then flushes it.
 */
public static void generateHBaseDatasetBigIntOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  // Drop any previous incarnation of the table.
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor tableDesc = new HTableDescriptor(tableName);
  tableDesc.addFamily(new HColumnDescriptor(FAMILY_F));
  if (numberRegions > 1) {
    admin.createTable(tableDesc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions - 1));
  } else {
    admin.createTable(tableDesc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);
  // Fixed epoch-seconds base promoted to milliseconds before overflow.
  final long startTime = (long) 1438034423 * 1000;
  for (long ts = startTime; ts <= startTime + 100; ts++) {
    // OrderedBytes int64 needs 9 bytes: 1 header + 8 payload.
    byte[] encoded = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange range =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(encoded, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt64(range, ts,
        org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put put = new Put(encoded);
    put.add(FAMILY_F, COLUMN_C, String.format("value %d", ts).getBytes());
    table.put(put);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Example 43
Project: aliyun-maxcompute-data-collectors   File: HBasePutProcessor.java   Source Code and License 5 votes vote down vote up
/**
 * Wires up this processor from the given Configuration: instantiates the
 * configured PutTransformer, applies its column-family / row-key settings,
 * and opens the target HTable with auto-flush disabled for batching.
 */
@Override
@SuppressWarnings("unchecked")
public void setConf(Configuration config) {
  this.conf = config;

  // Resolve the PutTransformer implementation; default calls toString()
  // on every non-null field.
  Class<? extends PutTransformer> transformerClass =
      (Class<? extends PutTransformer>)
      this.conf.getClass(TRANSFORMER_CLASS_KEY, ToStringPutTransformer.class);
  this.putTransformer =
      (PutTransformer) ReflectionUtils.newInstance(transformerClass, this.conf);
  if (null == this.putTransformer) {
    throw new RuntimeException("Could not instantiate PutTransformer.");
  }

  this.putTransformer.setColumnFamily(conf.get(COL_FAMILY_KEY, null));
  this.putTransformer.setRowKeyColumn(conf.get(ROW_KEY_COLUMN_KEY, null));

  // Extra knobs only the string transformer understands.
  if (this.putTransformer instanceof ToStringPutTransformer) {
    ToStringPutTransformer stringTransformer =
        (ToStringPutTransformer) this.putTransformer;
    stringTransformer.bigDecimalFormatString = conf.getBoolean(
        ImportJobBase.PROPERTY_BIGDECIMAL_FORMAT,
        ImportJobBase.PROPERTY_BIGDECIMAL_FORMAT_DEFAULT);
    stringTransformer.addRowKey = conf.getBoolean(
        HBasePutProcessor.ADD_ROW_KEY,
        HBasePutProcessor.ADD_ROW_KEY_DEFAULT);
    stringTransformer.detectCompositeKey();
  }

  this.tableName = conf.get(TABLE_NAME_KEY, null);
  try {
    this.table = new HTable(conf, this.tableName);
  } catch (IOException ioe) {
    throw new RuntimeException("Could not access HBase table " + tableName,
        ioe);
  }
  // Batch puts client-side instead of flushing per call.
  this.table.setAutoFlush(false);
}
 
Example 44
Project: ditb   File: TestLoadAndSwitchEncodeOnDisk.java   Source Code and License 5 votes vote down vote up
/**
 * Asserts every region of the table serves at least one row by scanning
 * from each region's start key.
 */
private void assertAllOnLine(final HTable t) throws IOException {
  NavigableMap<HRegionInfo, ServerName> regions = t.getRegionLocations();
  for (Map.Entry<HRegionInfo, ServerName> e : regions.entrySet()) {
    byte[] startkey = e.getKey().getStartKey();
    Scan s = new Scan(startkey);
    ResultScanner scanner = t.getScanner(s);
    try {
      Result r = scanner.next();
      // Previously the scanner leaked when this assertion failed.
      org.junit.Assert.assertTrue(r != null && r.size() > 0);
    } finally {
      scanner.close();
    }
  }
}
 
Example 45
Project: ditb   File: DITBInserterBase.java   Source Code and License 5 votes vote down vote up
// Drains this thread's record queue into the table until the producer marks
// the thread finished AND the queue is empty; optionally tracks per-record
// latency and reports progress every PRINT_INTERVAL records.
public void insertData() throws IOException, InterruptedException {
  // NOTE(review): this HTable is opened but insertOneRecord() presumably
  // writes through it or a field — confirm; the local is only closed here.
  HTable table = new HTable(conf, tableName);
  DateFormat dateFormat = new SimpleDateFormat("yyyy/MM/dd HH:mm:ss");
  int counter = 0;
  long start = 0;
  while (true) {
    if (queue.isEmpty()) {
      // Empty queue + finish flag => producer is done; exit the loop.
      // Otherwise the producer is still running: back off and re-check.
      if (threadFinishMark[id]) {
        break;
      } else {
        Thread.sleep(SLEEP_INTERVAL);
        continue;
      }
    }
    if (CAL_LATENCY) {
      start = System.currentTimeMillis();
    }
    insertOneRecord(queue.poll());
    if (CAL_LATENCY) {
      // Record wall-clock latency of this single insert.
      updateLatency(System.currentTimeMillis() - start);
    }
    if (counter == PRINT_INTERVAL) {
      counter = 0;
      printAndAddtoReportQueue(
          "coffey thread " + id + " insert data " + doneSize + " class: " + this.getClass()
              .getName() + ", time: " + dateFormat.format(new Date()));
    }
    ++counter;
    ++doneSize;
  }
  table.close();
  printAndAddtoReportQueue("coffey totally insert " + doneSize + " records");
  // Fold this thread's count into the shared total under the sync object.
  synchronized (syncBoxObj) {
    totalDoneSize += doneSize;
  }
}
 
Example 46
Project: ditb   File: HBaseTestingUtility.java   Source Code and License 5 votes vote down vote up
/**
 * Returns all row keys from the hbase:meta table.
 *
 * @return list of raw row keys, in scan order.
 * @throws IOException When reading the rows fails.
 */
public List<byte[]> getMetaTableRows() throws IOException {
  // TODO: Redo using MetaTableAccessor class
  List<byte[]> rows = new ArrayList<byte[]>();
  // try-with-resources: the original leaked the table and scanner when the
  // scan threw before the explicit close() calls.
  try (Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
       ResultScanner s = t.getScanner(new Scan())) {
    for (Result result : s) {
      LOG.info("getMetaTableRows: row -> " +
        Bytes.toStringBinary(result.getRow()));
      rows.add(result.getRow());
    }
  }
  return rows;
}
 
Example 47
Project: ditb   File: TestServerCustomProtocol.java   Source Code and License 5 votes vote down vote up
// Verifies a coprocessor call keyed to a single row only reaches the one
// region hosting that row, for both the raw PingService call and the
// hello() helper.
@Test
public void testSingleMethod() throws Throwable {
  try (HTable table = new HTable(util.getConfiguration(), TEST_TABLE)) {
    RegionLocator locator = table.getRegionLocator();
    // Invoke ping() on regions covering [ROW_A, ROW_A] — i.e. one region.
    Map<byte [], String> results = table.coprocessorService(PingProtos.PingService.class,
      null, ROW_A,
      new Batch.Call<PingProtos.PingService, String>() {
        @Override
        public String call(PingProtos.PingService instance) throws IOException {
          // Blocking callback converts the async stub call into a sync one.
          BlockingRpcCallback<PingProtos.PingResponse> rpcCallback =
            new BlockingRpcCallback<PingProtos.PingResponse>();
          instance.ping(null, PingProtos.PingRequest.newBuilder().build(), rpcCallback);
          return rpcCallback.get().getPong();
        }
      });
    // Should have gotten results for 1 of the three regions only since we specified
    // rows from 1 region
    assertEquals(1, results.size());
    verifyRegionResults(locator, results, ROW_A);

    final String name = "NAME";
    results = hello(table, name, null, ROW_A);
    // Should have gotten results for 1 of the three regions only since we specified
    // rows from 1 region
    assertEquals(1, results.size());
    verifyRegionResults(locator, results, "Hello, NAME", ROW_A);
  }
}
 
Example 48
Project: ditb   File: TestMultiVersions.java   Source Code and License 5 votes vote down vote up
/**
 * Tests user-specifiable timestamps for putting, getting and scanning, also
 * in the presence of deletes. Test cores are written so they can be run
 * against both an HRegion and an HTable: i.e. both local and remote.
 *
 * <p>Port of the old TestTimestamp test to here so it can better utilize the
 * spun-up cluster running more than a single test per spin up. Keeps the old
 * tests' craziness.
 */
@Test
public void testTimestamps() throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("testTimestamps"));
  HColumnDescriptor hcd = new HColumnDescriptor(TimestampTestBase.FAMILY_NAME);
  // Three versions so multiple timestamps per cell survive.
  hcd.setMaxVersions(3);
  desc.addFamily(hcd);
  this.admin.createTable(desc);
  Table table = new HTable(UTIL.getConfiguration(), desc.getTableName());
  // TODO: Remove these deprecated classes or pull them in here if this is
  // only test using them.
  Incommon incommon = new HTableIncommon(table);
  // Delete semantics across flushes.
  TimestampTestBase.doTestDelete(incommon, new FlushCache() {
    public void flushcache() throws IOException {
      UTIL.getHBaseCluster().flushcache();
    }
   });

  // Perhaps drop and readd the table between tests so the former does
  // not pollute this latter?  Or put into separate tests.
  TimestampTestBase.doTestTimestampScanning(incommon, new FlushCache() {
    public void flushcache() throws IOException {
      UTIL.getMiniHBaseCluster().flushcache();
    }
  });

  table.close();
}
 
Example 49
Project: ditb   File: TestConstraint.java   Source Code and License 5 votes vote down vote up
/**
 * Test that constraints will fail properly: a put against a table carrying
 * AllFailConstraint must be rejected with a single ConstraintException cause.
 * @throws Exception
 */
@SuppressWarnings("unchecked")
@Test(timeout = 60000)
public void testConstraintFails() throws Exception {

  // create the table
  // it would be nice if this was also a method on the util
  HTableDescriptor desc = new HTableDescriptor(tableName);
  for (byte[] family : new byte[][] { dummy, test }) {
    desc.addFamily(new HColumnDescriptor(family));
  }

  // add a constraint that is sure to fail
  Constraints.add(desc, AllFailConstraint.class);

  util.getHBaseAdmin().createTable(desc);
  Table table = new HTable(util.getConfiguration(), tableName);
  try {
    // test that we do fail on violation
    Put put = new Put(row1);
    put.add(dummy, new byte[0], "fail".getBytes());
    LOG.warn("Doing put in table");
    try {
      table.put(put);
      // Fixed typo: "suceeded" -> "succeeded".
      fail("This put should not have succeeded - AllFailConstraint was not run!");
    } catch (RetriesExhaustedWithDetailsException e) {
      List<Throwable> causes = e.getCauses();
      assertEquals(
          "More than one failure cause - should only be the failure constraint exception",
          1, causes.size());
      Throwable t = causes.get(0);
      assertEquals(ConstraintException.class, t.getClass());
    }
  } finally {
    // Previously leaked when fail() threw before the explicit close().
    table.close();
  }
}
 
Example 50
Project: ditb   File: HBaseTestingUtility.java   Source Code and License 5 votes vote down vote up
/**
 * Create a pre-split table with a single column family.
 * @param tableName table to create
 * @param family the one column family
 * @param splitRows explicit split points
 * @return An HTable instance for the created table.
 * @throws IOException
 */
public HTable createTable(TableName tableName, byte[] family, byte[][] splitRows)
    throws IOException {
  HTableDescriptor descriptor = new HTableDescriptor(tableName);
  descriptor.addFamily(new HColumnDescriptor(family));
  getHBaseAdmin().createTable(descriptor, splitRows);
  // createTable only waits for meta entries; wait for actual assignment.
  waitUntilAllRegionsAssigned(tableName);
  return new HTable(getConfiguration(), tableName);
}
 
Example 51
Project: ditb   File: TestMasterReplication.java   Source Code and License 5 votes vote down vote up
@SuppressWarnings("resource")
private Table[] getHTablesOnClusters(TableName tableName) throws Exception {
  int numClusters = utilities.length;
  Table[] htables = new Table[numClusters];
  for (int i = 0; i < numClusters; i++) {
    Table htable = new HTable(configurations[i], tableName);
    htable.setWriteBufferSize(1024);
    htables[i] = htable;
  }
  return htables;
}
 
Example 52
Project: ditb   File: HBaseTestingUtility.java   Source Code and License 5 votes vote down vote up
/**
 * Create a pre-split table with multiple families, all capped at the same
 * number of versions.
 * @param tableName table to create
 * @param families column families to add
 * @param numVersions max versions for every family
 * @param splitKeys explicit split points
 * @return An HTable instance for the created table.
 * @throws IOException
 */
public HTable createTable(TableName tableName, byte[][] families, int numVersions,
    byte[][] splitKeys) throws IOException {
  HTableDescriptor descriptor = new HTableDescriptor(tableName);
  for (byte[] fam : families) {
    descriptor.addFamily(new HColumnDescriptor(fam).setMaxVersions(numVersions));
  }
  getHBaseAdmin().createTable(descriptor, splitKeys);
  // createTable only waits for meta entries; wait for actual assignment.
  waitUntilAllRegionsAssigned(tableName);
  return new HTable(new Configuration(getConfiguration()), tableName);
}
 
Example 53
Project: ditb   File: TestHFileOutputFormat.java   Source Code and License 5 votes vote down vote up
/**
 * Round-trips the per-family bloom type map through a Configuration via
 * {@link HFileOutputFormat#configureBloomType(org.apache.hadoop.hbase.client.Table,
 * Configuration)} and {@link HFileOutputFormat#createFamilyBloomTypeMap
 * (Configuration)}, checking every mocked family comes back unchanged.
 *
 * @throws IOException
 */
@Test
public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException {
  for (int cfCount = 0; cfCount <= 2; cfCount++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, BloomType> expectedBloomTypes =
        getMockColumnFamiliesForBloomType(cfCount);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForBloomType(table, expectedBloomTypes);
    // Serialize the mocked families' bloom types into the configuration.
    HFileOutputFormat.configureBloomType(table, conf);

    // Deserialize and verify each family maps back to its mock value.
    Map<byte[], BloomType> actualBloomTypes =
        HFileOutputFormat.createFamilyBloomTypeMap(conf);
    for (Entry<String, BloomType> expected : expectedBloomTypes.entrySet()) {
      BloomType actual = actualBloomTypes.get(expected.getKey().getBytes());
      assertEquals("BloomType configuration incorrect for column family:"
          + expected.getKey(), expected.getValue(), actual);
    }
  }
}
 
Example 54
Project: ditb   File: HBaseTestingUtility.java   Source Code and License 5 votes vote down vote up
/**
 * Create a table using an explicit client Configuration.
 * @param tableName table to create
 * @param families column families to add
 * @param c Configuration to use for the returned HTable
 * @param numVersions max versions for every family
 * @return An HTable instance for the created table.
 * @throws IOException
 */
public HTable createTable(byte[] tableName, byte[][] families,
    final Configuration c, int numVersions)
throws IOException {
  HTableDescriptor descriptor = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] fam : families) {
    descriptor.addFamily(new HColumnDescriptor(fam).setMaxVersions(numVersions));
  }
  getHBaseAdmin().createTable(descriptor);
  return new HTable(c, descriptor.getTableName());
}
 
Example 55
Project: ditb   File: HBaseTestingUtility.java   Source Code and License 5 votes vote down vote up
/**
 * Starts the hbase cluster up again after shutting it down previously in a
 * test.  Use this if you want to keep dfs/zk up and just stop/start hbase.
 * @param servers number of region servers
 * @throws IOException
 */
public void restartHBaseCluster(int servers) throws IOException, InterruptedException {
  this.hbaseCluster = new MiniHBaseCluster(this.conf, servers);
  // Don't leave here till we've done a successful scan of the hbase:meta.
  // try-with-resources: the original leaked the table and scanner when the
  // scan threw before the explicit close() calls.
  try (Table t = new HTable(new Configuration(this.conf), TableName.META_TABLE_NAME);
       ResultScanner s = t.getScanner(new Scan())) {
    while (s.next() != null) {
      // do nothing; draining the scanner proves meta is readable
    }
  }
  LOG.info("HBase has been restarted");
}
 
Example 56
Project: ditb   File: TestAcidGuarantees.java   Source Code and License 5 votes vote down vote up
// Reader worker that repeatedly Gets targetRow across targetFamilies to
// check atomicity; opens its own HTable against the shared test table.
public AtomicGetReader(TestContext ctx, byte targetRow[],
                       byte targetFamilies[][]) throws IOException {
  super(ctx);
  this.targetRow = targetRow;
  this.targetFamilies = targetFamilies;
  // NOTE(review): table is presumably closed by the worker's teardown —
  // confirm; it is not closed in this constructor.
  table = new HTable(ctx.getConf(), TABLE_NAME);
}
 
Example 57
Project: ditb   File: HBaseTestingUtility.java   Source Code and License 5 votes vote down vote up
/**
 * Create a table with multiple regions.
 * @param tableName table to create
 * @param family the one column family
 * @param numRegions total number of regions wanted (minimum 3)
 * @return An HTable instance for the created table.
 * @throws IOException if numRegions &lt; 3 or table creation fails
 */
public HTable createMultiRegionTable(TableName tableName, byte[] family, int numRegions)
    throws IOException {
  if (numRegions < 3) {
    throw new IOException("Must create at least 3 regions");
  }
  // numRegions - 3 interior splits between "aaaaa" and "zzzzz", plus the
  // two boundary regions and the one below "aaaaa", give numRegions total.
  byte[][] splitKeys =
      Bytes.split(Bytes.toBytes("aaaaa"), Bytes.toBytes("zzzzz"), numRegions - 3);
  return createTable(tableName, new byte[][] { family }, splitKeys);
}
 
Example 58
Project: ditb   File: TestHFileOutputFormat2.java   Source Code and License 5 votes vote down vote up
/**
 * Round-trips the per-family bloom type map through a Configuration via
 * {@link HFileOutputFormat2#configureBloomType(org.apache.hadoop.hbase.client.Table,
 * Configuration)} and {@link HFileOutputFormat2#createFamilyBloomTypeMap
 * (Configuration)}, checking every mocked family comes back unchanged.
 *
 * @throws IOException
 */
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void testSerializeDeserializeFamilyBloomTypeMap() throws IOException {
  for (int cfCount = 0; cfCount <= 2; cfCount++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, BloomType> expectedBloomTypes =
        getMockColumnFamiliesForBloomType(cfCount);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForBloomType(table, expectedBloomTypes);
    // Serialize the mocked families' bloom types into the configuration.
    HFileOutputFormat2.configureBloomType(table.getTableDescriptor(), conf);

    // Deserialize and verify each family maps back to its mock value.
    Map<byte[], BloomType> actualBloomTypes =
        HFileOutputFormat2.createFamilyBloomTypeMap(conf);
    for (Entry<String, BloomType> expected : expectedBloomTypes.entrySet()) {
      BloomType actual = actualBloomTypes.get(expected.getKey().getBytes());
      assertEquals("BloomType configuration incorrect for column family:"
          + expected.getKey(), expected.getValue(), actual);
    }
  }
}
 
Example 59
Project: ditb   File: TestEncryptionKeyRotation.java   Source Code and License 5 votes vote down vote up
/**
 * Creates the test table described by htd, writes one cell to its first
 * family, and flushes so a store file exists on disk.
 */
private void createTableAndFlush(HTableDescriptor htd) throws Exception {
  HColumnDescriptor firstFamily = htd.getFamilies().iterator().next();
  // Create the test table and wait until it is actually usable.
  TEST_UTIL.getHBaseAdmin().createTable(htd);
  TEST_UTIL.waitTableAvailable(htd.getName(), 5000);
  // Write a single cell, closing the table handle even on failure.
  Table table = new HTable(conf, htd.getTableName());
  try {
    Put put = new Put(Bytes.toBytes("testrow"));
    put.add(firstFamily.getName(), Bytes.toBytes("q"), Bytes.toBytes("value"));
    table.put(put);
  } finally {
    table.close();
  }
  // Flush the memstore so the edit lands in a store file.
  TEST_UTIL.getHBaseAdmin().flush(htd.getTableName());
}
 
Example 60
Project: ditb   File: HBaseTestingUtility.java   Source Code and License 5 votes vote down vote up
/**
 * Create a table split evenly into numRegions between startKey and endKey,
 * with every family capped at numVersions.
 */
public HTable createTable(TableName tableName, byte[][] families,
    int numVersions, byte[] startKey, byte[] endKey, int numRegions)
throws IOException{
  HTableDescriptor descriptor = new HTableDescriptor(tableName);
  for (byte[] fam : families) {
    descriptor.addFamily(new HColumnDescriptor(fam).setMaxVersions(numVersions));
  }
  getHBaseAdmin().createTable(descriptor, startKey, endKey, numRegions);
  // createTable only waits for meta entries; wait for actual assignment.
  waitUntilAllRegionsAssigned(tableName);
  return new HTable(getConfiguration(), tableName);
}