Java Code Examples for org.apache.hadoop.hbase.client.HTable

The following are top-voted examples showing how to use org.apache.hadoop.hbase.client.HTable. The examples are extracted from open source projects. You can vote up the examples you like; your votes help the system surface more good examples.
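Note: most of these examples predate HBase 1.0. The HTable constructors they rely on are deprecated there and were removed in HBase 2.0, where client code obtains Table instances from a shared Connection instead. As a hedged point of comparison, here is a minimal sketch of the Connection-based pattern (the table name "mytable", family "cf", and values are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ConnectionBasedExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // A Connection is heavyweight and thread-safe; create it once and share it.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         // Table instances are lightweight; get one per use and close it.
         Table table = connection.getTable(TableName.valueOf("mytable"))) {
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}

The same substitution applies throughout: where an example calls new HTable(conf, name), current code would call connection.getTable(TableName.valueOf(name)).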
Example 1
Project: ditb   File: OfflineMetaRebuildTestCore.java
protected void wipeOutMeta() throws IOException {
  // Mess it up by blowing up meta.
  Admin admin = TEST_UTIL.getHBaseAdmin();
  Scan s = new Scan();
  Table meta = new HTable(conf, TableName.META_TABLE_NAME);
  ResultScanner scanner = meta.getScanner(s);
  List<Delete> dels = new ArrayList<Delete>();
  for (Result r : scanner) {
    HRegionInfo info =
        HRegionInfo.getHRegionInfo(r);
    if (info != null && !info.getTable().getNamespaceAsString()
        .equals(NamespaceDescriptor.SYSTEM_NAMESPACE_NAME_STR)) {
      Delete d = new Delete(r.getRow());
      dels.add(d);
      admin.unassign(r.getRow(), true);
    }
  }
  meta.delete(dels);
  scanner.close();
  meta.close();
}
 
Example 2
Project: SparkDemo   File: MyClass.java
public static void QueryByCondition2(String tableName) {
  try {
    HTablePool pool = new HTablePool(configuration, 1000);
    HTable table = (HTable) pool.getTable(tableName);
    // Query rows where the value of column family "column1" is "aaa"
    Filter filter = new SingleColumnValueFilter(Bytes.toBytes("column1"), null,
        CompareOp.EQUAL, Bytes.toBytes("aaa"));
    Scan s = new Scan();
    s.setFilter(filter);
    ResultScanner rs = table.getScanner(s);
    for (Result r : rs) {
      System.out.println("row key: " + new String(r.getRow()));
      for (KeyValue keyValue : r.raw()) {
        System.out.println("column: " + new String(keyValue.getFamily())
            + "====value: " + new String(keyValue.getValue()));
      }
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
}
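HTablePool, used above, was deprecated and later removed; a shared Connection now plays the pooling role and hands out lightweight Table instances. A sketch of the same filtered scan against the Connection API, reusing the example's configuration field and its "column1"/"aaa" filter (a sketch only, not the original project's code):

try (Connection connection = ConnectionFactory.createConnection(configuration);
     Table table = connection.getTable(TableName.valueOf(tableName))) {
  // Match rows whose "column1" family contains the value "aaa", as above.
  Filter filter = new SingleColumnValueFilter(Bytes.toBytes("column1"), null,
      CompareOp.EQUAL, Bytes.toBytes("aaa"));
  Scan s = new Scan();
  s.setFilter(filter);
  try (ResultScanner rs = table.getScanner(s)) {
    for (Result r : rs) {
      System.out.println("row key: " + Bytes.toString(r.getRow()));
    }
  }
} catch (IOException e) {
  e.printStackTrace();
}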
 
Example 3
Project: ditb   File: TestHBaseFsck.java
/**
 * Sets up a clean table with a certain region_replica count.
 *
 * It will set tbl, which needs to be closed after the test.
 *
 * @param tablename
 * @param replicaCount
 * @throws Exception
 */
void setupTableWithRegionReplica(TableName tablename, int replicaCount) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(tablename);
  desc.setRegionReplication(replicaCount);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
  desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
  createTable(TEST_UTIL, desc, SPLITS);

  tbl = (HTable) connection.getTable(tablename, tableExecutorService);
  List<Put> puts = new ArrayList<Put>();
  for (byte[] row : ROWKEYS) {
    Put p = new Put(row);
    p.add(FAM, Bytes.toBytes("val"), row);
    puts.add(p);
  }
  tbl.put(puts);
  tbl.flushCommits();
}
 
Example 4
Project: HBase-High-Performance-Cookbook   File: HBaseRegularClient.java
/**
 * Gets all records from an existing table by scanning every row.
 * @method getAllRecord
 * @inputParameters myHbaseBtableName the HBase table name to scan
 * @return none; this is a void method
 **/
@SuppressWarnings({ "deprecation", "resource" })
public static void getAllRecord(String myHbaseBtableName) {
  ResultScanner hbaseBSs = null;
  try {
    HTable hbaseBtable = new HTable(hbaseBconf, myHbaseBtableName);
    Scan hbaseBScan = new Scan();
    hbaseBSs = hbaseBtable.getScanner(hbaseBScan);
    for (Result r : hbaseBSs) {
      for (KeyValue hbaseBkv : r.raw()) {
        System.out.print(new String(hbaseBkv.getRow()) + " ");
        System.out.print(new String(hbaseBkv.getFamily()) + ":");
        System.out.print(new String(hbaseBkv.getQualifier()) + " ");
        System.out.print(hbaseBkv.getTimestamp() + " ");
        System.out.println(new String(hbaseBkv.getValue()));
      }
    }
  } catch (IOException eio) {
    eio.printStackTrace();
  } finally {
    // close the scanner; note this example never closes the HTable itself
    if (hbaseBSs != null) hbaseBSs.close();
  }
}
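Result.raw() and the KeyValue getters used above are deprecated in favor of the Cell interface. A minimal sketch of the same inner loop with Cell and CellUtil, assuming the example's hbaseBSs scanner:

for (Result r : hbaseBSs) {
  for (Cell cell : r.rawCells()) {
    // CellUtil.clone* copies the bytes out of the scanner's shared buffers.
    System.out.print(Bytes.toString(CellUtil.cloneRow(cell)) + " ");
    System.out.print(Bytes.toString(CellUtil.cloneFamily(cell)) + ":");
    System.out.print(Bytes.toString(CellUtil.cloneQualifier(cell)) + " ");
    System.out.print(cell.getTimestamp() + " ");
    System.out.println(Bytes.toString(CellUtil.cloneValue(cell)));
  }
}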
 
Example 5
Project: ditb   File: IntegrationTestBigLinkedList.java
@Override
public int run(String[] args) throws Exception {
  if (args.length != 1) {
    System.out.println("Usage : " + Delete.class.getSimpleName() + " <node to delete>");
    return 0;
  }
  byte[] val = Bytes.toBytesBinary(args[0]);

  org.apache.hadoop.hbase.client.Delete delete
    = new org.apache.hadoop.hbase.client.Delete(val);

  Table table = new HTable(getConf(), getTableName(getConf()));
  table.delete(delete);
  table.close();

  System.out.println("Delete successful");
  return 0;
}
 
Example 6
Project: ditb   File: TestMultiTableInputFormat.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  // switch TIF to log at DEBUG level
  TEST_UTIL.enableDebug(MultiTableInputFormat.class);
  TEST_UTIL.enableDebug(MultiTableInputFormatBase.class);
  TEST_UTIL.setJobWithoutMRCluster();
  // start mini hbase cluster
  TEST_UTIL.startMiniCluster(3);
  // create and fill table
  for (int i = 0; i < 3; i++) {
    try (HTable table =
        TEST_UTIL.createMultiRegionTable(TableName.valueOf(TABLE_NAME + String.valueOf(i)),
          INPUT_FAMILY, 4)) {
      TEST_UTIL.loadTable(table, INPUT_FAMILY, false);
    }
  }
}
 
Example 7
Project: Transwarp-Sample-Code   File: LobUtil.java
/**
 * Uploads an object into a LOB table.
 * @param tableName Hyperbase table name
 * @param row row key (a String, converted to bytes)
 * @param filename file name
 * @param fileData file data
 */
public void putLob(String tableName, String row, String filename, byte[] fileData){
    byte[] rowkey = Bytes.toBytes(row);
    try {
        HTable htable = new HTable(conf, tableName);
        Put put = new Put(rowkey);
        put.add(Bytes.toBytes(family1), Bytes.toBytes(f1_q1), Bytes.toBytes(filename));
        put.add(Bytes.toBytes(family2), Bytes.toBytes(f2_q1), fileData);
        htable.put(put);
        htable.flushCommits();
        htable.close();
    } catch (IOException e1) {
        e1.printStackTrace();
    }
}
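HTable.flushCommits() and the Put.add(family, qualifier, value) overload are gone in HBase 2.0; buffered writes moved to BufferedMutator. A hedged sketch of the same upload, reusing the example's conf, rowkey, and field names:

try (Connection connection = ConnectionFactory.createConnection(conf);
     BufferedMutator mutator =
         connection.getBufferedMutator(TableName.valueOf(tableName))) {
  Put put = new Put(rowkey);
  put.addColumn(Bytes.toBytes(family1), Bytes.toBytes(f1_q1), Bytes.toBytes(filename));
  put.addColumn(Bytes.toBytes(family2), Bytes.toBytes(f2_q1), fileData);
  mutator.mutate(put);
  // close() flushes buffered mutations; call mutator.flush() to force it earlier.
} catch (IOException e) {
  e.printStackTrace();
}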
 
Example 8
Project: Transwarp-Sample-Code   File: udtfCheck.java
@Override
public void process(Object[] record) throws HiveException {
    final String document = (String) stringOI.getPrimitiveJavaObject(record[0]);

    if (document == null) {
        return;
    }

    String[] tokens = document.split(",");
    String[] results = tokens[1].split(" ");

    try {
        hTable = new HTable(conf, "bi");
        Get get = new Get(Bytes.toBytes(tokens[0]));
        result = hTable.exists(get);
    } catch (Exception e) {
        e.printStackTrace();
    }

    if (!result) {
        for (String r : results) {
            forward(new Object[]{tokens[0], r});
        }
    }
}
 
Example 9
Project: ditb   File: TestLoadIncrementalHFiles.java
private void runTest(String testName, HTableDescriptor htd, BloomType bloomType,
    boolean preCreateTable, byte[][] tableSplitKeys, byte[][][] hfileRanges) throws Exception {

  for (boolean managed : new boolean[] { true, false }) {
    Path dir = util.getDataTestDirOnTestFS(testName);
    FileSystem fs = util.getTestFileSystem();
    dir = dir.makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(FAMILY));

    int hfileIdx = 0;
    for (byte[][] range : hfileRanges) {
      byte[] from = range[0];
      byte[] to = range[1];
      HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_"
          + hfileIdx++), FAMILY, QUALIFIER, from, to, 1000);
    }
    int expectedRows = hfileIdx * 1000;

    if (preCreateTable) {
      util.getHBaseAdmin().createTable(htd, tableSplitKeys);
    }

    final TableName tableName = htd.getTableName();
    if (!util.getHBaseAdmin().tableExists(tableName)) {
      util.getHBaseAdmin().createTable(htd);
    }
    LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration());

    if (managed) {
      try (HTable table = new HTable(util.getConfiguration(), tableName)) {
        loader.doBulkLoad(dir, table);
        assertEquals(expectedRows, util.countRows(table));
      }
    } else {
      try (Connection conn = ConnectionFactory.createConnection(util.getConfiguration());
          HTable table = (HTable) conn.getTable(tableName)) {
        loader.doBulkLoad(dir, table);
      }
    }

    // verify staging folder has been cleaned up
    Path stagingBasePath = SecureBulkLoadUtil.getBaseStagingDir(util.getConfiguration());
    if (fs.exists(stagingBasePath)) {
      FileStatus[] files = fs.listStatus(stagingBasePath);
      for (FileStatus file : files) {
        assertTrue("Folder=" + file.getPath() + " is not cleaned up.",
            file.getPath().getName() != "DONOTERASE");
      }
    }

    util.deleteTable(tableName);
  }
}
 
Example 10
Project: ditb   File: TestScannersWithLabels.java
private static int insertData(TableName tableName, String column, double prob)
    throws IOException {
  byte[] k = new byte[3];
  byte[][] famAndQf = KeyValue.parseColumn(Bytes.toBytes(column));

  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < 9; i++) {
    Put put = new Put(Bytes.toBytes("row" + i));
    put.setDurability(Durability.SKIP_WAL);
    put.add(famAndQf[0], famAndQf[1], k);
    put.setCellVisibility(new CellVisibility("(" + SECRET + "|" + CONFIDENTIAL + ")" + "&" + "!"
        + TOPSECRET));
    puts.add(put);
  }
  try (Table table = new HTable(TEST_UTIL.getConfiguration(), tableName)) {
    table.put(puts);
  }
  return puts.size();
}
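The puts above tag each cell with the visibility expression (SECRET|CONFIDENTIAL)&!TOPSECRET. On the read side, such cells only come back when the scan carries matching Authorizations and the scanning user has been granted those labels (typically via VisibilityClient). A sketch of that read path, reusing the test's constants and assuming an open Connection named connection:

Scan scan = new Scan();
scan.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
try (Table table = connection.getTable(tableName);
     ResultScanner scanner = table.getScanner(scan)) {
  for (Result r : scanner) {
    System.out.println("visible row: " + Bytes.toString(r.getRow()));
  }
}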
 
Example 11
Project: aliyun-maxcompute-data-collectors   File: HBaseBulkImportJob.java
@Override
protected void jobSetup(Job job) throws IOException, ImportException {
  super.jobSetup(job);

  // we shouldn't have gotten here if the bulk load dir is not set,
  // so let's throw an ImportException
  if (getContext().getDestination() == null) {
    throw new ImportException("Can't run HBaseBulkImportJob without a " +
        "valid destination directory.");
  }

  TableMapReduceUtil.addDependencyJars(job.getConfiguration(), Preconditions.class);
  FileOutputFormat.setOutputPath(job, getContext().getDestination());
  HTable hTable = new HTable(job.getConfiguration(), options.getHBaseTable());
  HFileOutputFormat.configureIncrementalLoad(job, hTable);
}
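HFileOutputFormat was deprecated in favor of HFileOutputFormat2, which is configured from a table descriptor plus a RegionLocator rather than an HTable (Example 12 below shows the same call in a test). A sketch of the equivalent setup, keeping the example's job and options.getHBaseTable():

try (Connection connection = ConnectionFactory.createConnection(job.getConfiguration());
     Table hTable = connection.getTable(TableName.valueOf(options.getHBaseTable()));
     RegionLocator locator = connection.getRegionLocator(hTable.getName())) {
  HFileOutputFormat2.configureIncrementalLoad(job, hTable.getTableDescriptor(), locator);
}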
 
Example 12
Project: ditb   File: TestHFileOutputFormat.java
private void runIncrementalPELoad(
    Configuration conf, HTable table, Path outDir)
throws Exception {
  Job job = new Job(conf, "testLocalMRIncrementalLoad");
  job.setWorkingDirectory(util.getDataTestDirOnTestFS("runIncrementalPELoad"));
  job.getConfiguration().setStrings("io.serializations", conf.get("io.serializations"),
      MutationSerialization.class.getName(), ResultSerialization.class.getName(),
      KeyValueSerialization.class.getName());
  setupRandomGeneratorMapper(job);
  HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(),
      table.getRegionLocator());
  FileOutputFormat.setOutputPath(job, outDir);

  Assert.assertFalse(util.getTestFileSystem().exists(outDir));

  assertEquals(table.getRegionLocator().getAllRegionLocations().size(), job.getNumReduceTasks());

  assertTrue(job.waitForCompletion(true));
}
 
Example 13
Project: ditb   File: TestQuotaThrottle.java
@BeforeClass
public static void setUpBeforeClass() throws Exception {
  TEST_UTIL.getConfiguration().setBoolean(QuotaUtil.QUOTA_CONF_KEY, true);
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 10);
  TEST_UTIL.getConfiguration().setInt("hbase.regionserver.msginterval", 100);
  TEST_UTIL.getConfiguration().setInt("hbase.client.pause", 250);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 6);
  TEST_UTIL.getConfiguration().setBoolean("hbase.master.enabletable.roundrobin", true);
  TEST_UTIL.startMiniCluster(1);
  TEST_UTIL.waitTableAvailable(QuotaTableUtil.QUOTA_TABLE_NAME);
  QuotaCache.setTEST_FORCE_REFRESH(true);

  tables = new HTable[TABLE_NAMES.length];
  for (int i = 0; i < TABLE_NAMES.length; ++i) {
    tables[i] = TEST_UTIL.createTable(TABLE_NAMES[i], FAMILY);
  }
}
 
Example 14
Project: ditb   File: TableInputFormatBase.java
/**
 * Allows subclasses to set the {@link HTable}.
 *
 * Will attempt to reuse the underlying Connection for our own needs, including
 * retrieving an Admin interface to the HBase cluster.
 *
 * @param table  The table to get the data from.
 * @throws IOException 
 * @deprecated Use {@link #initializeTable(Connection, TableName)} instead.
 */
@Deprecated
protected void setHTable(HTable table) throws IOException {
  this.table = table;
  this.connection = table.getConnection();
  try {
    this.regionLocator = table.getRegionLocator();
    this.admin = this.connection.getAdmin();
  } catch (NeedUnmanagedConnectionException exception) {
    LOG.warn("You are using an HTable instance that relies on an HBase-managed Connection. " +
        "This is usually due to directly creating an HTable, which is deprecated. Instead, you " +
        "should create a Connection object and then request a Table instance from it. If you " +
        "don't need the Table instance for your own use, you should instead use the " +
        "TableInputFormatBase.initalizeTable method directly.");
    LOG.info("Creating an additional unmanaged connection because user provided one can't be " +
        "used for administrative actions. We'll close it when we close out the table.");
    LOG.debug("Details about our failure to request an administrative interface.", exception);
    // Do we need a "copy the settings from this Connection" method? are things like the User
    // properly maintained by just looking again at the Configuration?
    this.connection = ConnectionFactory.createConnection(this.connection.getConfiguration());
    this.regionLocator = this.connection.getRegionLocator(table.getName());
    this.admin = this.connection.getAdmin();
  }
}
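A subclass following the @deprecated note would hand the base class a Connection and a TableName rather than a pre-built HTable. A hedged sketch of that path, assuming this branch's protected initialize(JobContext) hook; the table name is a placeholder:

@Override
protected void initialize(JobContext context) throws IOException {
  // The base class manages the Table/RegionLocator/Admin it derives from this
  // unmanaged connection; "mytable" is a placeholder table name.
  Connection connection =
      ConnectionFactory.createConnection(context.getConfiguration());
  initializeTable(connection, TableName.valueOf("mytable"));
}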
 
Example 15
Project: ditb   File: TestZKBasedOpenCloseRegion.java
@BeforeClass public static void beforeAllTests() throws Exception {
  Configuration c = TEST_UTIL.getConfiguration();
  c.setBoolean("hbase.assignment.usezk", true);
  c.setBoolean("dfs.support.append", true);
  c.setInt("hbase.regionserver.info.port", 0);
  TEST_UTIL.startMiniCluster(2);
  TEST_UTIL.createMultiRegionTable(TABLENAME, FAMILIES);
  HTable t = new HTable(TEST_UTIL.getConfiguration(), TABLENAME);
  countOfRegions = -1;
  try (RegionLocator r = t.getRegionLocator()) {
    countOfRegions = r.getStartKeys().length;
  }
  waitUntilAllRegionsAssigned();
  addToEachStartKey(countOfRegions);
  t.close();
  TEST_UTIL.getHBaseCluster().getMaster().assignmentManager.initializeHandlerTrackers();
}
 
Example 16
Project: ditb   File: TestLogRollPeriod.java
/**
 * Tests that the LogRoller perform the roll even if there are no edits
 */
@Test
public void testNoEdits() throws Exception {
  TableName tableName = TableName.valueOf("TestLogRollPeriodNoEdits");
  TEST_UTIL.createTable(tableName, "cf");
  try {
    Table table = new HTable(TEST_UTIL.getConfiguration(), tableName);
    try {
      HRegionServer server = TEST_UTIL.getRSForFirstRegionInTable(tableName);
      WAL log = server.getWAL(null);
      checkMinLogRolls(log, 5);
    } finally {
      table.close();
    }
  } finally {
    TEST_UTIL.deleteTable(tableName);
  }
}
 
Example 17
Project: ditb   File: TestCoprocessorEndpoint.java
@BeforeClass
public static void setupBeforeClass() throws Exception {
  // set configure to indicate which cp should be loaded
  Configuration conf = util.getConfiguration();
  conf.setInt(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 5000);
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      org.apache.hadoop.hbase.coprocessor.ColumnAggregationEndpoint.class.getName(),
      ProtobufCoprocessorService.class.getName());
  conf.setStrings(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY,
      ProtobufCoprocessorService.class.getName());
  util.startMiniCluster(2);

  Admin admin = util.getHBaseAdmin();
  HTableDescriptor desc = new HTableDescriptor(TEST_TABLE);
  desc.addFamily(new HColumnDescriptor(TEST_FAMILY));
  admin.createTable(desc, new byte[][]{ROWS[rowSeperator1], ROWS[rowSeperator2]});
  util.waitUntilAllRegionsAssigned(TEST_TABLE);

  Table table = new HTable(conf, TEST_TABLE);
  for (int i = 0; i < ROWSIZE; i++) {
    Put put = new Put(ROWS[i]);
    put.addColumn(TEST_FAMILY, TEST_QUALIFIER, Bytes.toBytes(i));
    table.put(put);
  }
  table.close();
}
 
Example 18
Project: ditb   File: TestMultiRowRangeFilter.java
@Test
public void testMultiRowRangeFilterWithoutRangeOverlap() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithoutRangeOverlap");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);

  Scan scan = new Scan();
  scan.setMaxVersions();

  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));
  ranges.add(new RowRange(Bytes.toBytes(10), true, Bytes.toBytes(20), false));
  ranges.add(new RowRange(Bytes.toBytes(60), true, Bytes.toBytes(70), false));

  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  LOG.info("found " + resultsSize + " results");
  List<Cell> results1 = getScanResult(Bytes.toBytes(10), Bytes.toBytes(20), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), ht);
  List<Cell> results3 = getScanResult(Bytes.toBytes(60), Bytes.toBytes(70), ht);

  assertEquals(results1.size() + results2.size() + results3.size(), resultsSize);

  ht.close();
}
 
Example 19
Project: ditb   File: TestOpenTableInCoprocessor.java
private void runCoprocessorConnectionToRemoteTable(Class<? extends BaseRegionObserver> clazz,
    boolean[] completeCheck) throws Throwable {
  HTableDescriptor primary = new HTableDescriptor(primaryTable);
  primary.addFamily(new HColumnDescriptor(family));
  // add our coprocessor
  primary.addCoprocessor(clazz.getName());

  HTableDescriptor other = new HTableDescriptor(otherTable);
  other.addFamily(new HColumnDescriptor(family));

  Admin admin = UTIL.getHBaseAdmin();
  admin.createTable(primary);
  admin.createTable(other);

  Table table = new HTable(UTIL.getConfiguration(), TableName.valueOf("primary"));
  Put p = new Put(new byte[] { 'a' });
  p.add(family, null, new byte[] { 'a' });
  table.put(p);
  table.close();

  Table target = new HTable(UTIL.getConfiguration(), otherTable);
  assertTrue("Didn't complete update to target table!", completeCheck[0]);
  assertEquals("Didn't find inserted row", 1, getKeyValueCount(target));
  target.close();
}
 
Example 20
Project: ditb   File: TestMultiRowRangeFilter.java
@Test
public void testMultiRowRangeFilterWithEmptyStartRow() throws IOException {
  tableName = Bytes.toBytes("testMultiRowRangeFilterWithEmptyStartRow");
  HTable ht = TEST_UTIL.createTable(tableName, family, Integer.MAX_VALUE);
  generateRows(numRows, ht, family, qf, value);
  Scan scan = new Scan();
  scan.setMaxVersions();

  List<RowRange> ranges = new ArrayList<RowRange>();
  ranges.add(new RowRange(Bytes.toBytes(""), true, Bytes.toBytes(10), false));
  ranges.add(new RowRange(Bytes.toBytes(30), true, Bytes.toBytes(40), false));

  MultiRowRangeFilter filter = new MultiRowRangeFilter(ranges);
  scan.setFilter(filter);
  int resultsSize = getResultsSize(ht, scan);
  List<Cell> results1 = getScanResult(Bytes.toBytes(""), Bytes.toBytes(10), ht);
  List<Cell> results2 = getScanResult(Bytes.toBytes(30), Bytes.toBytes(40), ht);
  assertEquals(results1.size() + results2.size(), resultsSize);

  ht.close();
}
 
Example 21
Project: ditb   File: TestNamespaceUpgrade.java
@Test (timeout=300000)
public void testSnapshots() throws IOException, InterruptedException {
  String[][] snapshots = { snapshot1Keys, snapshot2Keys };
  for (int i = 1; i <= snapshots.length; i++) {
    for (TableName table : tables) {
      TEST_UTIL.getHBaseAdmin().cloneSnapshot(table + "_snapshot" + i,
          TableName.valueOf(table + "_clone" + i));
      FSUtils.logFileSystemState(FileSystem.get(TEST_UTIL.getConfiguration()),
          FSUtils.getRootDir(TEST_UTIL.getConfiguration()), LOG);
      int count = 0;
      for (Result res : new HTable(TEST_UTIL.getConfiguration(), table + "_clone" + i)
          .getScanner(new Scan())) {
        assertEquals(snapshots[i - 1][count++], Bytes.toString(res.getRow()));
      }
      Assert.assertEquals(table + "_snapshot" + i, snapshots[i - 1].length, count);
    }
  }
}
 
Example 22
Project: ditb   File: IndexChooser.java
public IndexChooser(final IndexTable indexTable) throws IOException {
  this.indexTable = indexTable;
  indexRegionMaps = new TreeMap<byte[], List<HRegionInfo>>(Bytes.BYTES_COMPARATOR);

  for (Map.Entry<byte[], Table> entry : indexTable.getIndexTableMaps().entrySet()) {
    if (!(entry.getValue() instanceof HTable)) {
      throw new IOException(
          "table is not an instance of HTable, it is " + entry.getValue().getClass().getName());
    }
    HTable htable = (HTable) entry.getValue();
    ArrayList<HRegionInfo> list =
        new ArrayList<HRegionInfo>(htable.getRegionLocations().keySet());
    indexRegionMaps.put(entry.getKey(), list);
  }

  speedTimes = DEFAULT_SPEED_TIMES;
}
 
Example 23
Project: ditb   File: TestBulkDeleteProtocol.java
public void testBulkDeleteFamily() throws Throwable {
  TableName tableName = TableName.valueOf("testBulkDeleteFamily");
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor(FAMILY1));
  htd.addFamily(new HColumnDescriptor(FAMILY2));
  TEST_UTIL.getHBaseAdmin().createTable(htd, Bytes.toBytes(0), Bytes.toBytes(120), 5);
  Table ht = new HTable(TEST_UTIL.getConfiguration(), tableName);
  List<Put> puts = new ArrayList<Put>(100);
  for (int j = 0; j < 100; j++) {
    Put put = new Put(Bytes.toBytes(j));
    put.add(FAMILY1, QUALIFIER1, "v1".getBytes());
    put.add(FAMILY2, QUALIFIER2, "v2".getBytes());
    puts.add(put);
  }
  ht.put(puts);
  Scan scan = new Scan();
  scan.addFamily(FAMILY1);
  // Delete the column family cf1
  long noOfRowsDeleted = invokeBulkDeleteProtocol(tableName, scan, 500, DeleteType.FAMILY, null);
  assertEquals(100, noOfRowsDeleted);
  int rows = 0;
  for (Result result : ht.getScanner(new Scan())) {
    assertTrue(result.getFamilyMap(FAMILY1).isEmpty());
    assertEquals(1, result.getColumnCells(FAMILY2, QUALIFIER2).size());
    rows++;
  }
  assertEquals(100, rows);
  ht.close();
}
 
Example 24
Project: ditb   File: TestHBaseFsck.java
@Test (timeout=180000)
public void testTableWithNoRegions() throws Exception {
  // We might end up with empty regions in a table
  // see also testNoHdfsTable()
  TableName table = TableName.valueOf(name.getMethodName());
  try {
    // create table with one region
    HTableDescriptor desc = new HTableDescriptor(table);
    HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toString(FAM));
    desc.addFamily(hcd); // If a table has no CF's it doesn't get checked
    createTable(TEST_UTIL, desc, null);
    tbl = (HTable) connection.getTable(table, tableExecutorService);

    // Mess it up by leaving a hole in the assignment, meta, and hdfs data
    deleteRegion(conf, tbl.getTableDescriptor(), HConstants.EMPTY_START_ROW,
        HConstants.EMPTY_END_ROW, false, false, true);

    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NOT_IN_HDFS });

    doFsck(conf, true);

    // fix hole
    doFsck(conf, true);

    // check that hole fixed
    assertNoErrors(doFsck(conf, false));
  } finally {
    cleanupTable(table);
  }

}
 
Example 25
Project: ditb   File: TestRowCountEndpoint.java
public void testEndpoint() throws Throwable {
  Table table = new HTable(CONF, TEST_TABLE);

  // insert some test rows
  for (int i = 0; i < 5; i++) {
    byte[] iBytes = Bytes.toBytes(i);
    Put p = new Put(iBytes);
    p.add(TEST_FAMILY, TEST_COLUMN, iBytes);
    table.put(p);
  }

  final ExampleProtos.CountRequest request = ExampleProtos.CountRequest.getDefaultInstance();
  Map<byte[],Long> results = table.coprocessorService(ExampleProtos.RowCountService.class,
      null, null,
      new Batch.Call<ExampleProtos.RowCountService,Long>() {
        public Long call(ExampleProtos.RowCountService counter) throws IOException {
          ServerRpcController controller = new ServerRpcController();
          BlockingRpcCallback<ExampleProtos.CountResponse> rpcCallback =
              new BlockingRpcCallback<ExampleProtos.CountResponse>();
          counter.getRowCount(controller, request, rpcCallback);
          ExampleProtos.CountResponse response = rpcCallback.get();
          if (controller.failedOnException()) {
            throw controller.getFailedOn();
          }
          return (response != null && response.hasCount()) ? response.getCount() : 0;
        }
      });
  // should be one region with results
  assertEquals(1, results.size());
  Iterator<Long> iter = results.values().iterator();
  Long val = iter.next();
  assertNotNull(val);
  assertEquals(5L, val.longValue());
}
 
Example 26
Project: flume-release-1.7.0   File: TestAsyncHBaseSink.java
@Test
public void testThreeEvents() throws Exception {
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  deleteTable = true;
  AsyncHBaseSink sink = new AsyncHBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  Assert.assertFalse(sink.isConfNull());
  sink.process();
  sink.stop();
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 3);
  byte[] out;
  int found = 0;
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
        found++;
        break;
      }
    }
  }
  Assert.assertEquals(3, found);
  out = results[3];
  Assert.assertArrayEquals(Longs.toByteArray(3), out);
}
 
Example 27
Project: ditb   File: TestServerCustomProtocol.java
@Test
public void testNullReturn() throws Throwable {
  try (HTable table = new HTable(util.getConfiguration(), TEST_TABLE)) {
    RegionLocator locator = table.getRegionLocator();
    Map<byte[],String> results = hello(table, "nobody", ROW_A, ROW_C);
    verifyRegionResults(locator, results, null, ROW_A);
    verifyRegionResults(locator, results, null, ROW_B);
    verifyRegionResults(locator, results, null, ROW_C);
  }
}
 
Example 28
Project: ditb   File: TestChangingEncoding.java
static void verifyTestDataBatch(Configuration conf, TableName tableName,
    int batchId) throws Exception {
  LOG.debug("Verifying test data batch " + batchId);
  Table table = new HTable(conf, tableName);
  for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
    Get get = new Get(getRowKey(batchId, i));
    Result result = table.get(get);
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      Cell kv = result.getColumnLatestCell(CF_BYTES, getQualifier(j));
      assertTrue(CellUtil.matchingValue(kv, getValue(batchId, i, j)));
    }
  }
  table.close();
}
 
Example 29
Project: ditb   File: HBaseTestingUtility.java
/**
 * Truncate a table using the admin command.
 * Effectively disables, deletes, and recreates the table.
 * @param tableName table which must exist.
 * @param preserveRegions keep the existing split points
 * @return HTable for the new table
 */
public HTable truncateTable(final TableName tableName, final boolean preserveRegions)
    throws IOException {
  Admin admin = getHBaseAdmin();
  if (!admin.isTableDisabled(tableName)) {
    admin.disableTable(tableName);
  }
  admin.truncateTable(tableName, preserveRegions);
  return new HTable(getConfiguration(), tableName);
}
 
Example 30
Project: ditb   File: TestServerCustomProtocol.java
@Test
public void testCompoundCall() throws Throwable {
  try (HTable table = new HTable(util.getConfiguration(), TEST_TABLE)) {
    RegionLocator locator = table.getRegionLocator();
    Map<byte [], String> results = compoundOfHelloAndPing(table, ROW_A, ROW_C);
    verifyRegionResults(locator, results, "Hello, pong", ROW_A);
    verifyRegionResults(locator, results, "Hello, pong", ROW_B);
    verifyRegionResults(locator, results, "Hello, pong", ROW_C);
  }
}
 
Example 31
Project: flume-release-1.7.0   File: TestHBaseSink.java
@Test
public void testTransactionStateOnSerializationException() throws Exception {
  initContextForSimpleHbaseEventSerializer();
  ctx.put("batchSize", "1");
  ctx.put(HBaseSinkConfigurationConstants.CONFIG_SERIALIZER,
          "org.apache.flume.sink.hbase.MockSimpleHbaseEventSerializer");

  HBaseSink sink = new HBaseSink(conf);
  Configurables.configure(sink, ctx);
  // Reset the context to a higher batchSize
  ctx.put("batchSize", "100");
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, new Context());
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + 0));
  channel.put(e);
  tx.commit();
  tx.close();
  try {
    MockSimpleHbaseEventSerializer.throwException = true;
    sink.process();
    Assert.fail("FlumeException expected from serilazer");
  } catch (FlumeException ex) {
    Assert.assertEquals("Exception for testing", ex.getMessage());
  }
  MockSimpleHbaseEventSerializer.throwException = false;
  sink.process();
  sink.stop();
  HTable table = new HTable(conf, tableName);
  byte[][] results = getResults(table, 1);
  byte[] out = results[0];
  Assert.assertArrayEquals(e.getBody(), out);
  out = results[1];
  Assert.assertArrayEquals(Longs.toByteArray(1), out);
}
 
Example 32
Project: ditb   File: TestHFileOutputFormat2.java
/**
 * Test for {@link HFileOutputFormat2#configureDataBlockEncoding(HTableDescriptor, Configuration)}
 * and {@link HFileOutputFormat2#createFamilyDataBlockEncodingMap(Configuration)}.
 * Tests that the data block encoding map is correctly serialized into
 * and deserialized from the configuration
 *
 * @throws IOException
 */
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test
public void testSerializeDeserializeFamilyDataBlockEncodingMap() throws IOException {
  for (int numCfs = 0; numCfs <= 3; numCfs++) {
    Configuration conf = new Configuration(this.util.getConfiguration());
    Map<String, DataBlockEncoding> familyToDataBlockEncoding =
        getMockColumnFamiliesForDataBlockEncoding(numCfs);
    Table table = Mockito.mock(HTable.class);
    setupMockColumnFamiliesForDataBlockEncoding(table,
        familyToDataBlockEncoding);
    HTableDescriptor tableDescriptor = table.getTableDescriptor();
    HFileOutputFormat2.configureDataBlockEncoding(tableDescriptor, conf);

    // read back family specific data block encoding settings from the
    // configuration
    Map<byte[], DataBlockEncoding> retrievedFamilyToDataBlockEncodingMap =
        HFileOutputFormat2
        .createFamilyDataBlockEncodingMap(conf);

    // test that we have a value for all column families that matches with the
    // used mock values
    for (Entry<String, DataBlockEncoding> entry : familyToDataBlockEncoding.entrySet()) {
      assertEquals("DataBlockEncoding configuration incorrect for column family:"
          + entry.getKey(), entry.getValue(),
          retrievedFamilyToDataBlockEncodingMap.get(entry.getKey().getBytes()));
    }
  }
}
 
Example 33
Project: Transwarp-Sample-Code   File: udfCheck.java
public static boolean evaluate(String rowkey) {
    try {
        hTable = new HTable(conf, "bi");
        Get get = new Get(Bytes.toBytes(rowkey));
        result = hTable.exists(get);
        return result;
    } catch (Exception e) {
        e.printStackTrace();
    }
    return false;
}
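Constructing a new HTable on every evaluate() call repeats expensive client setup. A minimal sketch of the usual fix, lazily caching one shared Connection and Table; conf and the "bi" table name are carried over from the example:

private static Connection connection;
private static Table biTable;

private static synchronized Table table() throws IOException {
  if (biTable == null) {
    connection = ConnectionFactory.createConnection(conf);
    biTable = connection.getTable(TableName.valueOf("bi"));
  }
  return biTable;
}

public static boolean evaluate(String rowkey) {
  try {
    return table().exists(new Get(Bytes.toBytes(rowkey)));
  } catch (IOException e) {
    e.printStackTrace();
    return false;
  }
}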
 
Example 34
Project: QDrill   File: HBaseGroupScan.java
private void init() {
  logger.debug("Getting region locations");
  try {
    HTable table = new HTable(storagePluginConfig.getHBaseConf(), hbaseScanSpec.getTableName());
    this.hTableDesc = table.getTableDescriptor();
    NavigableMap<HRegionInfo, ServerName> regionsMap = table.getRegionLocations();
    statsCalculator = new TableStatsCalculator(table, hbaseScanSpec,
        storagePlugin.getContext().getConfig(), storagePluginConfig);

    boolean foundStartRegion = false;
    regionsToScan = new TreeMap<HRegionInfo, ServerName>();
    for (Entry<HRegionInfo, ServerName> mapEntry : regionsMap.entrySet()) {
      HRegionInfo regionInfo = mapEntry.getKey();
      if (!foundStartRegion && hbaseScanSpec.getStartRow() != null && hbaseScanSpec.getStartRow().length != 0 && !regionInfo.containsRow(hbaseScanSpec.getStartRow())) {
        continue;
      }
      foundStartRegion = true;
      regionsToScan.put(regionInfo, mapEntry.getValue());
      scanSizeInBytes += statsCalculator.getRegionSizeInBytes(regionInfo.getRegionName());
      if (hbaseScanSpec.getStopRow() != null && hbaseScanSpec.getStopRow().length != 0
          && regionInfo.containsRow(hbaseScanSpec.getStopRow())) {
        break;
      }
    }

    table.close();
  } catch (IOException e) {
    throw new DrillRuntimeException("Error getting region info for table: " + hbaseScanSpec.getTableName(), e);
  }
  verifyColumns();
}
 
Example 35
Project: ditb   File: TestServerCustomProtocol.java
@Test
public void testEmptyReturnType() throws Throwable {
  try (HTable table = new HTable(util.getConfiguration(), TEST_TABLE)) {
    Map<byte[],String> results = noop(table, ROW_A, ROW_C);
    assertEquals("Should have results from three regions", 3, results.size());
    // all results should be null
    for (Object v : results.values()) {
      assertNull(v);
    }
  }
}
 
Example 36
Project: QDrill   File: TestTableGenerator.java
public static void generateHBaseDatasetCompositeKeyInt(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  int startVal = 0;
  int stopVal = 1000;
  int interval = 47;
  long counter = 0;
  for (int i = startVal; i < stopVal; i += interval, counter++) {
    byte[] rowKey = ByteBuffer.allocate(12).putInt(i).array();

    for (int j = 0; j < 8; ++j) {
      rowKey[4 + j] = (byte) (counter >> (56 - (j * 8)));
    }

    Put p = new Put(rowKey);
    p.add(FAMILY_F, COLUMN_C, "dummy".getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();
}
 
Example 37
Project: QDrill   File: TestTableGenerator.java
public static void generateHBaseDatasetDoubleOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double i = 0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(br, i,
        org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Example 38
Project: ditb   File: TestGetLastFlushedSequenceId.java
@Test
public void test() throws IOException, InterruptedException {
  testUtil.getHBaseAdmin().createNamespace(
    NamespaceDescriptor.create(tableName.getNamespaceAsString()).build());
  HTable table = testUtil.createTable(tableName, families);
  table.put(new Put(Bytes.toBytes("k")).add(family, Bytes.toBytes("q"), Bytes.toBytes("v")));
  table.flushCommits();
  MiniHBaseCluster cluster = testUtil.getMiniHBaseCluster();
  List<JVMClusterUtil.RegionServerThread> rsts = cluster.getRegionServerThreads();
  Region region = null;
  for (int i = 0; i < cluster.getRegionServerThreads().size(); i++) {
    HRegionServer hrs = rsts.get(i).getRegionServer();
    for (Region r : hrs.getOnlineRegions(tableName)) {
      region = r;
      break;
    }
  }
  assertNotNull(region);
  Thread.sleep(2000);
  RegionStoreSequenceIds ids =
      testUtil.getHBaseCluster().getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  assertEquals(HConstants.NO_SEQNUM, ids.getLastFlushedSequenceId());
  // This will be the sequenceid just before that of the earliest edit in memstore.
  long storeSequenceId = ids.getStoreSequenceId(0).getSequenceId();
  assertTrue(storeSequenceId > 0);
  testUtil.getHBaseAdmin().flush(tableName);
  Thread.sleep(2000);
  ids =
      testUtil.getHBaseCluster().getMaster()
          .getLastSequenceId(region.getRegionInfo().getEncodedNameAsBytes());
  assertTrue(ids.getLastFlushedSequenceId() + " > " + storeSequenceId,
    ids.getLastFlushedSequenceId() > storeSequenceId);
  assertEquals(ids.getLastFlushedSequenceId(), ids.getStoreSequenceId(0).getSequenceId());
  table.close();
}
 
Example 39
Project: QDrill   File: TestTableGenerator.java
public static void generateHBaseDatasetIntOB(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (int i = -49; i <= 100; i++) {
    byte[] bytes = new byte[5];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
            new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 5);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeInt32(br, i,
            org.apache.hadoop.hbase.util.Order.ASCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %d", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}
 
Example 40
Project: QDrill   File: TestTableGenerator.java
public static void generateHBaseDatasetDoubleOBDesc(HBaseAdmin admin, String tableName, int numberRegions) throws Exception {
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }

  HTableDescriptor desc = new HTableDescriptor(tableName);
  desc.addFamily(new HColumnDescriptor(FAMILY_F));

  if (numberRegions > 1) {
    admin.createTable(desc, Arrays.copyOfRange(SPLIT_KEYS, 0, numberRegions-1));
  } else {
    admin.createTable(desc);
  }

  HTable table = new HTable(admin.getConfiguration(), tableName);

  for (double i = 0.5; i <= 100.00; i += 0.75) {
    byte[] bytes = new byte[9];
    org.apache.hadoop.hbase.util.PositionedByteRange br =
        new org.apache.hadoop.hbase.util.SimplePositionedByteRange(bytes, 0, 9);
    org.apache.hadoop.hbase.util.OrderedBytes.encodeFloat64(br, i,
        org.apache.hadoop.hbase.util.Order.DESCENDING);
    Put p = new Put(bytes);
    p.add(FAMILY_F, COLUMN_C, String.format("value %03f", i).getBytes());
    table.put(p);
  }

  table.flushCommits();
  table.close();

  admin.flush(tableName);
}