Java Code Examples for org.apache.hadoop.hbase.client.Table

The following examples show how to use org.apache.hadoop.hbase.client.Table. The examples are extracted from open source projects; the source project, source file, and license are listed above each example.
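Most of the examples below follow the same basic pattern: create (or reuse) a Connection, obtain a lightweight Table instance from it, issue Get/Put/Scan calls, and close the table when finished, typically with try-with-resources. The following is a minimal, self-contained sketch of that pattern; the table name "demo" and column family "cf" are placeholders, and it assumes an HBase client configuration (hbase-site.xml) is available on the classpath.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class TableUsageSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // A Connection is heavyweight and thread-safe; Table instances are lightweight
    // and should be obtained per use and closed when done.
    try (Connection connection = ConnectionFactory.createConnection(conf);
        Table table = connection.getTable(TableName.valueOf("demo"))) { // "demo" is a placeholder table name
      // Write a single cell to the (assumed) column family "cf".
      Put put = new Put(Bytes.toBytes("row1"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q1"), Bytes.toBytes("value1"));
      table.put(put);

      // Read the cell back.
      Result result = table.get(new Get(Bytes.toBytes("row1")));
      System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q1"))));
    }
  }
}

The examples below apply the same API in more specific contexts such as bulk loading, coprocessors, tests, and replication.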
Example 1
Source Project: super-cloudops   Source File: HfileBulkImporter.java    License: Apache License 2.0
/**
 * e.g.<br/>
 * 
 * <pre>
 *  yarn jar super-devops-tool-hbase-migrator-master.jar \
 *  com.wl4g.devops.tool.hbase.migrator.HfileBulkImporter \
 *  -z emr-header-1:2181 \
 *  -t safeclound.tb_elec_power \
 *  -p /tmp-devops/safeclound.tb_elec_power
 * </pre>
 * 
 * @param args
 * @throws Exception
 */
public static void main(String[] args) throws Exception {
	HbaseMigrateUtils.showBanner();

	CommandLine line = new Builder().option("z", "zkaddr", null, "Zookeeper address.")
			.option("t", "tabname", null, "Hbase table name.")
			.option("p", "path", null, "Data hdfs path to be import. e.g. hdfs://localhost:9000/bak/safeclound.tb_air")
			.build(args);

	Configuration cfg = HBaseConfiguration.create();
	cfg.set("hbase.zookeeper.quorum", line.getOptionValue("z"));
	Connection conn = ConnectionFactory.createConnection(cfg);
	Admin admin = conn.getAdmin();
	Table table = conn.getTable(TableName.valueOf(line.getOptionValue("t")));
	LoadIncrementalHFiles load = new LoadIncrementalHFiles(cfg);
	load.doBulkLoad(new Path(line.getOptionValue("p")), admin, table,
			conn.getRegionLocator(TableName.valueOf(line.getOptionValue("t"))));
}
 
Example 2
Source Project: phoenix   Source File: ServerUtil.java    License: Apache License 2.0
private static Table getTableFromSingletonPool(RegionCoprocessorEnvironment env, TableName tableName) throws IOException {
    // It's ok to never call pool.close() as we're storing a single
    // table only. The HTablePool holds no resources other than this table,
    // which will be closed itself when it's no longer needed.
    Connection conn = ConnectionFactory.getConnection(ConnectionType.DEFAULT_SERVER_CONNECTION, env);
    try {
        return conn.getTable(tableName);
    } catch (RuntimeException t) {
        // handle cases where an IOException is wrapped inside a RuntimeException, as HTableInterface#createHTableInterface does
        if(t.getCause() instanceof IOException) {
            throw (IOException)t.getCause();
        } else {
            throw t;
        }
    }
}
 
Example 3
Source Project: java-docs-samples   Source File: Reads.java    License: Apache License 2.0
public static void readRow(String projectId, String instanceId, String tableId) {
  // Initialize client that will be used to send requests. This client only needs to be created
  // once, and can be reused for multiple requests. After completing all of your requests, call
  // the "close" method on the client to safely clean up any remaining background resources.
  try (Connection connection = BigtableConfiguration.connect(projectId, instanceId)) {
    Table table = connection.getTable(TableName.valueOf(tableId));

    byte[] rowkey = Bytes.toBytes("phone#4c410523#20190501");

    Result row = table.get(new Get(rowkey));
    printRow(row);

  } catch (IOException e) {
    System.out.println(
        "Unable to initialize service client, as a network error occurred: \n" + e.toString());
  }
}
 
Example 4
Source Project: hbase   Source File: TestEndToEndSplitTransaction.java    License: Apache License 2.0
/** verify region boundaries obtained from HTable.getStartEndKeys() */
void verifyRegionsUsingHTable() throws IOException {
  Table table = null;
  try {
    // HTable.getStartEndKeys()
    table = connection.getTable(tableName);

    try (RegionLocator rl = connection.getRegionLocator(tableName)) {
      Pair<byte[][], byte[][]> keys = rl.getStartEndKeys();
      verifyStartEndKeys(keys);

      Set<RegionInfo> regions = new TreeSet<>(RegionInfo.COMPARATOR);
      for (HRegionLocation loc : rl.getAllRegionLocations()) {
        regions.add(loc.getRegion());
      }
      verifyTableRegions(regions);
    }

  } finally {
    IOUtils.closeQuietly(table);
  }
}
 
Example 5
Source Project: kylin-on-parquet-v2   Source File: HBaseResourceStore.java    License: Apache License 2.0
@Override
protected void deleteResourceImpl(String resPath, long timestamp) throws IOException {
    Table table = getConnection().getTable(TableName.valueOf(tableName));
    try {
        boolean hdfsResourceExist = isHdfsResourceExist(table, resPath);
        long origLastModified = getResourceLastModified(table, resPath);
        if (checkTimeStampBeforeDelete(origLastModified, timestamp)) {
            Delete del = new Delete(Bytes.toBytes(resPath));
            table.delete(del);

            if (hdfsResourceExist) { // remove hdfs cell value
                deletePushdown(resPath);
            }
        } else {
            throw new IOException("Resource " + resPath + " timestamp not match, [originLastModified: "
                    + origLastModified + ", timestampToDelete: " + timestamp + "]");
        }

    } finally {
        IOUtils.closeQuietly(table);
    }
}
 
Example 6
Source Project: hbase   Source File: TestMetaTableAccessor.java    License: Apache License 2.0
public static void assertMetaLocation(Table meta, byte[] row, ServerName serverName, long seqNum,
  int replicaId, boolean checkSeqNum) throws IOException {
  Get get = new Get(row);
  Result result = meta.get(get);
  assertTrue(Bytes.equals(
    result.getValue(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getServerColumn(replicaId)),
    Bytes.toBytes(serverName.getAddress().toString())));
  assertTrue(Bytes.equals(
    result.getValue(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getStartCodeColumn(replicaId)),
    Bytes.toBytes(serverName.getStartcode())));
  if (checkSeqNum) {
    assertTrue(Bytes.equals(
      result.getValue(HConstants.CATALOG_FAMILY, CatalogFamilyFormat.getSeqNumColumn(replicaId)),
      Bytes.toBytes(seqNum)));
  }
}
 
Example 7
Source Project: hbase   Source File: TestChangingEncoding.java    License: Apache License 2.0
static void writeTestDataBatch(TableName tableName,
    int batchId) throws Exception {
  LOG.debug("Writing test data batch " + batchId);
  List<Put> puts = new ArrayList<>();
  for (int i = 0; i < NUM_ROWS_PER_BATCH; ++i) {
    Put put = new Put(getRowKey(batchId, i));
    for (int j = 0; j < NUM_COLS_PER_ROW; ++j) {
      put.addColumn(CF_BYTES, getQualifier(j), getValue(batchId, i, j));
    }
    put.setDurability(Durability.SKIP_WAL);
    puts.add(put);
  }
  try (Connection conn = ConnectionFactory.createConnection(conf);
      Table table = conn.getTable(tableName)) {
    table.put(puts);
  }
}
 
Example 8
Source Project: hgraphdb   Source File: HBaseIndexTest.java    License: Apache License 2.0
@Test
public void testPopulateVertexIndex() throws Exception {
    assertEquals(0, count(graph.vertices()));
    graph.addVertex(T.id, id(10), T.label, "a", "key1", 11);
    graph.addVertex(T.id, id(11), T.label, "a", "key1", 12);
    graph.addVertex(T.id, id(12), T.label, "a", "key2", 12);
    graph.addVertex(T.id, id(13), T.label, "a", "key1", 11);
    graph.addVertex(T.id, id(14), T.label, "b", "key1", 11);


    HBaseGraphConfiguration hconf = graph.configuration();
    Connection conn = graph.connection();
    Table table = conn.getTable(HBaseGraphUtils.getTableName(hconf, Constants.VERTEX_INDICES));

    graph.createIndex(ElementType.VERTEX, "a", "key1", false, true, false);
    verifyTableCount(table, 3);

    table.close();
}
 
Example 9
private HStore prepareData() throws IOException {
  Admin admin = TEST_UTIL.getAdmin();
  if (admin.tableExists(tableName)) {
    admin.disableTable(tableName);
    admin.deleteTable(tableName);
  }
  Table table = TEST_UTIL.createTable(tableName, family);
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 10; j++) {
      byte[] value = new byte[128 * 1024];
      ThreadLocalRandom.current().nextBytes(value);
      table.put(new Put(Bytes.toBytes(i * 10 + j)).addColumn(family, qualifier, value));
    }
    admin.flush(tableName);
  }
  return getStoreWithName(tableName);
}
 
Example 10
Source Project: hbase   Source File: BackupSystemTable.java    License: Apache License 2.0
/**
 * Exclusive operations are: create, delete, merge
 * @throws IOException if a table operation fails or an active backup exclusive operation is
 *           already underway
 */
public void startBackupExclusiveOperation() throws IOException {
  LOG.debug("Start new backup exclusive operation");

  try (Table table = connection.getTable(tableName)) {
    Put put = createPutForStartBackupSession();
    // First try to put if row does not exist
    if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
        .ifNotExists().thenPut(put)) {
      // Row exists, try to put if value == ACTIVE_SESSION_NO
      if (!table.checkAndMutate(ACTIVE_SESSION_ROW, SESSIONS_FAMILY).qualifier(ACTIVE_SESSION_COL)
          .ifEquals(ACTIVE_SESSION_NO).thenPut(put)) {
        throw new ExclusiveOperationException();
      }
    }
  }
}
 
Example 11
Source Project: hbase   Source File: TestDeleteMobTable.java    License: Apache License 2.0
@Test
public void testDeleteNonMobTable() throws Exception {
  final TableName tableName = TableName.valueOf(name.getMethodName());
  TableDescriptor htd = createTableDescriptor(tableName, false);
  ColumnFamilyDescriptor hcd = htd.getColumnFamily(FAMILY);

  Table table = createTableWithOneFile(htd);
  try {
    // the mob file doesn't exist
    Assert.assertEquals(0, countMobFiles(tableName, hcd.getNameAsString()));
    Assert.assertEquals(0, countArchiveMobFiles(tableName, hcd.getNameAsString()));
    Assert.assertFalse(mobTableDirExist(tableName));
  } finally {
    table.close();
    TEST_UTIL.deleteTable(tableName);
  }

  Assert.assertFalse(TEST_UTIL.getAdmin().tableExists(tableName));
  Assert.assertEquals(0, countMobFiles(tableName, hcd.getNameAsString()));
  Assert.assertEquals(0, countArchiveMobFiles(tableName, hcd.getNameAsString()));
  Assert.assertFalse(mobTableDirExist(tableName));
}
 
Example 12
Source Project: hbase   Source File: TestSplitWALProcedure.java    License: Apache License 2.0
@Test
public void testHandleDeadWorker() throws Exception {
  Table table = TEST_UTIL.createTable(TABLE_NAME, FAMILY, TEST_UTIL.KEYS_FOR_HBA_CREATE_TABLE);
  for (int i = 0; i < 10; i++) {
    TEST_UTIL.loadTable(table, FAMILY);
  }
  HRegionServer testServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  ProcedureExecutor<MasterProcedureEnv> masterPE = master.getMasterProcedureExecutor();
  List<FileStatus> wals = splitWALManager.getWALsToSplit(testServer.getServerName(), false);
  Assert.assertEquals(1, wals.size());
  TEST_UTIL.getHBaseCluster().killRegionServer(testServer.getServerName());
  TEST_UTIL.waitFor(30000, () -> master.getProcedures().stream()
      .anyMatch(procedure -> procedure instanceof SplitWALProcedure));
  Procedure splitWALProcedure = master.getProcedures().stream()
      .filter(procedure -> procedure instanceof SplitWALProcedure).findAny().get();
  Assert.assertNotNull(splitWALProcedure);
  TEST_UTIL.waitFor(5000, () -> ((SplitWALProcedure) splitWALProcedure).getWorker() != null);
  TEST_UTIL.getHBaseCluster()
      .killRegionServer(((SplitWALProcedure) splitWALProcedure).getWorker());
  ProcedureTestingUtility.waitProcedure(masterPE, splitWALProcedure.getProcId());
  Assert.assertTrue(splitWALProcedure.isSuccess());
  ProcedureTestingUtility.waitAllProcedures(masterPE);
}
 
Example 13
Source Project: kylin   Source File: HBaseResourceStore.java    License: Apache License 2.0
@Override
protected void deleteResourceImpl(String resPath) throws IOException {
    Table table = getConnection().getTable(TableName.valueOf(tableName));
    try {
        boolean hdfsResourceExist = isHdfsResourceExist(table, resPath);

        Delete del = new Delete(Bytes.toBytes(resPath));
        table.delete(del);

        if (hdfsResourceExist) { // remove hdfs cell value
            deletePushdown(resPath);
        }
    } finally {
        IOUtils.closeQuietly(table);
    }
}
 
Example 14
@Test
public void twoRegionsWithMemstores() throws Exception {
  List<String> names = new ArrayList<String>();
  names.add("COL1");
  names.add("COL2");
  config.set(MRConstants.SPLICE_SCAN_INFO, sqlUtil.getTableScannerBuilder(SMRecordReaderImplIT.class.getSimpleName() + ".C", names).base64Encode());
  SMRecordReaderImpl rr = new SMRecordReaderImpl(config);
  TableName tableName = TableName.valueOf(sqlUtil.getConglomID(SMRecordReaderImplIT.class.getSimpleName() + ".C"));
  try (Connection connection = ConnectionFactory.createConnection(config)) {
    Table table = connection.getTable(tableName);
    Scan scan = new Scan();
    rr.setHTable(table);
    rr.setScan(scan);
    SMSplit tableSplit = new SMSplit(new TableSplit(tableName, scan.getStartRow(), scan.getStopRow(), "sdfsdf"));
    rr.initialize(tableSplit, null);
    int i = 0;
    while (rr.nextKeyValue()) {
      i++;
      Assert.assertNotNull("Column 1 is null", rr.getCurrentValue().getColumn(1));
      Assert.assertNotNull("Column 2 is null", rr.getCurrentValue().getColumn(2));
      Assert.assertNotNull("Current Key is null", rr.getCurrentKey());
    }
    Assert.assertEquals("incorrect results returned", 10000, i);
  }
}
 
Example 15
Source Project: hbase   Source File: TestVisibilityLabelsWithACL.java    License: Apache License 2.0
private static Table createTableAndWriteDataWithLabels(TableName tableName, String... labelExps)
    throws Exception {
  Table table = null;
  try {
    table = TEST_UTIL.createTable(tableName, fam);
    int i = 1;
    List<Put> puts = new ArrayList<>(labelExps.length);
    for (String labelExp : labelExps) {
      Put put = new Put(Bytes.toBytes("row" + i));
      put.addColumn(fam, qual, HConstants.LATEST_TIMESTAMP, value);
      put.setCellVisibility(new CellVisibility(labelExp));
      puts.add(put);
      i++;
    }
    table.put(puts);
  } finally {
    if (table != null) {
      table.close();
    }
  }
  return table;
}
 
Example 16
Source Project: phoenix   Source File: IndexToolForDeleteBeforeRebuildIT.java    License: Apache License 2.0
/**
 * Test delete before rebuild
 */
@Test
public void testDeleteBeforeRebuildForGlobalIndex() throws Exception {
    conn.createStatement().execute(String.format(INDEX_GLOBAL_DDL, globalIndexName, dataTableFullName));
    String globalIndexUpsert = String.format(UPSERT_SQL, globalIndexFullName);
    PreparedStatement stmt = conn.prepareStatement(globalIndexUpsert);
    upsertRow(stmt, "tenantID1",11, "name11", 99911);
    conn.commit();

    ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices();
    PTable physicalTable = PhoenixRuntime.getTable(conn, globalIndexFullName);
    Table hIndexTable = queryServices.getTable(physicalTable.getPhysicalName().getBytes());
    int count = getUtility().countRows(hIndexTable);
    // Confirm index has rows.
    assertEquals(4, count);

    runIndexTool(schemaName, dataTableName, globalIndexName, 0);

    count = getUtility().countRows(hIndexTable);

    // Confirm index has all the data rows
    assertEquals(3, count);
}
 
Example 17
Source Project: phoenix-tephra   Source File: DataJanitorState.java    License: Apache License 2.0
/**
 * Return regions that were recorded as empty after the given time.
 *
 * @param time time in milliseconds
 * @param includeRegions If not null, the returned set will be an intersection of the includeRegions set
 *                       and the empty regions after the given time
 */
public SortedSet<byte[]> getEmptyRegionsAfterTime(long time, @Nullable SortedSet<byte[]> includeRegions)
  throws IOException {
  SortedSet<byte[]> emptyRegions = new TreeSet<>(Bytes.BYTES_COMPARATOR);
  try (Table stateTable = stateTableSupplier.get()) {
    Scan scan = new Scan(makeEmptyRegionTimeKey(Bytes.toBytes(time + 1), EMPTY_BYTE_ARRAY),
                         EMPTY_REGION_TIME_KEY_PREFIX_STOP);
    scan.addColumn(FAMILY, EMPTY_REGION_TIME_COL);

    try (ResultScanner scanner = stateTable.getScanner(scan)) {
      Result next;
      while ((next = scanner.next()) != null) {
        byte[] emptyRegion = getEmptyRegionFromKey(next.getRow());
        if (includeRegions == null || includeRegions.contains(emptyRegion)) {
          emptyRegions.add(emptyRegion);
        }
      }
    }
  }
  return Collections.unmodifiableSortedSet(emptyRegions);
}
 
Example 18
Source Project: phoenix   Source File: TephraTransactionContext.java    License: Apache License 2.0
@Override
public Table getTransactionalTableWriter(PhoenixConnection connection, PTable table, Table htable, boolean isIndex) throws SQLException {
    // If we have indexes, wrap the HTable in a delegate HTable that
    // will attach the necessary index meta data in the event of a
    // rollback
    TransactionAwareHTable transactionAwareHTable;
    // Don't add immutable indexes (those are the only ones that would participate
    // during a commit), as we don't need conflict detection for these.
    if (isIndex) {
        transactionAwareHTable = new TransactionAwareHTable(htable, TxConstants.ConflictDetection.NONE);
        transactionAwareHTable.startTx(getTransaction());
    } else {
        htable = new RollbackHookHTableWrapper(htable, table, connection);
        transactionAwareHTable = new TransactionAwareHTable(htable, table.isImmutableRows() ? TxConstants.ConflictDetection.NONE : TxConstants.ConflictDetection.ROW);
        // Even for immutable, we need to do this so that an abort has the state
        // necessary to generate the rows to delete.
        this.addTransactionAware(transactionAwareHTable);
    }
    return transactionAwareHTable;
}
 
Example 19
Source Project: phoenix   Source File: UngroupedAggregateRegionObserver.java    License: Apache License 2.0
private void separateLocalAndRemoteMutations(Table targetHTable, Region region, List<Mutation> mutations,
                                             List<Mutation> localRegionMutations, List<Mutation> remoteRegionMutations,
                                             boolean isPKChanging) {
    boolean areMutationsInSameTable = areMutationsInSameTable(targetHTable, region);
    //if we're writing to the same table, but the PK can change, that means that some
    //mutations might be in our current region, and others in a different one.
    if (areMutationsInSameTable && isPKChanging) {
        RegionInfo regionInfo = region.getRegionInfo();
        for (Mutation mutation : mutations) {
            if (regionInfo.containsRow(mutation.getRow())) {
                localRegionMutations.add(mutation);
            } else {
                remoteRegionMutations.add(mutation);
            }
        }
    } else if (areMutationsInSameTable && !isPKChanging) {
        localRegionMutations.addAll(mutations);
    } else {
        remoteRegionMutations.addAll(mutations);
    }
}
 
Example 20
Source Project: hraven   Source File: JobHistoryRawService.java    License: Apache License 2.0
/**
 * Returns the raw job configuration stored for the given cluster and job ID
 * @param jobId the cluster and job ID to look up
 * @return the stored job configuration
 * @throws IOException
 */
public Configuration getRawJobConfiguration(QualifiedJobId jobId)
    throws IOException {
  Configuration conf = null;
  byte[] rowKey = idConv.toBytes(jobId);
  Get get = new Get(rowKey);
  get.addColumn(Constants.RAW_FAM_BYTES, Constants.JOBCONF_COL_BYTES);
  Table rawTable = null;
  try {
    rawTable = hbaseConnection
        .getTable(TableName.valueOf(Constants.HISTORY_RAW_TABLE));
    Result result = rawTable.get(get);
    if (result != null && !result.isEmpty()) {
      conf = createConfigurationFromResult(result);
    }
  } catch (MissingColumnInResultException e) {
    LOG.error(
        "Failed to retrieve configuration from row returned for " + jobId, e);
  } finally {
    if (rawTable != null) {
      rawTable.close();
    }
  }
  return conf;
}
 
Example 21
Source Project: hbase   Source File: TestVisibilityLabelsWithACL.java    License: Apache License 2.0
@Test
public void testGetForSuperUserWithFewerLabelAuths() throws Throwable {
  String[] auths = { SECRET };
  String user = "admin";
  VisibilityClient.setAuths(TEST_UTIL.getConnection(), auths, user);
  TableName tableName = TableName.valueOf(TEST_NAME.getMethodName());
  final Table table = createTableAndWriteDataWithLabels(tableName, SECRET + "&" + CONFIDENTIAL
      + "&!" + PRIVATE, SECRET + "&!" + PRIVATE);
  PrivilegedExceptionAction<Void> scanAction = new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      Get g = new Get(row1);
      g.setAuthorizations(new Authorizations(SECRET, CONFIDENTIAL));
      try (Connection connection = ConnectionFactory.createConnection(conf);
           Table t = connection.getTable(table.getName())) {
        Result result = t.get(g);
        assertTrue(!result.isEmpty());
      }
      return null;
    }
  };
  SUPERUSER.runAs(scanAction);
}
 
Example 22
private void verifyReplicationStuck() throws Exception {
  try (Table normalTable = utility1.getConnection().getTable(TABLE)) {
    Put put = new Put(ROW);
    put.addColumn(NORMAL_FAMILY, QUALIFIER, VALUE);
    normalTable.put(put);
  }
  try (Table normalTable = utility2.getConnection().getTable(TABLE)) {
    for (int i = 0; i < NB_RETRIES; i++) {
      Result result = normalTable.get(new Get(ROW).addColumn(NORMAL_FAMILY, QUALIFIER));
      if (result != null && !result.isEmpty()) {
        fail("Edit should have been stuck behind dropped tables, but value is " + Bytes
            .toString(result.getValue(NORMAL_FAMILY, QUALIFIER)));
      } else {
        LOG.info("Row not replicated, let's wait a bit more...");
        Thread.sleep(SLEEP_TIME);
      }
    }
  }
}
 
Example 23
Source Project: hbase   Source File: TestShadedHBaseTestingUtility.java    License: Apache License 2.0
@Test
public void testCreateTable() throws Exception {
  TableName tableName = TableName.valueOf("test");

  Table table = TEST_UTIL.createTable(tableName, "cf");

  Put put1 = new Put(Bytes.toBytes("r1"));
  put1.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("c"), Bytes.toBytes(1));
  table.put(put1);

  Put put2 = new Put(Bytes.toBytes("r2"));
  put2.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("c"), Bytes.toBytes(2));
  table.put(put2);

  int rows = TEST_UTIL.countRows(tableName);
  assertEquals(2, rows);
}
 
Example 24
Source Project: nifi   Source File: TestHBase_2_ClientService.java    License: Apache License 2.0
@Test(expected = IllegalArgumentException.class)
public void testScanWithInvalidFilter() throws InitializationException, IOException {
    final String tableName = "nifi";
    final TestRunner runner = TestRunners.newTestRunner(TestProcessor.class);

    // Mock an HBase Table so we can verify the put operations later
    final Table table = Mockito.mock(Table.class);
    when(table.getName()).thenReturn(TableName.valueOf(tableName));

    // create the controller service and link it to the test processor
    final MockHBaseClientService service = configureHBaseClientService(runner, table);
    runner.assertValid(service);

    // perform a scan and verify the four rows were returned
    final CollectingResultHandler handler = new CollectingResultHandler();
    final HBaseClientService hBaseClientService = runner.getProcessContext().getProperty(TestProcessor.HBASE_CLIENT_SERVICE)
            .asControllerService(HBaseClientService.class);

    // this should throw IllegalArgumentException
    final String filter = "this is not a filter";
    hBaseClientService.scan(tableName, new ArrayList<Column>(), filter, System.currentTimeMillis(), handler);
}
 
Example 25
Source Project: hbase   Source File: TestMobWithByteBuffAllocator.java    License: Apache License 2.0
@Test
public void testReadingCellsFromHFile() throws Exception {
  TableName tableName = TableName.valueOf(TABLE_NAME);
  MobSnapshotTestingUtils.createMobTable(UTIL, tableName, 1, FAMILY);
  LOG.info("Create an mob table {} successfully.", tableName);

  int expectedRows = 500;
  SnapshotTestingUtils.loadData(UTIL, tableName, expectedRows, FAMILY);
  LOG.info("Load 500 rows data into table {} successfully.", tableName);

  // Flush all the data into HFiles.
  try (Admin admin = UTIL.getConnection().getAdmin()) {
    admin.flush(tableName);
  }

  // Scan the rows
  MobSnapshotTestingUtils.verifyMobRowCount(UTIL, tableName, expectedRows);

  // Reversed scan the rows
  int rows = 0;
  try (Table table = UTIL.getConnection().getTable(tableName)) {
    try (ResultScanner scanner = table.getScanner(new Scan().setReversed(true))) {
      for (Result res; (res = scanner.next()) != null;) {
        rows++;
        for (Cell cell : res.listCells()) {
          Assert.assertTrue(CellUtil.cloneValue(cell).length > 0);
        }
      }
    }
  }
  Assert.assertEquals(expectedRows, rows);
}
 
Example 26
Source Project: hbase   Source File: HBaseTestingUtility.java    License: Apache License 2.0
public Table createTable(TableName tableName, byte[][] families,
    int numVersions, byte[] startKey, byte[] endKey, int numRegions)
    throws IOException {
  HTableDescriptor desc = createTableDescriptor(tableName, families, numVersions);

  getAdmin().createTable(desc, startKey, endKey, numRegions);
  // HBaseAdmin only waits for regions to appear in hbase:meta; we
  // should wait until they are assigned
  waitUntilAllRegionsAssigned(tableName);
  return getConnection().getTable(tableName);
}
 
Example 27
Source Project: hbase   Source File: TestScannerRetriableFailure.java    License: Apache License 2.0
public void loadTable(final Table table, int numRows) throws IOException {
  List<Put> puts = new ArrayList<>(numRows);
  for (int i = 0; i < numRows; ++i) {
    byte[] row = Bytes.toBytes(String.format("%09d", i));
    Put put = new Put(row);
    put.setDurability(Durability.SKIP_WAL);
    put.addColumn(FAMILY_NAME, null, row);
    table.put(put);
  }
}
 
Example 28
Source Project: hbase   Source File: ThriftHBaseServiceHandler.java    License: Apache License 2.0
@Override
public boolean exists(ByteBuffer table, TGet get) throws TIOError, TException {
  Table htable = getTable(table);
  try {
    return htable.exists(getFromThrift(get));
  } catch (IOException e) {
    throw getTIOError(e);
  } finally {
    closeTable(htable);
  }
}
 
Example 29
@Test
public void testNegativeAuthentication() throws Exception {
  // Validate that authentication fails when the user presents an incorrect password token
  UserGroupInformation user1 = UserGroupInformation.createUserForTesting("user1", new String[0]);
  user1.addToken(createPasswordToken("user1", "definitely not the password", clusterId));
  user1.doAs(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      Configuration clientConf = getClientConf();
      clientConf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
      // Depending on the registry in use, the following code can throw exceptions at different
      // places. Master registry fails at the createConnection() step because the RPC to the
      // master fails with sasl auth. With ZK registry, connection creation succeeds (since there
      // is no RPC to HBase services involved) but the subsequent get() fails. The root cause
      // should still be a SaslException in both the cases.
      try (Connection conn = ConnectionFactory.createConnection(clientConf);
        Table t = conn.getTable(tableName)) {
        t.get(new Get(Bytes.toBytes("r1")));
        fail("Should not successfully authenticate with HBase");
      } catch (MasterRegistryFetchException mfe) {
        Throwable cause = mfe.getCause();
        assertTrue(cause.getMessage(), cause.getMessage().contains("SaslException"));
      } catch (RetriesExhaustedException re) {
        assertTrue(re.getMessage(), re.getMessage().contains("SaslException"));
      } catch (Exception e) {
        // Any other exception is unexpected.
        fail("Unexpected exception caught, was expecting a authentication error: " +
          Throwables.getStackTraceAsString(e));
      }
      return null;
    }
  });
}
 
Example 30
Source Project: hbase   Source File: BackupSystemTable.java    License: Apache License 2.0
public void writeFilesForBulkLoadPreCommit(TableName tabName, byte[] region, final byte[] family,
    final List<Pair<Path, Path>> pairs) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug(
      "write bulk load descriptor to backup " + tabName + " with " + pairs.size() + " entries");
  }
  try (Table table = connection.getTable(bulkLoadTableName)) {
    List<Put> puts =
        BackupSystemTable.createPutForPreparedBulkload(tabName, region, family, pairs);
    table.put(puts);
    LOG.debug("written " + puts.size() + " rows for bulk load of " + tabName);
  }
}