org.apache.hadoop.hbase.client.HTable Java Examples
The following examples show how to use
org.apache.hadoop.hbase.client.HTable.
Each example notes its original project, source file, and license.
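Before the project-specific snippets, here is a minimal, self-contained sketch of the HTable API that the examples below build on. It is only an illustration and assumes the classic client style used throughout this page, where an HTable is constructed directly from a Configuration (newer HBase versions obtain a Table from a Connection instead); the table name "my_table", column family "cf", and qualifier "q" are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class HTableQuickStart {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    HTable table = new HTable(conf, "my_table");      // "my_table" is a placeholder; the table must already exist
    try {
      // write one cell
      Put put = new Put(Bytes.toBytes("row1"));
      put.add(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      table.put(put);

      // read it back
      Get get = new Get(Bytes.toBytes("row1"));
      Result result = table.get(get);
      System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
    } finally {
      table.close(); // release client resources
    }
  }
}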
Example #1
Source File: AbstractHBaseTableTest.java From phoenix-tephra with Apache License 2.0

protected static HTable createTable(byte[] tableName, byte[][] columnFamilies, boolean existingData,
                                    List<String> coprocessors) throws Exception {
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
  for (byte[] family : columnFamilies) {
    HColumnDescriptor columnDesc = new HColumnDescriptor(family);
    columnDesc.setMaxVersions(Integer.MAX_VALUE);
    columnDesc.setValue(TxConstants.PROPERTY_TTL, String.valueOf(100000)); // in millis
    desc.addFamily(columnDesc);
  }
  if (existingData) {
    desc.setValue(TxConstants.READ_NON_TX_DATA, "true");
  }
  // Divide individually to prevent any overflow
  int priority = Coprocessor.PRIORITY_USER;
  // order in list is the same order that coprocessors will be invoked
  for (String coprocessor : coprocessors) {
    desc.addCoprocessor(coprocessor, null, ++priority, null);
  }
  hBaseAdmin.createTable(desc);
  testUtil.waitTableAvailable(tableName, 5000);
  return new HTable(testUtil.getConfiguration(), tableName);
}
Example #2
Source File: HbaseAdapter.java From canal with Apache License 2.0

@Override
public Map<String, Object> count(String task) {
  MappingConfig config = hbaseMapping.get(task);
  String hbaseTable = config.getHbaseMapping().getHbaseTable();
  long rowCount = 0L;
  try {
    HTable table = (HTable) hbaseTemplate.getConnection().getTable(TableName.valueOf(hbaseTable));
    Scan scan = new Scan();
    scan.setFilter(new FirstKeyOnlyFilter());
    ResultScanner resultScanner = table.getScanner(scan);
    for (Result result : resultScanner) {
      rowCount += result.size();
    }
  } catch (IOException e) {
    logger.error(e.getMessage(), e);
  }
  Map<String, Object> res = new LinkedHashMap<>();
  res.put("hbaseTable", hbaseTable);
  res.put("count", rowCount);
  return res;
}
Example #3
Source File: DataJanitorStateTest.java From phoenix-tephra with Apache License 2.0

@Before
public void beforeTest() throws Exception {
  pruneStateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                               TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  HTable table = createTable(pruneStateTable.getName(), new byte[][]{DataJanitorState.FAMILY}, false,
                             // Prune state table is a non-transactional table, hence no transaction co-processor
                             Collections.<String>emptyList());
  table.close();
  dataJanitorState =
    new DataJanitorState(new DataJanitorState.TableSupplier() {
      @Override
      public Table get() throws IOException {
        return testUtil.getConnection().getTable(pruneStateTable);
      }
    });
}
Example #4
Source File: Util.java From hbase-tools with Apache License 2.0

public static boolean isMoved(HBaseAdmin admin, String tableName, String regionName, String serverNameTarget) {
  try (HTable table = new HTable(admin.getConfiguration(), tableName)) {
    NavigableMap<HRegionInfo, ServerName> regionLocations = table.getRegionLocations();
    for (Map.Entry<HRegionInfo, ServerName> regionLocation : regionLocations.entrySet()) {
      if (regionLocation.getKey().getEncodedName().equals(regionName)) {
        return regionLocation.getValue().getServerName().equals(serverNameTarget);
      }
    }
    if (!existsRegion(regionName, regionLocations.keySet()))
      return true; // skip moving
  } catch (IOException e) {
    return false;
  }
  return false;
}
Example #5
Source File: ImmutableIndexIT.java From phoenix with Apache License 2.0

public static boolean verifyRowsForEmptyColValue(Connection conn, String tableName, byte[] valueBytes)
    throws IOException, SQLException {
  PTable table = PhoenixRuntime.getTable(conn, tableName);
  byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table);
  byte[] emptyCQ = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst();
  HTable htable = (HTable) conn.unwrap(PhoenixConnection.class).getQueryServices()
      .getTable(table.getPhysicalName().getBytes());
  Scan scan = new Scan();
  scan.addColumn(emptyCF, emptyCQ);
  ResultScanner resultScanner = htable.getScanner(scan);
  for (Result result = resultScanner.next(); result != null; result = resultScanner.next()) {
    if (Bytes.compareTo(result.getValue(emptyCF, emptyCQ), 0, valueBytes.length,
        valueBytes, 0, valueBytes.length) != 0) {
      return false;
    }
  }
  return true;
}
Example #6
Source File: HBaseTestBase.java From flink with Apache License 2.0

private static void createHBaseTable1() throws IOException {
  // create a table
  TableName tableName = TableName.valueOf(TEST_TABLE_1);
  createTable(tableName, FAMILIES, SPLIT_KEYS);

  // get the HTable instance
  HTable table = openTable(tableName);
  List<Put> puts = new ArrayList<>();
  // add some data
  puts.add(putRow(1, 10, "Hello-1", 100L, 1.01, false, "Welt-1"));
  puts.add(putRow(2, 20, "Hello-2", 200L, 2.02, true, "Welt-2"));
  puts.add(putRow(3, 30, "Hello-3", 300L, 3.03, false, "Welt-3"));
  puts.add(putRow(4, 40, null, 400L, 4.04, true, "Welt-4"));
  puts.add(putRow(5, 50, "Hello-5", 500L, 5.05, false, "Welt-5"));
  puts.add(putRow(6, 60, "Hello-6", 600L, 6.06, true, "Welt-6"));
  puts.add(putRow(7, 70, "Hello-7", 700L, 7.07, false, "Welt-7"));
  puts.add(putRow(8, 80, null, 800L, 8.08, true, "Welt-8"));

  // append rows to table
  table.put(puts);
  table.close();
}
Example #7
Source File: TestHbaseClient.java From kylin-on-parquet-v2 with Apache License 2.0

public static void main(String[] args) throws IOException {
  foo(6, 5);
  foo(5, 2);
  foo(3, 0);

  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.zookeeper.quorum", "hbase_host");
  conf.set("zookeeper.znode.parent", "/hbase-unsecure");

  HTable table = new HTable(conf, "test1");
  Put put = new Put(Bytes.toBytes("row1"));
  put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
  put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));
  table.put(put);
  table.close();
}
Example #8
Source File: HBaseStreamPartitioner.java From opensoc-streaming with Apache License 2.0

public void prepare(WorkerTopologyContext context, GlobalStreamId stream, List<Integer> targetTasks) {
  System.out.println("preparing HBaseStreamPartitioner for streamId " + stream.get_streamId());
  this.targetTasks = targetTasks;
  this.targetTasksSize = this.targetTasks.size();

  Configuration conf = HBaseConfiguration.create();
  try {
    hTable = new HTable(conf, tableName);
    refreshRegionInfo(tableName);
    System.out.println("regionStartKeyRegionNameMap: " + regionStartKeyRegionNameMap);
  } catch (IOException e) {
    e.printStackTrace();
  }
}
Example #9
Source File: LocalIndexIT.java From phoenix with Apache License 2.0

@Test
public void testLocalIndexTableRegionSplitPolicyAndSplitKeys() throws Exception {
  createBaseTable(TestUtil.DEFAULT_DATA_TABLE_NAME, null, "('e','i','o')");
  Connection conn1 = DriverManager.getConnection(getUrl());
  Connection conn2 = DriverManager.getConnection(getUrl());
  conn1.createStatement().execute("CREATE LOCAL INDEX " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " ON "
      + TestUtil.DEFAULT_DATA_TABLE_NAME + "(v1)");
  conn2.createStatement().executeQuery("SELECT * FROM " + TestUtil.DEFAULT_DATA_TABLE_FULL_NAME).next();
  HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
  HTableDescriptor htd = admin.getTableDescriptor(
      TableName.valueOf(MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME)));
  assertEquals(IndexRegionSplitPolicy.class.getName(), htd.getValue(HTableDescriptor.SPLIT_POLICY));
  try (HTable userTable = new HTable(admin.getConfiguration(),
      TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME))) {
    try (HTable indexTable = new HTable(admin.getConfiguration(),
        TableName.valueOf(MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME)))) {
      assertArrayEquals("Both user table and index table should have same split keys.",
          userTable.getStartKeys(), indexTable.getStartKeys());
    }
  }
}
Example #10
Source File: MC.java From hbase-tools with Apache License 2.0

private NavigableMap<HRegionInfo, ServerName> getRegionLocations(String table) throws IOException {
  long startTimestamp = System.currentTimeMillis();
  Util.printVerboseMessage(args, Util.getMethodName() + " - start");
  NavigableMap<HRegionInfo, ServerName> result = regionLocations.get(table);
  if (result == null) {
    try (HTable htable = new HTable(admin.getConfiguration(), table)) {
      result = htable.getRegionLocations();
      regionLocations.put(table, result);
    }
  }
  Util.printVerboseMessage(args, Util.getMethodName() + " - end", startTimestamp);
  return result;
}
Example #11
Source File: TestHbaseClient.java From Kylin with Apache License 2.0

public static void main(String[] args) throws IOException {
  foo(6, 5);
  foo(5, 2);
  foo(3, 0);

  Configuration conf = HBaseConfiguration.create();
  conf.set("hbase.zookeeper.quorum", "hbase_host");
  conf.set("zookeeper.znode.parent", "/hbase-unsecure");

  HTable table = new HTable(conf, "test1");
  Put put = new Put(Bytes.toBytes("row1"));
  put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual1"), Bytes.toBytes("val1"));
  put.add(Bytes.toBytes("colfam1"), Bytes.toBytes("qual2"), Bytes.toBytes("val2"));
  table.put(put);
  table.close();
}
Example #12
Source File: HBaseBulkImportJob.java From aliyun-maxcompute-data-collectors with Apache License 2.0

@Override
protected void jobSetup(Job job) throws IOException, ImportException {
  super.jobSetup(job);

  // we shouldn't have gotten here if bulk load dir is not set
  // so let's throw a ImportException
  if (getContext().getDestination() == null) {
    throw new ImportException("Can't run HBaseBulkImportJob without a " +
        "valid destination directory.");
  }

  TableMapReduceUtil.addDependencyJars(job.getConfiguration(), Preconditions.class);
  FileOutputFormat.setOutputPath(job, getContext().getDestination());
  HTable hTable = new HTable(job.getConfiguration(), options.getHBaseTable());
  HFileOutputFormat.configureIncrementalLoad(job, hTable);
}
Example #13
Source File: HalyardTableUtils.java From Halyard with Apache License 2.0

/**
 * Truncates HTable while preserving the region pre-splits
 * @param table HTable to truncate
 * @return new instance of the truncated HTable
 * @throws IOException throws IOException in case of any HBase IO problems
 */
public static HTable truncateTable(HTable table) throws IOException {
  Configuration conf = table.getConfiguration();
  byte[][] presplits = table.getRegionLocator().getStartKeys();
  if (presplits.length > 0 && presplits[0].length == 0) {
    presplits = Arrays.copyOfRange(presplits, 1, presplits.length);
  }
  HTableDescriptor desc = table.getTableDescriptor();
  table.close();
  try (Connection con = ConnectionFactory.createConnection(conf)) {
    try (Admin admin = con.getAdmin()) {
      admin.disableTable(desc.getTableName());
      admin.deleteTable(desc.getTableName());
      admin.createTable(desc, presplits);
    }
  }
  return HalyardTableUtils.getTable(conf, desc.getTableName().getNameAsString(), false, 0);
}
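For context, a possible way to call the method above (an illustrative sketch only; it reuses the HalyardTableUtils.getTable(...) helper with the same argument shape as in the snippet, and "halyard_test" is a placeholder table name):

Configuration conf = HBaseConfiguration.create();
// Obtain the HTable through the project's own helper, invoked as in the snippet above.
HTable table = HalyardTableUtils.getTable(conf, "halyard_test", false, 0);
// Remove all rows while keeping the original region pre-splits, then continue with the returned fresh instance.
HTable truncated = HalyardTableUtils.truncateTable(table);
truncated.close();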
Example #14
Source File: HBaseTestCase.java From aliyun-maxcompute-data-collectors with Apache License 2.0

protected int countHBaseTable(String tableName, String colFamily) throws IOException {
  int count = 0;
  HTable table = new HTable(new Configuration(
      hbaseTestUtil.getConfiguration()), Bytes.toBytes(tableName));
  try {
    ResultScanner scanner = table.getScanner(Bytes.toBytes(colFamily));
    for (Result result = scanner.next(); result != null; result = scanner.next()) {
      count++;
    }
  } finally {
    table.close();
  }
  return count;
}
Example #15
Source File: HBaseTestCase.java From aliyun-maxcompute-data-collectors with Apache License 2.0

protected void verifyHBaseCell(String tableName, String rowKey,
    String colFamily, String colName, String val) throws IOException {
  Get get = new Get(Bytes.toBytes(rowKey));
  get.addColumn(Bytes.toBytes(colFamily), Bytes.toBytes(colName));
  HTable table = new HTable(new Configuration(
      hbaseTestUtil.getConfiguration()), Bytes.toBytes(tableName));
  try {
    Result r = table.get(get);
    byte[] actualVal = r.getValue(Bytes.toBytes(colFamily), Bytes.toBytes(colName));
    if (null == val) {
      assertNull("Got a result when expected null", actualVal);
    } else {
      assertNotNull("No result, but we expected one", actualVal);
      assertEquals(val, Bytes.toString(actualVal));
    }
  } finally {
    table.close();
  }
}
Example #16
Source File: TestAsyncHBaseSink.java From mt-flume with Apache License 2.0

@Test
public void testThreeEvents() throws Exception {
  testUtility.createTable(tableName.getBytes(), columnFamily.getBytes());
  deleteTable = true;
  AsyncHBaseSink sink = new AsyncHBaseSink(testUtility.getConfiguration());
  Configurables.configure(sink, ctx);
  Channel channel = new MemoryChannel();
  Configurables.configure(channel, ctx);
  sink.setChannel(channel);
  sink.start();
  Transaction tx = channel.getTransaction();
  tx.begin();
  for (int i = 0; i < 3; i++) {
    Event e = EventBuilder.withBody(Bytes.toBytes(valBase + "-" + i));
    channel.put(e);
  }
  tx.commit();
  tx.close();
  Assert.assertFalse(sink.isConfNull());
  sink.process();
  sink.stop();
  HTable table = new HTable(testUtility.getConfiguration(), tableName);
  byte[][] results = getResults(table, 3);
  byte[] out;
  int found = 0;
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      if (Arrays.equals(results[j], Bytes.toBytes(valBase + "-" + i))) {
        found++;
        break;
      }
    }
  }
  Assert.assertEquals(3, found);
  out = results[3];
  Assert.assertArrayEquals(Longs.toByteArray(3), out);
}
Example #17
Source File: PcapScannerHBaseImplTest.java From opensoc-streaming with Apache License 2.0

/**
 * Test_get pcaps_with minimal arguments.
 *
 * @throws IOException
 *           the IO exception
 */
@SuppressWarnings({ "unchecked", "unused" })
@Test
public void test_getPcaps_withMinimalArguments() throws IOException {
  // mocking
  PcapScannerHBaseImpl pcapScanner = (PcapScannerHBaseImpl) PcapScannerHBaseImpl.getInstance();
  PcapScannerHBaseImpl spy = Mockito.spy(pcapScanner);
  byte[] cf = "cf".getBytes();
  byte[] cq = "pcap".getBytes();
  String startKey = "0a07002b-0a078039-06-1e8b-0087";
  String endKey = "0a070025-0a07807a-06-aab8-c360";
  long maxResultSize = 60;
  long startTime = 1376782349234555L;
  long endTime = 1396782349234555L;
  List<byte[]> mockPcaps = new ArrayList<byte[]>();
  mockPcaps.add(getTestPcapBytes());
  Mockito.doReturn(mockPcaps)
      .when(spy)
      .scanPcaps(Mockito.any(ArrayList.class), Mockito.any(HTable.class),
          Mockito.any(Scan.class), Mockito.any(byte[].class), Mockito.any(byte[].class));
  // actual call
  byte[] response = spy.getPcaps(startKey, endKey);
  // verify
  Assert.assertTrue(response.length == mockPcaps.get(0).length);
}
Example #18
Source File: TestBase.java From hbase-tools with Apache License 2.0

private long getWriteRequestCountActual(String tableName) throws IOException {
  long writeRequestCountActual;
  try (HTable table = (HTable) hConnection.getTable(tableName)) {
    writeRequestCountActual = 0;
    NavigableMap<HRegionInfo, ServerName> regionLocations = table.getRegionLocations();
    for (Map.Entry<HRegionInfo, ServerName> entry : regionLocations.entrySet()) {
      ServerLoad serverLoad = admin.getClusterStatus().getLoad(entry.getValue());
      for (RegionLoad regionLoad : serverLoad.getRegionsLoad().values()) {
        if (Arrays.equals(entry.getKey().getRegionName(), regionLoad.getName()))
          writeRequestCountActual += regionLoad.getWriteRequestsCount();
      }
    }
  }
  return writeRequestCountActual;
}
Example #19
Source File: TransactionAwareHTableTest.java From phoenix-tephra with Apache License 2.0

private void testDeleteRollback(TxConstants.ConflictDetection conflictDetection) throws Exception {
  String tableName = String.format("%s%s", "TestColFamilyDelete", conflictDetection);
  HTable hTable = createTable(Bytes.toBytes(tableName), new byte[][]{TestBytes.family});
  try (TransactionAwareHTable txTable = new TransactionAwareHTable(hTable, conflictDetection)) {
    TransactionContext txContext = new TransactionContext(new InMemoryTxSystemClient(txManager), txTable);
    txContext.start();
    txTable.put(new Put(TestBytes.row).add(TestBytes.family, TestBytes.qualifier, TestBytes.value));
    txContext.finish();

    // Start a tx, delete the row and then abort the tx
    txContext.start();
    txTable.delete(new Delete(TestBytes.row));
    txContext.abort();

    // Start a tx, delete a column family and then abort the tx
    txContext.start();
    txTable.delete(new Delete(TestBytes.row).deleteFamily(TestBytes.family));
    txContext.abort();

    // Above operations should have no effect on the row, since they were aborted
    txContext.start();
    Get get = new Get(TestBytes.row);
    Result result = txTable.get(get);
    assertFalse(result.isEmpty());
    assertArrayEquals(TestBytes.value, result.getValue(TestBytes.family, TestBytes.qualifier));
    txContext.finish();
  }
}
Example #20
Source File: HBaseStore.java From attic-apex-malhar with Apache License 2.0

public void flushTables() throws InterruptedIOException, RetriesExhaustedWithDetailsException {
  if (table != null) {
    flushTable(table);
  }
  for (Map.Entry<String, HTable> entry : tableCache.asMap().entrySet()) {
    flushTable(entry.getValue());
  }
}
Example #21
Source File: TestEndToEndCoveredIndexing.java From phoenix with BSD 3-Clause "New" or "Revised" License

/**
 * Test that a bunch of puts with a single timestamp across all the puts builds and inserts index
 * entries as expected
 * @throws Exception on failure
 */
@Test
public void testSimpleTimestampedUpdates() throws Exception {
  HTable primary = createSetupTables(fam1);

  // do a put to the primary table
  Put p = new Put(row1);
  long ts = 10;
  p.add(FAM, indexed_qualifer, ts, value1);
  p.add(FAM, regular_qualifer, ts, value2);
  primary.put(p);
  primary.flushCommits();

  // read the index for the expected values
  HTable index1 = new HTable(UTIL.getConfiguration(), getIndexTableName());

  // build the expected kvs
  List<Pair<byte[], CoveredColumn>> pairs = new ArrayList<Pair<byte[], CoveredColumn>>();
  pairs.add(new Pair<byte[], CoveredColumn>(value1, col1));
  pairs.add(new Pair<byte[], CoveredColumn>(EMPTY_BYTES, col2));
  List<KeyValue> expected = CoveredColumnIndexCodec.getIndexKeyValueForTesting(row1, ts, pairs);

  // verify that the index matches
  IndexTestingUtils.verifyIndexTableAtTimestamp(index1, expected, ts, value1);

  // cleanup
  closeAndCleanupTables(primary, index1);
}
Example #22
Source File: BalanceTest.java From hbase-tools with Apache License 2.0

@Test
public void testBalanceAsync() throws Exception {
  splitTable("a".getBytes());
  splitTable("b".getBytes());
  splitTable("c".getBytes());

  NavigableMap<HRegionInfo, ServerName> regionLocations;
  List<Map.Entry<HRegionInfo, ServerName>> hRegionInfoList;
  try (HTable table = getTable(tableName)) {
    regionLocations = table.getRegionLocations();
    hRegionInfoList = new ArrayList<>(regionLocations.entrySet());
    Assert.assertEquals(4, regionLocations.size());
    Assert.assertEquals(hRegionInfoList.get(0).getValue(), hRegionInfoList.get(1).getValue());
    Assert.assertEquals(hRegionInfoList.get(0).getValue(), hRegionInfoList.get(2).getValue());
    Assert.assertEquals(hRegionInfoList.get(0).getValue(), hRegionInfoList.get(3).getValue());

    String[] argsParam = {"zookeeper", tableName, "rr", "--force-proceed", "--move-async"};
    Args args = new ManagerArgs(argsParam);
    Assert.assertEquals("zookeeper", args.getZookeeperQuorum());
    Balance command = new Balance(admin, args);
    command.run();

    regionLocations = table.getRegionLocations();
    hRegionInfoList = new ArrayList<>(regionLocations.entrySet());
    Assert.assertNotEquals(hRegionInfoList.get(0).getValue(), hRegionInfoList.get(1).getValue());
    Assert.assertNotEquals(hRegionInfoList.get(2).getValue(), hRegionInfoList.get(3).getValue());
  }
}
Example #23
Source File: InvalidListPruneTest.java From phoenix-tephra with Apache License 2.0

@BeforeClass
public static void startMiniCluster() throws Exception {
  // Setup the configuration to start HBase cluster with the invalid list pruning enabled
  conf = HBaseConfiguration.create();
  conf.setBoolean(TxConstants.TransactionPruning.PRUNE_ENABLE, true);
  // Flush prune data to table quickly, so that tests don't need have to wait long to see updates
  conf.setLong(TxConstants.TransactionPruning.PRUNE_FLUSH_INTERVAL, 0L);
  AbstractHBaseTableTest.startMiniCluster();

  TransactionStateStorage txStateStorage = new InMemoryTransactionStateStorage();
  TransactionManager txManager = new TransactionManager(conf, txStateStorage, new TxMetricsCollector());
  txManager.startAndWait();

  // Do some transactional data operations
  txDataTable1 = TableName.valueOf("invalidListPruneTestTable1");
  HTable hTable = createTable(txDataTable1.getName(), new byte[][]{family}, false,
                              Collections.singletonList(TestTransactionProcessor.class.getName()));
  try (TransactionAwareHTable txTable = new TransactionAwareHTable(hTable, TxConstants.ConflictDetection.ROW)) {
    TransactionContext txContext = new TransactionContext(new InMemoryTxSystemClient(txManager), txTable);
    txContext.start();
    for (int i = 0; i < MAX_ROWS; ++i) {
      txTable.put(new Put(Bytes.toBytes(i)).add(family, qualifier, Bytes.toBytes(i)));
    }
    txContext.finish();
  }

  testUtil.flush(txDataTable1);
  txManager.stopAndWait();

  pruneStateTable = TableName.valueOf(conf.get(TxConstants.TransactionPruning.PRUNE_STATE_TABLE,
                                               TxConstants.TransactionPruning.DEFAULT_PRUNE_STATE_TABLE));
  connection = HConnectionManager.createConnection(conf);

  dataJanitorState =
    new DataJanitorState(new DataJanitorState.TableSupplier() {
      @Override
      public HTableInterface get() throws IOException {
        return connection.getTable(pruneStateTable);
      }
    });
}
Example #24
Source File: Tailer.java From zerowing with MIT License

protected void handleInsert(HTable table, DBObject doc) {
  byte[] row = _translator.createRowKey(doc);
  Put put = _translator.createPut(row, doc);

  try {
    table.put(put);
  } catch (IOException e) {
    log.error("Failed trying to insert object at " + row + " in " + table, e);
  }
}
Example #25
Source File: RegionChecker.java From splicer with Apache License 2.0

public RegionChecker(Configuration config) {
  try {
    this.table = new HTable(config, "tsdb");
  } catch (IOException e) {
    LOG.error("Could not create connection", e);
    throw new RegionCheckException("Could not create connection", e);
  }
}
Example #26
Source File: QueryExample.java From yuzhouwan with Apache License 2.0

public static List<DataProtos.DataQueryResponse.Row> queryByStartRowAndStopRow(
    String tableName, String startRow, String stopRow, boolean isIncludeEnd, boolean isSalting) {
  final DataProtos.DataQueryRequest.Builder requestBuilder = DataProtos.DataQueryRequest.newBuilder();
  requestBuilder.setTableName(tableName);
  requestBuilder.setStartRow(startRow);
  requestBuilder.setEndRow(stopRow);
  requestBuilder.setIncluedEnd(isIncludeEnd);
  requestBuilder.setIsSalting(isSalting);
  try {
    HTable table = new HTable(HBaseConfiguration.create(conf), tableName);
    Map<byte[], List<DataProtos.DataQueryResponse.Row>> result = table.coprocessorService(
        DataProtos.QueryDataService.class, null, null,
        new Batch.Call<DataProtos.QueryDataService, List<DataProtos.DataQueryResponse.Row>>() {
          public List<DataProtos.DataQueryResponse.Row> call(DataProtos.QueryDataService counter) throws IOException {
            ServerRpcController controller = new ServerRpcController();
            BlockingRpcCallback<DataProtos.DataQueryResponse> rpcCallback = new BlockingRpcCallback<>();
            counter.queryByStartRowAndEndRow(controller, requestBuilder.build(), rpcCallback);
            DataProtos.DataQueryResponse response = rpcCallback.get();
            if (controller.failedOnException()) {
              throw controller.getFailedOn();
            }
            return response.getRowListList();
          }
        });
    List<DataProtos.DataQueryResponse.Row> results = new LinkedList<>();
    result.entrySet()
        .stream()
        .filter(entry -> null != entry.getValue())
        .forEach(entry -> results.addAll(entry.getValue()));
    return results;
  } catch (Throwable e) {
    throw new RuntimeException(e);
  }
}