Java Code Examples for org.apache.hadoop.hbase.client.Increment#addColumn()
The following examples show how to use org.apache.hadoop.hbase.client.Increment#addColumn().
The examples are drawn from open source projects; the source file, originating project, and license are noted above each example.
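Before the project-specific examples, here is a minimal, self-contained sketch of the basic pattern: build an Increment for a row key, attach one or more (family, qualifier, amount) triples via addColumn(), and submit it through a Table. The connection setup, the table name "counters", the family "c", and the qualifier "hits" are illustrative assumptions, not taken from any example below.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class IncrementAddColumnSketch {
    public static void main(String[] args) throws IOException {
        // Assumes a reachable HBase cluster and an existing table "counters"
        // with a column family "c"; both names are hypothetical.
        try (Connection connection = ConnectionFactory.createConnection();
             Table table = connection.getTable(TableName.valueOf("counters"))) {
            Increment increment = new Increment(Bytes.toBytes("row-1"));
            // Atomically add 1 to the long value stored at c:hits; HBase treats
            // a missing cell as 0, so the counter need not be initialized first.
            increment.addColumn(Bytes.toBytes("c"), Bytes.toBytes("hits"), 1L);
            Result result = table.increment(increment);
            long after = Bytes.toLong(result.getValue(Bytes.toBytes("c"), Bytes.toBytes("hits")));
            System.out.println("value after increment: " + after);
        }
    }
}

As several examples below show, the returned Result carries the post-increment cell values, which is what makes read-modify-write counters safe without a separate Get.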
Example 1
Source File: HBaseStorage.java From cantor with Apache License 2.0
/**
 * @return the value after the increment, or empty if the increment failed
 */
@Override
public Optional<Long> incrementAndGet(long category, long ts, long range) {
    String tbl = String.format(TABLE_FMT, category % TABLE_COUNT);
    Table table = tableConnections.get(tbl);
    try {
        Increment increment = new Increment(Bytes.toBytes(String.format(ROW_KEY_FMT, ts)));
        increment.setTTL(ttl);
        byte[] col = Bytes.toBytes(String.valueOf(category));
        increment.addColumn(SERVICE_FAMILY, col, range);
        Result result = table.increment(increment);
        Long afterInc = Bytes.toLong(result.getValue(SERVICE_FAMILY, col));
        return Optional.of(afterInc);
    } catch (Exception e) {
        if (log.isErrorEnabled())
            log.error("increment range value failed for [ category: {} ] [ timestamp {} ] [ range {} ]",
                    category, ts, range, e);
        return Optional.empty();
    }
}
Example 2
Source File: HBaseStorage.java From cantor with Apache License 2.0
@Override
public boolean heartbeat(int instanceNumber, int ttl) {
    try {
        Increment increment = new Increment(
                Bytes.toBytes(String.format(RUNNING_STATE_FMT, instanceNumber)));
        byte[] col = Bytes.toBytes("state");
        increment.addColumn(INST_FAMILY, col, 1);
        increment.setTTL((long) ttl);
        metaTable.increment(increment);
        return true;
    } catch (Exception e) {
        if (log.isErrorEnabled())
            log.error("[HBase] Failed to heartbeat.", e);
        return false;
    }
}
Example 3
Source File: Sequence.java From phoenix with Apache License 2.0
@SuppressWarnings("deprecation") public Increment newIncrement(long timestamp, Sequence.ValueOp action) { Increment inc = new Increment(key.getKey()); // It doesn't matter what we set the amount too - we always use the values we get // from the Get we do to prevent any race conditions. All columns that get added // are returned with their current value try { inc.setTimeRange(MetaDataProtocol.MIN_TABLE_TIMESTAMP, timestamp); } catch (IOException e) { throw new RuntimeException(e); // Impossible } for (KeyValue kv : SEQUENCE_KV_COLUMNS) { // We don't care about the amount, as we'll add what gets looked up on the server-side inc.addColumn(kv.getFamily(), kv.getQualifier(), action.ordinal()); } return inc; }
Example 4
Source File: BasicFraudHBaseService.java From hadoop-arch-book with Apache License 2.0
public void logInProfileInHBase(long userId, String ipAddress) throws IOException, Exception {
    HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);

    ArrayList<Row> actions = new ArrayList<Row>();

    byte[] profileRowKey = generateProfileRowKey(userId);

    Delete delete = new Delete(profileRowKey);
    delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
            DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL);
    delete.deleteColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
            DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL);
    actions.add(delete);

    Increment increment = new Increment(profileRowKey);
    increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
            DataModelConsts.LOG_IN_COUNT_COL, 1);
    actions.add(increment);

    Put put = new Put(profileRowKey);
    put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL,
            Bytes.toBytes(System.currentTimeMillis()));
    put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES,
            Bytes.toBytes(ipAddress));
    actions.add(put);

    profileTable.batch(actions);
}
Example 5
Source File: BasicFraudHBaseService.java From hadoop-arch-book with Apache License 2.0
public void updateProfileCountsForSaleInHBase(Long buyerId, Long sellerId, ItemSaleEvent event)
        throws IOException, InterruptedException {
    HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);

    ArrayList<Row> actions = new ArrayList<Row>();

    Increment buyerValueIncrement = new Increment(generateProfileRowKey(buyerId));
    buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
            DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, event.getItemValue());
    buyerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
            DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
    actions.add(buyerValueIncrement);

    Increment sellerValueIncrement = new Increment(generateProfileRowKey(sellerId));
    sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
            DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, event.getItemValue());
    sellerValueIncrement.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
            DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, event.getItemValue());
    actions.add(sellerValueIncrement);

    profileTable.batch(actions);
}
Example 6
Source File: HBaseStorage.java From cantor with Apache License 2.0
@Override
public int checkAndRegister(int maxInstances) {
    int i = 0;
    int instanceNumber = ILLEGAL_INSTANCE;
    while (i < maxInstances) {
        try {
            Increment increment = new Increment(
                    Bytes.toBytes(String.format(RUNNING_STATE_FMT, i)));
            byte[] col = Bytes.toBytes("state");
            increment.addColumn(INST_FAMILY, col, 1);
            Result result = metaTable.increment(increment);
            Long afterInc = Bytes.toLong(result.getValue(INST_FAMILY, col));
            if (afterInc == REGISTERED) {
                instanceNumber = i;
                heartbeat(instanceNumber, DEFAULT_HEARTBEAT_SECONDS * 1000);
                break;
            } else {
                if (log.isWarnEnabled())
                    log.warn("[HBase] Failed to register since the instance box {} is full.", i);
            }
        } catch (Exception e) {
            if (log.isErrorEnabled())
                log.error(String.format("[HBase] Failed to check and register on %s.", i), e);
        }
        i++;
    }
    return instanceNumber;
}
Example 7
Source File: RowKeyMerge.java From pinpoint with Apache License 2.0
private Increment createIncrement(Map.Entry<RowKey, List<ColumnName>> rowKeyEntry,
        RowKeyDistributorByHashPrefix rowKeyDistributorByHashPrefix) {
    RowKey rowKey = rowKeyEntry.getKey();
    byte[] key = getRowKey(rowKey, rowKeyDistributorByHashPrefix);
    final Increment increment = new Increment(key);
    for (ColumnName columnName : rowKeyEntry.getValue()) {
        increment.addColumn(family, columnName.getColumnName(), columnName.getCallCount());
    }
    logger.trace("create increment row:{}, column:{}", rowKey, rowKeyEntry.getValue());
    return increment;
}
Example 8
Source File: TestRegionServerMetrics.java From hbase with Apache License 2.0
@Test
public void testIncrement() throws Exception {
    Put p = new Put(row).addColumn(cf, qualifier, Bytes.toBytes(0L));
    table.put(p);

    for (int count = 0; count < 13; count++) {
        Increment inc = new Increment(row);
        inc.addColumn(cf, qualifier, 100);
        table.increment(inc);
    }

    metricsRegionServer.getRegionServerWrapper().forceRecompute();
    assertCounter("incrementNumOps", 13);
}
Example 9
Source File: TestRegionObserverInterface.java From hbase with Apache License 2.0
private void testPreWALAppendHook(Table table, TableName tableName) throws IOException {
    int expectedCalls = 0;
    String[] methodArray = new String[1];
    methodArray[0] = "getCtPreWALAppend";
    Object[] resultArray = new Object[1];

    Put p = new Put(ROW);
    p.addColumn(A, A, A);
    table.put(p);
    resultArray[0] = ++expectedCalls;
    verifyMethodResult(SimpleRegionObserver.class, methodArray, tableName, resultArray);

    Append a = new Append(ROW);
    a.addColumn(B, B, B);
    table.append(a);
    resultArray[0] = ++expectedCalls;
    verifyMethodResult(SimpleRegionObserver.class, methodArray, tableName, resultArray);

    Increment i = new Increment(ROW);
    i.addColumn(C, C, 1);
    table.increment(i);
    resultArray[0] = ++expectedCalls;
    verifyMethodResult(SimpleRegionObserver.class, methodArray, tableName, resultArray);

    Delete d = new Delete(ROW);
    table.delete(d);
    resultArray[0] = ++expectedCalls;
    verifyMethodResult(SimpleRegionObserver.class, methodArray, tableName, resultArray);
}
Example 10
Source File: HBaseCounterIncrementor.java From Kafka-Spark-Hbase-Example with Apache License 2.0
private void flushToHBase() throws IOException {
    // Initialize before synchronizing; synchronizing on a null hTable would
    // throw a NullPointerException.
    if (hTable == null) {
        initialize();
    }
    synchronized (hTable) {
        updateLastUsed();

        for (Entry<String, CounterMap> entry : rowKeyCounterMap.entrySet()) {
            CounterMap pastCounterMap = entry.getValue();
            rowKeyCounterMap.put(entry.getKey(), new CounterMap());

            Increment increment = new Increment(Bytes.toBytes(entry.getKey()));

            boolean hasColumns = false;
            for (Entry<String, Counter> entry2 : pastCounterMap.entrySet()) {
                increment.addColumn(Bytes.toBytes(columnFamily),
                        Bytes.toBytes(entry2.getKey()), entry2.getValue().value);
                hasColumns = true;
            }
            if (hasColumns) {
                updateLastUsed();
                hTable.increment(increment);
            }
        }
        updateLastUsed();
    }
}
Example 11
Source File: AppSummaryService.java From hraven with Apache License 2.0
/**
 * Creates an Increment to aggregate job details.
 * @param appAggKey {@link AppAggregationKey}
 * @param jobDetails {@link JobDetails}
 * @return {@link Increment}
 */
private Increment incrementAppSummary(AppAggregationKey appAggKey, JobDetails jobDetails) {
    Increment aggIncrement = new Increment(aggConv.toBytes(appAggKey));
    aggIncrement.addColumn(Constants.INFO_FAM_BYTES, AggregationConstants.TOTAL_MAPS_BYTES,
            jobDetails.getTotalMaps());
    aggIncrement.addColumn(Constants.INFO_FAM_BYTES, AggregationConstants.TOTAL_REDUCES_BYTES,
            jobDetails.getTotalReduces());
    aggIncrement.addColumn(Constants.INFO_FAM_BYTES, AggregationConstants.MEGABYTEMILLIS_BYTES,
            jobDetails.getMegabyteMillis());
    aggIncrement.addColumn(Constants.INFO_FAM_BYTES, AggregationConstants.SLOTS_MILLIS_MAPS_BYTES,
            jobDetails.getMapSlotMillis());
    aggIncrement.addColumn(Constants.INFO_FAM_BYTES, AggregationConstants.SLOTS_MILLIS_REDUCES_BYTES,
            jobDetails.getReduceSlotMillis());
    aggIncrement.addColumn(Constants.INFO_FAM_BYTES, AggregationConstants.TOTAL_JOBS_BYTES, 1L);
    byte[] numberRowsCol = Bytes.toBytes(jobDetails.getJobKey().getRunId());
    aggIncrement.addColumn(AggregationConstants.SCRATCH_FAM_BYTES, numberRowsCol, 1L);
    return aggIncrement;
}
Example 12
Source File: BasicFraudHBaseService.java From hadoop-arch-book with Apache License 2.0
@Override
public void createBulkProfile(ArrayList<ProfileCreatePojo> pojoList) throws Exception {
    HTableInterface profileTable = hTablePool.getTable(DataModelConsts.PROFILE_TABLE);

    ArrayList<Row> actions = new ArrayList<Row>();

    for (ProfileCreatePojo pojo : pojoList) {
        byte[] rowKey = generateProfileRowKey(pojo.getUserId());

        Put put = new Put(rowKey);
        put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.FIXED_INFO_COL,
                Bytes.toBytes(pojo.getPojo().getUsername() + "|" + pojo.getPojo().getAge()
                        + "|" + System.currentTimeMillis()));
        put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_IP_ADDERSSES,
                Bytes.toBytes(pojo.getIpAddress()));
        put.add(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LAST_LOG_IN_COL,
                Bytes.toBytes(System.currentTimeMillis()));
        actions.add(put);

        Increment increment = new Increment(rowKey);
        increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.LOG_IN_COUNT_COL, 1);
        increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_SELLS_COL, 0);
        increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY, DataModelConsts.TOTAL_PURCHASES_COL, 0);
        increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
                DataModelConsts.TOTAL_VALUE_OF_PAST_PURCHASES_COL, 0);
        increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
                DataModelConsts.TOTAL_VALUE_OF_PAST_SELLS_COL, 0);
        increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
                DataModelConsts.CURRENT_LOG_IN_SELLS_VALUE_COL, 0);
        increment.addColumn(DataModelConsts.PROFILE_COLUMN_FAMILY,
                DataModelConsts.CURRENT_LOG_IN_PURCHASES_VALUE_COL, 0);
        actions.add(increment);
    }
    profileTable.batch(actions);
}
Example 13
Source File: TestRegionIncrement.java From hbase with Apache License 2.0
/**
 * Have each thread update its own Cell. Avoid contention with another thread.
 */
@Test
public void testUnContendedSingleCellIncrement() throws IOException, InterruptedException {
    final HRegion region = getRegion(TEST_UTIL.getConfiguration(),
            TestIncrementsFromClientSide.filterStringSoTableNameSafe(this.name.getMethodName()));
    long startTime = System.currentTimeMillis();
    try {
        SingleCellIncrementer[] threads = new SingleCellIncrementer[THREAD_COUNT];
        for (int i = 0; i < threads.length; i++) {
            byte[] rowBytes = Bytes.toBytes(i);
            Increment increment = new Increment(rowBytes);
            increment.addColumn(INCREMENT_BYTES, INCREMENT_BYTES, 1);
            threads[i] = new SingleCellIncrementer(i, INCREMENT_COUNT, region, increment);
        }
        for (int i = 0; i < threads.length; i++) {
            threads[i].start();
        }
        for (int i = 0; i < threads.length; i++) {
            threads[i].join();
        }
        RegionScanner regionScanner = region.getScanner(new Scan());
        List<Cell> cells = new ArrayList<>(THREAD_COUNT);
        while (regionScanner.next(cells)) continue;
        assertEquals(THREAD_COUNT, cells.size());
        long total = 0;
        for (Cell cell : cells) {
            total += Bytes.toLong(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
        }
        assertEquals(INCREMENT_COUNT * THREAD_COUNT, total);
    } finally {
        closeRegion(region);
        LOG.info(this.name.getMethodName() + " " + (System.currentTimeMillis() - startTime) + "ms");
    }
}
Example 14
Source File: HBaseCounterIncrementor.java From SparkOnALog with Apache License 2.0
private void flushToHBase() throws IOException {
    // Initialize before synchronizing; synchronizing on a null hTable would
    // throw a NullPointerException.
    if (hTable == null) {
        initialize();
    }
    synchronized (hTable) {
        updateLastUsed();

        for (Entry<String, CounterMap> entry : rowKeyCounterMap.entrySet()) {
            CounterMap pastCounterMap = entry.getValue();
            rowKeyCounterMap.put(entry.getKey(), new CounterMap());

            Increment increment = new Increment(Bytes.toBytes(entry.getKey()));

            boolean hasColumns = false;
            for (Entry<String, Counter> entry2 : pastCounterMap.entrySet()) {
                increment.addColumn(Bytes.toBytes(columnFamily),
                        Bytes.toBytes(entry2.getKey()), entry2.getValue().value);
                hasColumns = true;
            }
            if (hasColumns) {
                updateLastUsed();
                hTable.increment(increment);
            }
        }
        updateLastUsed();
    }
}
Example 15
Source File: JavaHBaseBulkIncrementExample.java From learning-hadoop with Apache License 2.0
public Increment call(String v) throws Exception {
    String[] cells = v.split(",");
    Increment increment = new Increment(Bytes.toBytes(cells[0]));
    increment.addColumn(Bytes.toBytes(cells[1]), Bytes.toBytes(cells[2]),
            Integer.parseInt(cells[3]));
    return increment;
}
Example 16
Source File: PropertyIncrementer.java From hgraphdb with Apache License 2.0
@Override
public Iterator<Mutation> constructMutations() {
    Increment incr = new Increment(ValueUtils.serializeWithSalt(element.id()));
    incr.addColumn(Constants.DEFAULT_FAMILY_BYTES, Bytes.toBytes(key), value);
    Put put = new Put(ValueUtils.serializeWithSalt(element.id()));
    put.addColumn(Constants.DEFAULT_FAMILY_BYTES, Constants.UPDATED_AT_BYTES,
            ValueUtils.serialize(((HBaseElement) element).updatedAt()));
    return IteratorUtils.of(incr, put);
}
Example 17
Source File: TestSpaceQuotaBasicFunctioning.java From hbase with Apache License 2.0
@Test
public void testNoInsertsWithIncrement() throws Exception {
    Increment i = new Increment(Bytes.toBytes("to_reject"));
    i.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("count"), 0);
    helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_INSERTS, i);
}
Example 18
Source File: TestSpaceQuotaBasicFunctioning.java From hbase with Apache License 2.0
@Test
public void testNoWritesWithIncrement() throws Exception {
    Increment i = new Increment(Bytes.toBytes("to_reject"));
    i.addColumn(Bytes.toBytes(SpaceQuotaHelperForTests.F1), Bytes.toBytes("count"), 0);
    helper.writeUntilViolationAndVerifyViolation(SpaceViolationPolicy.NO_WRITES, i);
}
Example 19
Source File: TestPassCustomCellViaRegionObserver.java From hbase with Apache License 2.0
@Test
public void testMutation() throws Exception {
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    table.put(put);
    byte[] value = VALUE;
    assertResult(table.get(new Get(ROW)), value, value);
    assertObserverHasExecuted();

    Increment inc = new Increment(ROW);
    inc.addColumn(FAMILY, QUALIFIER, 10L);
    table.increment(inc);
    // QUALIFIER -> 10 (put) + 10 (increment)
    // QUALIFIER_FROM_CP -> 10 (from cp's put) + 10 (from cp's increment)
    value = Bytes.toBytes(20L);
    assertResult(table.get(new Get(ROW)), value, value);
    assertObserverHasExecuted();

    Append append = new Append(ROW);
    append.addColumn(FAMILY, QUALIFIER, APPEND_VALUE);
    table.append(append);
    // 10L + "MB"
    value = ByteBuffer.wrap(new byte[value.length + APPEND_VALUE.length])
        .put(value)
        .put(APPEND_VALUE)
        .array();
    assertResult(table.get(new Get(ROW)), value, value);
    assertObserverHasExecuted();

    Delete delete = new Delete(ROW);
    delete.addColumns(FAMILY, QUALIFIER);
    table.delete(delete);
    assertTrue(Arrays.asList(table.get(new Get(ROW)).rawCells()).toString(),
        table.get(new Get(ROW)).isEmpty());
    assertObserverHasExecuted();

    assertTrue(table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put));
    assertObserverHasExecuted();
    assertTrue(
        table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenDelete(delete));
    assertObserverHasExecuted();
    assertTrue(table.get(new Get(ROW)).isEmpty());
}
Example 20
Source File: TestResettingCounters.java From hbase with Apache License 2.0
@Test
public void testResettingCounters() throws Exception {
    HBaseTestingUtility htu = new HBaseTestingUtility();
    Configuration conf = htu.getConfiguration();
    FileSystem fs = FileSystem.get(conf);
    byte[] table = Bytes.toBytes(name.getMethodName());
    byte[][] families = new byte[][] {
        Bytes.toBytes("family1"), Bytes.toBytes("family2"), Bytes.toBytes("family3")
    };
    int numQualifiers = 10;
    byte[][] qualifiers = new byte[numQualifiers][];
    for (int i = 0; i < numQualifiers; i++) qualifiers[i] = Bytes.toBytes("qf" + i);
    int numRows = 10;
    byte[][] rows = new byte[numRows][];
    for (int i = 0; i < numRows; i++) rows[i] = Bytes.toBytes("r" + i);

    TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
        new TableDescriptorBuilder.ModifyableTableDescriptor(TableName.valueOf(table));
    for (byte[] family : families) {
        tableDescriptor.setColumnFamily(
            new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family));
    }
    RegionInfo hri = RegionInfoBuilder.newBuilder(tableDescriptor.getTableName()).build();
    String testDir = htu.getDataTestDir() + "/TestResettingCounters/";
    Path path = new Path(testDir);
    if (fs.exists(path)) {
        if (!fs.delete(path, true)) {
            throw new IOException("Failed delete of " + path);
        }
    }
    HRegion region = HBaseTestingUtility.createRegionAndWAL(hri, path, conf, tableDescriptor);
    try {
        Increment odd = new Increment(rows[0]);
        odd.setDurability(Durability.SKIP_WAL);
        Increment even = new Increment(rows[0]);
        even.setDurability(Durability.SKIP_WAL);
        Increment all = new Increment(rows[0]);
        all.setDurability(Durability.SKIP_WAL);
        for (int i = 0; i < numQualifiers; i++) {
            if (i % 2 == 0) even.addColumn(families[0], qualifiers[i], 1);
            else odd.addColumn(families[0], qualifiers[i], 1);
            all.addColumn(families[0], qualifiers[i], 1);
        }

        // increment odd qualifiers 5 times and flush
        for (int i = 0; i < 5; i++) region.increment(odd, HConstants.NO_NONCE, HConstants.NO_NONCE);
        region.flush(true);

        // increment even qualifiers 5 times
        for (int i = 0; i < 5; i++) region.increment(even, HConstants.NO_NONCE, HConstants.NO_NONCE);

        // increment all qualifiers, should have value=6 for all
        Result result = region.increment(all, HConstants.NO_NONCE, HConstants.NO_NONCE);
        assertEquals(numQualifiers, result.size());
        Cell[] kvs = result.rawCells();
        for (int i = 0; i < kvs.length; i++) {
            System.out.println(kvs[i].toString());
            assertTrue(CellUtil.matchingQualifier(kvs[i], qualifiers[i]));
            assertEquals(6, Bytes.toLong(CellUtil.cloneValue(kvs[i])));
        }
    } finally {
        HBaseTestingUtility.closeRegionAndWAL(region);
    }
}