Java Code Examples for org.apache.hadoop.hbase.client.Result#getRow()
The following examples show how to use org.apache.hadoop.hbase.client.Result#getRow().
They are extracted from open source projects; each example lists its source project, file, and license.
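Before the project-specific examples below, here is a minimal, self-contained sketch of the typical pattern: scan a table with the HBase client API and call Result#getRow() on each Result to recover its row key as a byte[]. The table name "example_table" and the default configuration used here are placeholders chosen for illustration, not taken from any of the listed projects.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class GetRowExample {

    public static void main(String[] args) throws IOException {
        // Placeholder table name; replace with a table that exists in your cluster.
        TableName tableName = TableName.valueOf("example_table");

        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(tableName);
             ResultScanner scanner = table.getScanner(new Scan())) {
            for (Result result : scanner) {
                // getRow() returns the row key of this Result as a byte[].
                byte[] rowKey = result.getRow();
                System.out.println(Bytes.toStringBinary(rowKey));
            }
        }
    }
}

Result#getRow() returns the raw row key bytes; Bytes.toStringBinary is used here only to print them in a readable form.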
Example 1
Source Project: flink File: AbstractTableInputFormat.java License: Apache License 2.0
public T nextRecord(T reuse) throws IOException {
    if (resultScanner == null) {
        throw new IOException("No table result scanner provided!");
    }
    Result res;
    try {
        res = resultScanner.next();
    } catch (Exception e) {
        resultScanner.close();
        // workaround for timeout on scan
        LOG.warn("Error after scan of " + scannedRows + " rows. Retry with a new scanner...", e);
        scan.withStartRow(currentRow, false);
        resultScanner = table.getScanner(scan);
        res = resultScanner.next();
    }
    if (res != null) {
        scannedRows++;
        currentRow = res.getRow();
        return mapResultToOutType(res);
    }
    endReached = true;
    return null;
}
Example 2
Source Project: SkyEye File: TraceDto.java License: GNU General Public License v3.0
@Override
public TraceDto mapRow(Result res, int rowNum) throws Exception {
    String traceId = new String(res.getRow());
    NavigableMap<byte[], byte[]> data = res.getFamilyMap(Constants.TABLE_TRACE_COLUMN_FAMILY.getBytes());
    String spanId;
    JSONObject spanDetail;
    TreeMap<String, JSONObject> map = new TreeMap<>();
    Set<Map.Entry<byte[], byte[]>> spanEntrySet = data.entrySet();
    for (Map.Entry<byte[], byte[]> entry : spanEntrySet) {
        spanId = new String(entry.getKey());
        spanDetail = JSON.parseObject(new String(entry.getValue()));
        map.put(spanId, spanDetail);
    }
    Set<Map.Entry<String, JSONObject>> spans = map.entrySet();
    TraceDto rtn = new TraceDto();
    rtn.setTraceId(traceId).setSpans(spans);
    return rtn;
}
Example 3
Source Project: hbase File: QuotaTableUtil.java License: Apache License 2.0
/**
 * Returns a list of {@code Delete} to remove all entries returned by the passed scanner.
 * @param connection connection to re-use
 * @param scan the scanner to use to generate the list of deletes
 */
static List<Delete> createDeletesForExistingSnapshotsFromScan(Connection connection, Scan scan)
        throws IOException {
    List<Delete> deletes = new ArrayList<>();
    try (Table quotaTable = connection.getTable(QUOTA_TABLE_NAME);
         ResultScanner rs = quotaTable.getScanner(scan)) {
        for (Result r : rs) {
            CellScanner cs = r.cellScanner();
            while (cs.advance()) {
                Cell c = cs.current();
                byte[] family = Bytes.copy(c.getFamilyArray(), c.getFamilyOffset(), c.getFamilyLength());
                byte[] qual = Bytes.copy(c.getQualifierArray(), c.getQualifierOffset(), c.getQualifierLength());
                Delete d = new Delete(r.getRow());
                d.addColumns(family, qual);
                deletes.add(d);
            }
        }
        return deletes;
    }
}
Example 4
Source Project: antsdb File: Helper.java License: GNU Lesser General Public License v3.0
public static long toIndexLine(Heap heap, Result r) {
    if (r.isEmpty()) {
        return 0;
    }
    NavigableMap<byte[], byte[]> sys = r.getFamilyMap(DATA_COLUMN_FAMILY_BYTES);
    byte[] indexKey = r.getRow();
    byte[] rowKey = sys.get(SYS_COLUMN_INDEXKEY_BYTES);
    byte misc = sys.get(SYS_COLUMN_MISC_BYTES)[0];
    indexKey = hbaseKeyToAnts(indexKey);
    rowKey = hbaseKeyToAnts(rowKey);
    return IndexLine.alloc(heap, indexKey, rowKey, misc).getAddress();
}
Example 5
Source Project: hbase File: TestVerifyReplication.java License: Apache License 2.0
static void runBatchCopyTest() throws Exception {
    // normal Batch tests for htable1
    loadData("", row, noRepfamName);

    Scan scan1 = new Scan();
    List<Put> puts = new ArrayList<>(NB_ROWS_IN_BATCH);
    ResultScanner scanner1 = htable1.getScanner(scan1);
    Result[] res1 = scanner1.next(NB_ROWS_IN_BATCH);
    for (Result result : res1) {
        Put put = new Put(result.getRow());
        for (Cell cell : result.rawCells()) {
            put.add(cell);
        }
        puts.add(put);
    }
    scanner1.close();
    assertEquals(NB_ROWS_IN_BATCH, res1.length);

    // Copy the data to htable3
    htable3.put(puts);

    Scan scan2 = new Scan();
    ResultScanner scanner2 = htable3.getScanner(scan2);
    Result[] res2 = scanner2.next(NB_ROWS_IN_BATCH);
    scanner2.close();
    assertEquals(NB_ROWS_IN_BATCH, res2.length);
}
Example 6
Source Project: hbase File: MobStressToolRunner.java License: Apache License 2.0
private void scanTable() {
    try {
        Result result;
        ResultScanner scanner = table.getScanner(fam);
        int counter = 0;
        while ((result = scanner.next()) != null) {
            byte[] key = result.getRow();
            assertTrue(Arrays.equals(result.getValue(fam, qualifier), Bytes.add(key, mobVal)));
            if (counter % 10000 == 0) {
                LOG.info("GET=" + counter + " key=" + Bytes.toInt(key));
            }
            counter++;
        }
        assertEquals(count, counter);
    } catch (Exception e) {
        e.printStackTrace();
        LOG.error("MOB Stress Test FAILED");
        if (HTU != null) {
            assertTrue(false);
        } else {
            System.exit(-1);
        }
    }
}
Example 7
Source Project: localization_nifi File: HBase_1_1_2_ClientService.java License: Apache License 2.0
@Override
public void scan(final String tableName, final Collection<Column> columns, final String filterExpression,
                 final long minTime, final ResultHandler handler) throws IOException {
    Filter filter = null;
    if (!StringUtils.isBlank(filterExpression)) {
        ParseFilter parseFilter = new ParseFilter();
        filter = parseFilter.parseFilterString(filterExpression);
    }

    try (final Table table = connection.getTable(TableName.valueOf(tableName));
         final ResultScanner scanner = getResults(table, columns, filter, minTime)) {

        for (final Result result : scanner) {
            final byte[] rowKey = result.getRow();
            final Cell[] cells = result.rawCells();
            if (cells == null) {
                continue;
            }

            // convert HBase cells to NiFi cells
            final ResultCell[] resultCells = new ResultCell[cells.length];
            for (int i = 0; i < cells.length; i++) {
                final Cell cell = cells[i];
                final ResultCell resultCell = getResultCell(cell);
                resultCells[i] = resultCell;
            }

            // delegate to the handler
            handler.handle(rowKey, resultCells);
        }
    }
}
Example 8
Source Project: antsdb File: Helper.java License: GNU Lesser General Public License v3.0
public static Map<String, byte[]> toMap(Result r) {
    Map<String, byte[]> row = new HashMap<>();
    byte[] key = r.getRow();
    row.put("", key);
    for (Map.Entry<byte[], NavigableMap<byte[], byte[]>> i : r.getNoVersionMap().entrySet()) {
        String cf = new String(i.getKey());
        for (Map.Entry<byte[], byte[]> j : i.getValue().entrySet()) {
            String q = new String(getKeyName(j.getKey()));
            String name = cf + ":" + q;
            row.put(name, j.getValue());
        }
    }
    return row;
}
Example 9
Source Project: nifi File: HBase_1_1_2_ClientService.java License: Apache License 2.0
@Override
public void scan(final String tableName, final String startRow, final String endRow, String filterExpression,
                 final Long timerangeMin, final Long timerangeMax, final Integer limitRows,
                 final Boolean isReversed, final Boolean blockCache, final Collection<Column> columns,
                 List<String> visibilityLabels, final ResultHandler handler) throws IOException {
    try (final Table table = connection.getTable(TableName.valueOf(tableName));
         final ResultScanner scanner = getResults(table, startRow, endRow, filterExpression, timerangeMin,
                 timerangeMax, limitRows, isReversed, blockCache, columns, visibilityLabels)) {

        int cnt = 0;
        final int lim = limitRows != null ? limitRows : 0;
        for (final Result result : scanner) {
            if (lim > 0 && ++cnt > lim) {
                break;
            }

            final byte[] rowKey = result.getRow();
            final Cell[] cells = result.rawCells();
            if (cells == null) {
                continue;
            }

            // convert HBase cells to NiFi cells
            final ResultCell[] resultCells = new ResultCell[cells.length];
            for (int i = 0; i < cells.length; i++) {
                final Cell cell = cells[i];
                final ResultCell resultCell = getResultCell(cell);
                resultCells[i] = resultCell;
            }

            // delegate to the handler
            handler.handle(rowKey, resultCells);
        }
    }
}
Example 10
Source Project: pinpoint File: ApplicationStatMapper.java License: Apache License 2.0
@Override
public List<JoinStatBo> mapRow(Result result, int rowNum) throws Exception {
    if (result.isEmpty()) {
        return Collections.emptyList();
    }
    final byte[] distributedRowKey = result.getRow();
    final String applicationId = this.hbaseOperationFactory.getApplicationId(distributedRowKey);
    final long baseTimestamp = this.hbaseOperationFactory.getBaseTimestamp(distributedRowKey);
    List<JoinStatBo> dataPoints = new ArrayList<>();
    for (Cell cell : result.rawCells()) {
        if (CellUtil.matchingFamily(cell, HbaseColumnFamily.APPLICATION_STAT_STATISTICS.getName())) {
            Buffer qualifierBuffer = new OffsetFixedBuffer(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength());
            Buffer valueBuffer = new OffsetFixedBuffer(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength());
            long timestampDelta = this.decoder.decodeQualifier(qualifierBuffer);
            ApplicationStatDecodingContext decodingContext = new ApplicationStatDecodingContext();
            decodingContext.setApplicationId(applicationId);
            decodingContext.setBaseTimestamp(baseTimestamp);
            decodingContext.setTimestampDelta(timestampDelta);
            List<JoinStatBo> candidates = this.decoder.decodeValue(valueBuffer, decodingContext);
            for (JoinStatBo candidate : candidates) {
                long timestamp = candidate.getTimestamp();
                if (this.filter.filter(timestamp)) {
                    continue;
                }
                dataPoints.add(candidate);
            }
        }
    }
    // Reverse sort as timestamp is stored in a reversed order.
    dataPoints.sort(REVERSE_TIMESTAMP_COMPARATOR);
    return dataPoints;
}
Example 11
Source Project: hbase File: IntegrationTestWithCellVisibilityLoadAndVerify.java License: Apache License 2.0
@Override
protected void map(ImmutableBytesWritable key, Result value, Context context)
        throws IOException, InterruptedException {
    byte[] row = value.getRow();
    Counter c = getCounter(row);
    c.increment(1);
}
Example 12
Source Project: phoenix File: IndexToolForNonTxGlobalIndexIT.java License: Apache License 2.0
public void deleteAllRows(Connection conn, TableName tableName) throws SQLException, IOException {
    Scan scan = new Scan();
    Table table = conn.unwrap(PhoenixConnection.class).getQueryServices().getTable(tableName.getName());
    try (ResultScanner scanner = table.getScanner(scan)) {
        for (Result r : scanner) {
            Delete del = new Delete(r.getRow());
            table.delete(del);
        }
    }
}
Example 13
Source Project: nifi File: HBase_2_ClientService.java License: Apache License 2.0
@Override
public void scan(String tableName, Collection<Column> columns, String filterExpression, long minTime,
                 List<String> visibilityLabels, ResultHandler handler) throws IOException {
    Filter filter = null;
    if (!StringUtils.isBlank(filterExpression)) {
        ParseFilter parseFilter = new ParseFilter();
        filter = parseFilter.parseFilterString(filterExpression);
    }

    try (final Table table = connection.getTable(TableName.valueOf(tableName));
         final ResultScanner scanner = getResults(table, columns, filter, minTime, visibilityLabels)) {
        for (final Result result : scanner) {
            final byte[] rowKey = result.getRow();
            final Cell[] cells = result.rawCells();
            if (cells == null) {
                continue;
            }

            // convert HBase cells to NiFi cells
            final ResultCell[] resultCells = new ResultCell[cells.length];
            for (int i = 0; i < cells.length; i++) {
                final Cell cell = cells[i];
                final ResultCell resultCell = getResultCell(cell);
                resultCells[i] = resultCell;
            }

            // delegate to the handler
            handler.handle(rowKey, resultCells);
        }
    }
}
Example 14
Source Project: hbase File: RegionReplicaInfo.java License: Apache License 2.0
private RegionReplicaInfo(final Result result, final HRegionLocation location) {
    this.row = result != null ? result.getRow() : null;
    this.regionInfo = location != null ? location.getRegion() : null;
    this.regionState = (result != null && regionInfo != null)
        ? RegionStateStore.getRegionState(result, regionInfo)
        : null;
    this.serverName = location != null ? location.getServerName() : null;
    this.seqNum = (location != null) ? location.getSeqNum() : HConstants.NO_SEQNUM;
    this.targetServerName = (result != null && regionInfo != null)
        ? MetaTableAccessor.getTargetServerName(result, regionInfo.getReplicaId())
        : null;
    this.mergeRegionInfo = (result != null)
        ? MetaTableAccessor.getMergeRegionsWithName(result.rawCells())
        : null;
    if (result != null) {
        PairOfSameType<RegionInfo> daughterRegions = MetaTableAccessor.getDaughterRegions(result);
        this.splitRegionInfo = new LinkedHashMap<>();
        if (daughterRegions.getFirst() != null) {
            splitRegionInfo.put(HConstants.SPLITA_QUALIFIER_STR, daughterRegions.getFirst());
        }
        if (daughterRegions.getSecond() != null) {
            splitRegionInfo.put(HConstants.SPLITB_QUALIFIER_STR, daughterRegions.getSecond());
        }
    } else {
        this.splitRegionInfo = null;
    }
}
Example 15
Source Project: Eagle File: HBaseLogReader.java License: Apache License 2.0
public InternalLog read() throws IOException {
    if (rs == null) {
        throw new IllegalArgumentException("ResultScanner must be initialized before reading");
    }
    InternalLog t = null;
    Result r = rs.next();
    if (r != null) {
        byte[] row = r.getRow();
        // skip the first 4 bytes : prefix
        long timestamp = ByteUtil.bytesToLong(row, 4);
        // reverse timestamp
        timestamp = Long.MAX_VALUE - timestamp;
        int count = 0;
        if (qualifiers != null) {
            count = qualifiers.length;
        }
        byte[][] values = new byte[count][];
        Map<String, byte[]> allQualifierValues = new HashMap<String, byte[]>();
        for (int i = 0; i < count; i++) {
            // TODO if the returned value is null, this row has no such column, so why set null on the object?
            values[i] = r.getValue(schema.getColumnFamily().getBytes(), qualifiers[i]);
            allQualifierValues.put(new String(qualifiers[i]), values[i]);
        }
        t = buildObject(row, timestamp, allQualifierValues);
    }
    return t;
}
Example 16
Source Project: Kylin File: GridTableHBaseBenchmark.java License: Apache License 2.0
private static void prepareData(HConnection conn) throws IOException {
    HTableInterface table = conn.getTable(TEST_TABLE);
    try {
        // check how many rows already exist
        int nRows = 0;
        Scan scan = new Scan();
        scan.setFilter(new KeyOnlyFilter());
        ResultScanner scanner = table.getScanner(scan);
        for (Result r : scanner) {
            r.getRow(); // nothing to do
            nRows++;
        }

        if (nRows > 0) {
            System.out.println(nRows + " existing rows");
            if (nRows != N_ROWS)
                throw new IOException("Expect " + N_ROWS + " rows but it is not");
            return;
        }

        // insert rows into empty table
        System.out.println("Writing " + N_ROWS + " rows to " + TEST_TABLE);
        long nBytes = 0;
        for (int i = 0; i < N_ROWS; i++) {
            byte[] rowkey = Bytes.toBytes(i);
            Put put = new Put(rowkey);
            byte[] cell = randomBytes();
            put.add(CF, QN, cell);
            table.put(put);
            nBytes += cell.length;
            dot(i, N_ROWS);
        }
        System.out.println();
        System.out.println("Written " + N_ROWS + " rows, " + nBytes + " bytes");
    } finally {
        IOUtils.closeQuietly(table);
    }
}
Example 17
Source Project: hraven File: ProcessRecordService.java License: Apache License 2.0
/**
 * Transform results pulled from a scanner and turn into a list of
 * ProcessRecords.
 *
 * @param scanner used to pull the results from, in the order determined by
 *          the scanner.
 * @param maxCount maximum number of results to return.
 * @return
 */
private List<ProcessRecord> createFromResults(ResultScanner scanner, int maxCount) {
    // Defensive coding
    if ((maxCount <= 0) || (scanner == null)) {
        return new ArrayList<ProcessRecord>(0);
    }
    List<ProcessRecord> records = new ArrayList<ProcessRecord>();

    for (Result result : scanner) {
        byte[] row = result.getRow();
        ProcessRecordKey key = keyConv.fromBytes(row);

        KeyValue keyValue = result.getColumnLatest(Constants.INFO_FAM_BYTES,
            Constants.MIN_MOD_TIME_MILLIS_COLUMN_BYTES);
        long minModificationTimeMillis = Bytes.toLong(keyValue.getValue());

        keyValue = result.getColumnLatest(Constants.INFO_FAM_BYTES,
            Constants.PROCESSED_JOB_FILES_COLUMN_BYTES);
        int processedJobFiles = Bytes.toInt(keyValue.getValue());

        keyValue = result.getColumnLatest(Constants.INFO_FAM_BYTES,
            Constants.PROCESS_FILE_COLUMN_BYTES);
        String processingDirectory = Bytes.toString(keyValue.getValue());

        keyValue = result.getColumnLatest(Constants.INFO_FAM_BYTES,
            Constants.PROCESSING_STATE_COLUMN_BYTES);
        ProcessState processState = ProcessState.getProcessState(Bytes.toInt(keyValue.getValue()));

        keyValue = result.getColumnLatest(Constants.INFO_FAM_BYTES,
            Constants.MIN_JOB_ID_COLUMN_BYTES);
        String minJobId = null;
        if (keyValue != null) {
            minJobId = Bytes.toString(keyValue.getValue());
        }

        keyValue = result.getColumnLatest(Constants.INFO_FAM_BYTES,
            Constants.MAX_JOB_ID_COLUMN_BYTES);
        String maxJobId = null;
        if (keyValue != null) {
            maxJobId = Bytes.toString(keyValue.getValue());
        }

        ProcessRecord processRecord = new ProcessRecord(key.getCluster(),
            processState, minModificationTimeMillis, key.getTimestamp(),
            processedJobFiles, processingDirectory, minJobId, maxJobId);
        records.add(processRecord);

        // Check if we retrieved enough records.
        if (records.size() >= maxCount) {
            break;
        }
    }

    LOG.info("Returning " + records.size() + " process records");
    return records;
}
Example 18
Source Project: hbase-orm File: HBObjectMapper.java License: Apache License 2.0
private boolean isResultEmpty(Result result) {
    if (result == null || result.isEmpty()) {
        return true;
    }
    byte[] rowBytes = result.getRow();
    return rowBytes == null || rowBytes.length == 0;
}
Example 19
Source Project: Kylin File: PingHBaseCLI.java License: Apache License 2.0
public static void main(String[] args) throws IOException {
    String metadataUrl = args[0];
    String hbaseTable = args[1];

    System.out.println("Hello friend.");

    Configuration hconf = HadoopUtil.newHBaseConfiguration(metadataUrl);
    if (User.isHBaseSecurityEnabled(hconf)) {
        try {
            System.out.println("--------------Getting kerberos credential for user " + UserGroupInformation.getCurrentUser().getUserName());
            TokenUtil.obtainAndCacheToken(hconf, UserGroupInformation.getCurrentUser());
        } catch (InterruptedException e) {
            System.out.println("--------------Error while getting kerberos credential for user " + UserGroupInformation.getCurrentUser().getUserName());
        }
    }

    Scan scan = new Scan();
    int limit = 20;

    HConnection conn = null;
    HTableInterface table = null;
    ResultScanner scanner = null;
    try {
        conn = HConnectionManager.createConnection(hconf);
        table = conn.getTable(hbaseTable);
        scanner = table.getScanner(scan);
        int count = 0;
        for (Result r : scanner) {
            byte[] rowkey = r.getRow();
            System.out.println(Bytes.toStringBinary(rowkey));
            count++;
            if (count == limit)
                break;
        }
    } finally {
        if (scanner != null) {
            scanner.close();
        }
        if (table != null) {
            table.close();
        }
        if (conn != null) {
            conn.close();
        }
    }
}
Example 20
Source Project: phoenix-omid File: TestBaillisAnomaliesWithTXs.java License: Apache License 2.0
@Test(timeOut = 10_000)
public void testSIPreventsReadSkewUsingWritePredicate(ITestContext context) throws Exception {
    // TX History for G-single:
    // begin; set transaction isolation level repeatable read; -- T1
    // begin; set transaction isolation level repeatable read; -- T2
    // select * from test where id = 1; -- T1. Shows 1 => 10
    // select * from test; -- T2
    // update test set value = 12 where id = 1; -- T2
    // update test set value = 18 where id = 2; -- T2
    // commit; -- T2
    // delete from test where value = 20; -- T1. Prints "ERROR: could not serialize access due to concurrent update"
    // abort; -- T1. There's nothing else we can do, this transaction has failed

    // 0) Start transactions
    TransactionManager tm = newTransactionManager(context);
    TTable txTable = new TTable(connection, TEST_TABLE);
    Transaction tx1 = tm.begin();
    Transaction tx2 = tm.begin();

    // 1) select * from test; -- T1
    assertNumberOfRows(txTable, tx1, 2, new Scan());

    // 2) select * from test; -- T2
    assertNumberOfRows(txTable, tx2, 2, new Scan());

    // 3) update test set value = 12 where id = 1; -- T2
    // 4) update test set value = 18 where id = 2; -- T2
    Put updateRow1Tx2 = new Put(rowId1);
    updateRow1Tx2.addColumn(famName, colName, Bytes.toBytes(12));
    Put updateRow2Tx2 = new Put(rowId2);
    updateRow2Tx2.addColumn(famName, colName, Bytes.toBytes(18));
    txTable.put(tx2, Arrays.asList(updateRow1Tx2, updateRow2Tx2));

    // 5) commit; -- T2
    tm.commit(tx2);

    // 6) delete from test where value = 20; -- T1. Prints
    // "ERROR: could not serialize access due to concurrent update"
    Filter f = new SingleColumnValueFilter(famName, colName, CompareFilter.CompareOp.EQUAL, Bytes.toBytes(20));
    Scan checkFor20 = new Scan();
    checkFor20.setFilter(f);
    ResultScanner checkFor20Scanner = txTable.getScanner(tx1, checkFor20);
    Result res = checkFor20Scanner.next();
    while (res != null) {
        LOG.info("RESSS {}", res);
        LOG.info("Deleting row id {} with value {}", Bytes.toString(res.getRow()),
            Bytes.toInt(res.getValue(famName, colName)));
        Delete delete20 = new Delete(res.getRow());
        txTable.delete(tx1, delete20);
        res = checkFor20Scanner.next();
    }

    // 7) abort; -- T1
    try {
        tm.commit(tx1);
        fail("Should be aborted");
    } catch (RollbackException e) {
        // Expected
    }
}