Java Code Examples for org.apache.hadoop.hbase.regionserver.RegionScanner#getRegionInfo()
The following examples show how to use org.apache.hadoop.hbase.regionserver.RegionScanner#getRegionInfo(). All of the examples are drawn from the Apache Phoenix project; the source file and license are noted above each example.
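All five examples share the same shape: a Phoenix coprocessor wraps the RegionScanner it was handed, overrides next() to serve post-processed rows, and forwards getRegionInfo() (plus, in some versions, close() and getMaxResultSize()) to the underlying scanner so the wrapper stays transparent to HBase. The sketch below distills that delegation pattern. It is illustrative only: it assumes the 0.94-era API used in Examples 1, 4, and 5 (HRegionInfo, List<KeyValue>), and the wrap method and rows iterator are hypothetical names, not HBase or Phoenix API.

static RegionScanner wrap(final RegionScanner s, final Iterator<KeyValue> rows) {
    return new BaseRegionScanner() {
        @Override
        public HRegionInfo getRegionInfo() {
            // Region identity comes from the wrapped scanner, so the replacement
            // scanner still reports the correct region to its caller.
            return s.getRegionInfo();
        }

        @Override
        public void close() throws IOException {
            s.close(); // release the underlying scanner's resources
        }

        @Override
        public boolean next(List<KeyValue> results) throws IOException {
            if (!rows.hasNext()) {
                return false;
            }
            results.add(rows.next());
            return rows.hasNext(); // true iff more rows remain after this one
        }
    };
}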
Example 1
Source File: SpillableGroupByCache.java, from Apache Phoenix (BSD 3-Clause "New" or "Revised" License)
@Override
public RegionScanner getScanner(final RegionScanner s) {
    final Iterator<Entry<ImmutableBytesWritable, Aggregator[]>> cacheIter = new EntryIterator();
    // scanner using the spillable implementation
    return new BaseRegionScanner() {
        @Override
        public HRegionInfo getRegionInfo() {
            return s.getRegionInfo();
        }

        @Override
        public void close() throws IOException {
            try {
                s.close();
            } finally {
                // Always close gbCache and swallow possible Exceptions
                Closeables.closeQuietly(SpillableGroupByCache.this);
            }
        }

        @Override
        public boolean next(List<KeyValue> results) throws IOException {
            if (!cacheIter.hasNext()) {
                return false;
            }
            Map.Entry<ImmutableBytesWritable, Aggregator[]> ce = cacheIter.next();
            ImmutableBytesWritable key = ce.getKey();
            Aggregator[] aggs = ce.getValue();
            byte[] value = aggregators.toBytes(aggs);
            if (logger.isDebugEnabled()) {
                logger.debug("Adding new distinct group: "
                        + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength())
                        + " with aggregators " + Arrays.asList(aggs) + " value = "
                        + Bytes.toStringBinary(value));
            }
            results.add(KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(),
                    SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
            return cacheIter.hasNext();
        }
    };
}
Example 2
Source File: SpillableGroupByCache.java, from Apache Phoenix (Apache License 2.0)
@Override
public RegionScanner getScanner(final RegionScanner s) {
    final Iterator<Entry<ImmutableBytesWritable, Aggregator[]>> cacheIter = new EntryIterator();
    // scanner using the spillable implementation
    return new BaseRegionScanner() {
        @Override
        public HRegionInfo getRegionInfo() {
            return s.getRegionInfo();
        }

        @Override
        public void close() throws IOException {
            try {
                s.close();
            } finally {
                // Always close gbCache and swallow possible Exceptions
                Closeables.closeQuietly(SpillableGroupByCache.this);
            }
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            if (!cacheIter.hasNext()) {
                return false;
            }
            Map.Entry<ImmutableBytesWritable, Aggregator[]> ce = cacheIter.next();
            ImmutableBytesWritable key = ce.getKey();
            Aggregator[] aggs = ce.getValue();
            byte[] value = aggregators.toBytes(aggs);
            if (logger.isDebugEnabled()) {
                logger.debug("Adding new distinct group: "
                        + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength())
                        + " with aggregators " + Arrays.asList(aggs) + " value = "
                        + Bytes.toStringBinary(value));
            }
            results.add(KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(),
                    SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length));
            return cacheIter.hasNext();
        }

        @Override
        public long getMaxResultSize() {
            return s.getMaxResultSize();
        }
    };
}
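For context on how such a scanner is consumed: the RegionScanner contract is that next() fills the supplied list and returns whether more rows remain, so a final batch can arrive on the very call that returns false (Examples 1 and 2 do exactly that). A hypothetical drain loop, where scanner is one of the wrapped scanners above and the processing step is a placeholder:

List<Cell> results = new ArrayList<Cell>();
boolean hasMore;
do {
    results.clear();                  // reuse the buffer across calls
    hasMore = scanner.next(results);  // may still add a row when it returns false
    for (Cell cell : results) {
        // process each aggregated cell...
    }
} while (hasMore);
scanner.close();                      // also closes the delegate and the group-by cache

Processing the batch before testing hasMore is what makes the trailing row safe.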
Example 3
Source File: GroupedAggregateRegionObserver.java, from Apache Phoenix (Apache License 2.0)
@Override
public RegionScanner getScanner(final RegionScanner s) {
    // Compute final allocation
    long estSize = sizeOfUnorderedGroupByMap(aggregateMap.size(), aggregators.getEstimatedByteSize());
    chunk.resize(estSize);

    final List<KeyValue> aggResults = new ArrayList<KeyValue>(aggregateMap.size());
    final Iterator<Map.Entry<ImmutableBytesPtr, Aggregator[]>> cacheIter =
            aggregateMap.entrySet().iterator();
    while (cacheIter.hasNext()) {
        Map.Entry<ImmutableBytesPtr, Aggregator[]> entry = cacheIter.next();
        ImmutableBytesPtr key = entry.getKey();
        Aggregator[] rowAggregators = entry.getValue();
        // Generate byte array of Aggregators and set as value of row
        byte[] value = aggregators.toBytes(rowAggregators);
        if (logger.isDebugEnabled()) {
            logger.debug(LogUtil.addCustomAnnotations("Adding new distinct group: "
                    + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength())
                    + " with aggregators " + Arrays.asList(rowAggregators).toString()
                    + " value = " + Bytes.toStringBinary(value), customAnnotations));
        }
        KeyValue keyValue = KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(),
                SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
        aggResults.add(keyValue);
    }
    // scanner using the non spillable, memory-only implementation
    return new BaseRegionScanner() {
        private int index = 0;

        @Override
        public HRegionInfo getRegionInfo() {
            return s.getRegionInfo();
        }

        @Override
        public void close() throws IOException {
            try {
                s.close();
            } finally {
                InMemoryGroupByCache.this.close();
            }
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            if (index >= aggResults.size()) {
                return false;
            }
            results.add(aggResults.get(index));
            index++;
            return index < aggResults.size();
        }

        @Override
        public long getMaxResultSize() {
            return s.getMaxResultSize();
        }
    };
}
Example 4
Source File: GroupedAggregateRegionObserver.java, from Apache Phoenix (BSD 3-Clause "New" or "Revised" License)
@Override
public RegionScanner getScanner(final RegionScanner s) {
    // Compute final allocation
    int estSize = sizeOfUnorderedGroupByMap(aggregateMap.size(), aggregators.getEstimatedByteSize());
    chunk.resize(estSize);

    final List<KeyValue> aggResults = new ArrayList<KeyValue>(aggregateMap.size());
    final Iterator<Map.Entry<ImmutableBytesPtr, Aggregator[]>> cacheIter =
            aggregateMap.entrySet().iterator();
    while (cacheIter.hasNext()) {
        Map.Entry<ImmutableBytesPtr, Aggregator[]> entry = cacheIter.next();
        ImmutableBytesPtr key = entry.getKey();
        Aggregator[] rowAggregators = entry.getValue();
        // Generate byte array of Aggregators and set as value of row
        byte[] value = aggregators.toBytes(rowAggregators);
        if (logger.isDebugEnabled()) {
            logger.debug("Adding new distinct group: "
                    + Bytes.toStringBinary(key.get(), key.getOffset(), key.getLength())
                    + " with aggregators " + Arrays.asList(rowAggregators).toString()
                    + " value = " + Bytes.toStringBinary(value));
        }
        KeyValue keyValue = KeyValueUtil.newKeyValue(key.get(), key.getOffset(), key.getLength(),
                SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
        aggResults.add(keyValue);
    }
    // scanner using the non spillable, memory-only implementation
    return new BaseRegionScanner() {
        private int index = 0;

        @Override
        public HRegionInfo getRegionInfo() {
            return s.getRegionInfo();
        }

        @Override
        public void close() throws IOException {
            try {
                s.close();
            } finally {
                InMemoryGroupByCache.this.close();
            }
        }

        @Override
        public boolean next(List<KeyValue> results) throws IOException {
            if (index >= aggResults.size()) {
                return false;
            }
            results.add(aggResults.get(index));
            index++;
            return index < aggResults.size();
        }
    };
}
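Unlike the spillable variants in Examples 1 and 2, which stream rows from an iterator, Examples 3 and 4 precompute every aggregated KeyValue up front and then serve them by index. The index-based next() contract (add the current row first, then report whether more remain after it) is easy to get wrong, so here is a small, self-contained illustration outside HBase; ListScanner is a hypothetical stand-in, not an HBase or Phoenix type:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ListScanner {
    private final List<String> rows = Arrays.asList("row1", "row2", "row3");
    private int index = 0;

    // Mirrors the next() of Examples 3 and 4: the row at 'index' is added
    // first, and the return value says whether more rows remain after it.
    public boolean next(List<String> results) {
        if (index >= rows.size()) {
            return false;
        }
        results.add(rows.get(index));
        index++;
        return index < rows.size();
    }

    public static void main(String[] args) {
        ListScanner scanner = new ListScanner();
        List<String> batch = new ArrayList<String>();
        boolean hasMore;
        do {
            batch.clear();
            hasMore = scanner.next(batch);
            System.out.println(batch + " hasMore=" + hasMore);
        } while (hasMore);
        // Prints [row1] hasMore=true, [row2] hasMore=true, [row3] hasMore=false
    }
}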
Example 5
Source File: GroupedAggregateRegionObserver.java, from Apache Phoenix (BSD 3-Clause "New" or "Revised" License)
/**
 * Used for an aggregate query in which the key order matches the group by key
 * order. In this case, we can do the aggregation as we scan, by detecting when
 * the group by key changes.
 */
private RegionScanner scanOrdered(final ObserverContext<RegionCoprocessorEnvironment> c,
        Scan scan, final RegionScanner s, final List<Expression> expressions,
        final ServerAggregators aggregators) {
    if (logger.isDebugEnabled()) {
        logger.debug("Grouped aggregation over ordered rows with scan " + scan
                + ", group by " + expressions + ", aggregators " + aggregators);
    }
    return new BaseRegionScanner() {
        private ImmutableBytesWritable currentKey = null;

        @Override
        public HRegionInfo getRegionInfo() {
            return s.getRegionInfo();
        }

        @Override
        public void close() throws IOException {
            s.close();
        }

        @Override
        public boolean next(List<KeyValue> results) throws IOException {
            boolean hasMore;
            boolean aggBoundary = false;
            MultiKeyValueTuple result = new MultiKeyValueTuple();
            ImmutableBytesWritable key = null;
            Aggregator[] rowAggregators = aggregators.getAggregators();
            HRegion region = c.getEnvironment().getRegion();
            MultiVersionConsistencyControl.setThreadReadPoint(s.getMvccReadPoint());
            region.startRegionOperation();
            try {
                do {
                    List<KeyValue> kvs = new ArrayList<KeyValue>();
                    // Results are potentially returned even when the return value of
                    // s.next is false, since this is an indication of whether or not
                    // there are more values after the ones returned
                    hasMore = s.nextRaw(kvs, null);
                    if (!kvs.isEmpty()) {
                        result.setKeyValues(kvs);
                        key = TupleUtil.getConcatenatedValue(result, expressions);
                        aggBoundary = currentKey != null && currentKey.compareTo(key) != 0;
                        if (!aggBoundary) {
                            aggregators.aggregate(rowAggregators, result);
                            if (logger.isDebugEnabled()) {
                                logger.debug("Row passed filters: " + kvs
                                        + ", aggregated values: "
                                        + Arrays.asList(rowAggregators));
                            }
                            currentKey = key;
                        }
                    }
                } while (hasMore && !aggBoundary);
            } finally {
                region.closeRegionOperation();
            }
            if (currentKey != null) {
                byte[] value = aggregators.toBytes(rowAggregators);
                KeyValue keyValue = KeyValueUtil.newKeyValue(currentKey.get(),
                        currentKey.getOffset(), currentKey.getLength(),
                        SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP,
                        value, 0, value.length);
                results.add(keyValue);
                if (logger.isDebugEnabled()) {
                    logger.debug("Adding new aggregate row: " + keyValue
                            + ", for current key "
                            + Bytes.toStringBinary(currentKey.get(), currentKey.getOffset(),
                                    currentKey.getLength())
                            + ", aggregated values: " + Arrays.asList(rowAggregators));
                }
                // If we're at an aggregation boundary, reset the aggregators and
                // aggregate with the current result (which is not a part of
                // the returned result).
                if (aggBoundary) {
                    aggregators.reset(rowAggregators);
                    aggregators.aggregate(rowAggregators, result);
                    currentKey = key;
                }
            }
            // Continue if there are more
            if (hasMore || aggBoundary) {
                return true;
            }
            currentKey = null;
            return false;
        }
    };
}
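Example 5 hinges on one invariant: because rows arrive sorted by the group-by key, a group is complete the moment the concatenated key changes (the aggBoundary check). The same boundary detection, stripped of the HBase types, is sketched below; String keys and a running long sum are illustrative stand-ins for Phoenix's byte-array keys and Aggregator arrays:

import java.util.Arrays;
import java.util.List;

public class OrderedGroupBy {
    public static void main(String[] args) {
        // {groupKey, value} pairs, already sorted by group key
        List<String[]> rows = Arrays.asList(
                new String[] { "a", "1" },
                new String[] { "a", "2" },
                new String[] { "b", "5" });
        String currentKey = null;
        long sum = 0;
        for (String[] row : rows) {
            // A boundary is detected as soon as the sorted key changes
            boolean aggBoundary = currentKey != null && !currentKey.equals(row[0]);
            if (aggBoundary) {
                System.out.println(currentKey + " -> " + sum); // flush finished group
                sum = 0;                                       // reset the aggregator
            }
            sum += Long.parseLong(row[1]);
            currentKey = row[0];
        }
        if (currentKey != null) {
            System.out.println(currentKey + " -> " + sum);     // flush the final group
        }
        // Prints: a -> 3, then b -> 5
    }
}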