Java Code Examples for org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment#getConfiguration()

The following examples show how to use org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment#getConfiguration(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
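All of the examples below follow the same basic pattern: the coprocessor obtains a RegionCoprocessorEnvironment, either by casting the CoprocessorEnvironment passed to start() or from an ObserverContext, and calls getConfiguration() to read server-side settings. The sketch below shows that pattern in isolation; the observer class and the configuration key are hypothetical, and the HBase 2.x coprocessor API is assumed.

import java.io.IOException;
import java.util.Optional;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;

public class ConfigAwareObserver implements RegionCoprocessor, RegionObserver {

    // Hypothetical key; it would normally be set in hbase-site.xml.
    private static final String FEATURE_ENABLED_KEY = "example.feature.enabled";

    private boolean featureEnabled;

    @Override
    public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);
    }

    @Override
    public void start(CoprocessorEnvironment e) throws IOException {
        if (!(e instanceof RegionCoprocessorEnvironment)) {
            throw new IOException("Must be loaded on a region");
        }
        RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment) e;
        // Read server-side settings from the environment's Configuration.
        Configuration conf = env.getConfiguration();
        featureEnabled = conf.getBoolean(FEATURE_ENABLED_KEY, false);
    }
}

Note that several of the Phoenix examples (for instance Example 11) copy the entries of env.getConfiguration() into a fresh Configuration before modifying it, since the Configuration exposed by the environment should be treated as read-only.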
Example 1
Source File: ReplicationObserver.java    From hbase with Apache License 2.0
@Override
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
    justification="NPE should never happen; if it does it is a bigger issue")
public void preCommitStoreFile(final ObserverContext<RegionCoprocessorEnvironment> ctx,
    final byte[] family, final List<Pair<Path, Path>> pairs) throws IOException {
  RegionCoprocessorEnvironment env = ctx.getEnvironment();
  Configuration c = env.getConfiguration();
  if (pairs == null || pairs.isEmpty() ||
      !c.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
        HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT)) {
    LOG.debug("Skipping recording bulk load entries in preCommitStoreFile for bulkloaded "
        + "data replication.");
    return;
  }
  // This is completely cheating AND getting a HRegionServer from a RegionServerEnvironment is
  // just going to break. This is all private. Not allowed. Regions shouldn't assume they are
  // hosted in a RegionServer. TODO: fix.
  RegionServerServices rss = ((HasRegionServerServices)env).getRegionServerServices();
  Replication rep = (Replication)((HRegionServer)rss).getReplicationSourceService();
  rep.addHFileRefsToQueue(env.getRegionInfo().getTable(), family, pairs);
}
 
Example 2
Source File: GlobalIndexRegionScanner.java    From phoenix with Apache License 2.0
public GlobalIndexRegionScanner(RegionScanner innerScanner, final Region region, final Scan scan,
        final RegionCoprocessorEnvironment env) throws IOException {
    super(innerScanner);
    final Configuration config = env.getConfiguration();
    if (scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_PAGING) != null) {
        byte[] pageSizeFromScan =
                scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_PAGE_ROWS);
        if (pageSizeFromScan != null) {
            pageSizeInRows = Bytes.toLong(pageSizeFromScan);
        } else {
            pageSizeInRows =
                    config.getLong(INDEX_REBUILD_PAGE_SIZE_IN_ROWS,
                        QueryServicesOptions.DEFAULT_INDEX_REBUILD_PAGE_SIZE_IN_ROWS);
        }
    }
    maxBatchSize = config.getInt(MUTATE_BATCH_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE);
    indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    if (indexMetaData == null) {
        indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_MD);
    }
    List<IndexMaintainer> maintainers = IndexMaintainer.deserialize(indexMetaData, true);
    indexMaintainer = maintainers.get(0);
    this.scan = scan;
    this.innerScanner = innerScanner;
    this.region = region;
    // Create the following objects only for rebuilds by IndexTool
    hTableFactory = IndexWriterUtils.getDefaultDelegateHTableFactory(env);
    indexHTable = hTableFactory.getTable(new ImmutableBytesPtr(indexMaintainer.getIndexTableName()));
    indexTableTTL = indexHTable.getTableDescriptor().getColumnFamilies()[0].getTimeToLive();
    pool = new WaitForCompletionTaskRunner(ThreadPoolManager.getExecutor(
            new ThreadPoolBuilder("IndexVerify",
                    env.getConfiguration()).setMaxThread(NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY,
                    DEFAULT_CONCURRENT_INDEX_VERIFY_THREADS).setCoreTimeout(
                    INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env));
    rowCountPerTask = config.getInt(INDEX_VERIFY_ROW_COUNTS_PER_TASK_CONF_KEY,
            DEFAULT_INDEX_VERIFY_ROW_COUNTS_PER_TASK);
}
 
Example 3
Source File: PhoenixIndexFailurePolicy.java    From phoenix with Apache License 2.0
/**
 * Check config for whether to disable index on index write failures
 * @param env the region coprocessor environment, used to read the table descriptor and configuration
 * @return the value of the table property {@link PhoenixIndexFailurePolicy#DISABLE_INDEX_ON_WRITE_FAILURE}
 *         if set, otherwise the value from the cluster configuration
 */
public static boolean getDisableIndexOnFailure(RegionCoprocessorEnvironment env) {
    TableDescriptor htd = env.getRegion().getTableDescriptor();
    Configuration config = env.getConfiguration();
    String value = htd.getValue(PhoenixIndexFailurePolicy.DISABLE_INDEX_ON_WRITE_FAILURE);
    boolean disableIndexOnFailure;
    if (value == null) {
        disableIndexOnFailure =
                config.getBoolean(QueryServices.INDEX_FAILURE_DISABLE_INDEX,
                    QueryServicesOptions.DEFAULT_INDEX_FAILURE_DISABLE_INDEX);
    } else {
        disableIndexOnFailure = Boolean.parseBoolean(value);
    }
    return disableIndexOnFailure;
}
 
Example 4
Source File: AccessController.java    From hbase with Apache License 2.0
private void initialize(RegionCoprocessorEnvironment e) throws IOException {
  final Region region = e.getRegion();
  Configuration conf = e.getConfiguration();
  Map<byte[], ListMultimap<String, UserPermission>> tables = PermissionStorage.loadAll(region);
  // For each table, write out the table's permissions to the respective
  // znode for that table.
  for (Map.Entry<byte[], ListMultimap<String, UserPermission>> t:
    tables.entrySet()) {
    byte[] entry = t.getKey();
    ListMultimap<String, UserPermission> perms = t.getValue();
    byte[] serialized = PermissionStorage.writePermissionsAsBytes(perms, conf);
    zkPermissionWatcher.writeToZookeeper(entry, serialized);
  }
  initialized = true;
}
 
Example 5
Source File: ParallelWriterIndexCommitter.java    From phoenix with BSD 3-Clause "New" or "Revised" License
@Override
public void setup(IndexWriter parent, RegionCoprocessorEnvironment env, String name) {
  Configuration conf = env.getConfiguration();
  setup(IndexWriterUtils.getDefaultDelegateHTableFactory(env),
    ThreadPoolManager.getExecutor(
      new ThreadPoolBuilder(name, conf).
        setMaxThread(NUM_CONCURRENT_INDEX_WRITER_THREADS_CONF_KEY,
          DEFAULT_CONCURRENT_INDEX_WRITER_THREADS).
        setCoreTimeout(INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env),
    env.getRegionServerServices(), parent, CachingHTableFactory.getCacheSize(conf));
}
 
Example 6
Source File: StatisticsScanner.java    From phoenix with Apache License 2.0
public StatisticsScanner(StatisticsCollector tracker, StatisticsWriter stats, RegionCoprocessorEnvironment env,
        InternalScanner delegate, ImmutableBytesPtr family) {
    this.tracker = tracker;
    this.statsWriter = stats;
    this.delegate = delegate;
    this.region = env.getRegion();
    this.env = env;
    this.family = family;
    this.config = env.getConfiguration();
    StatisticsCollectionRunTracker.getInstance(config).addCompactingRegion(region.getRegionInfo());
}
 
Example 7
Source File: PhoenixTransactionalIndexer.java    From phoenix with Apache License 2.0
@Override
public void start(CoprocessorEnvironment e) throws IOException {
    final RegionCoprocessorEnvironment env = (RegionCoprocessorEnvironment)e;
    String serverName = env.getServerName().getServerName();
    codec = new PhoenixIndexCodec(env.getConfiguration(), env.getRegionInfo().getTable().getName());
    DelegateRegionCoprocessorEnvironment indexWriterEnv = new DelegateRegionCoprocessorEnvironment(env, ConnectionType.INDEX_WRITER_CONNECTION);
    // setup the actual index writer
    // For transactional tables, we keep the index active upon a write failure
    // since transactions give us all-or-nothing behavior. Also, we
    // fail on any write exception since this will end up failing the transaction.
    this.writer = new IndexWriter(IndexWriter.getCommitter(indexWriterEnv, ParallelWriterIndexCommitter.class),
            new LeaveIndexActiveFailurePolicy(), indexWriterEnv, serverName + "-tx-index-writer");
}
 
Example 8
Source File: GlobalCache.java    From phoenix with Apache License 2.0
public static GlobalCache getInstance(RegionCoprocessorEnvironment env) {
    GlobalCache result = INSTANCE;
    if (result == null) {
        synchronized(GlobalCache.class) {
            result = INSTANCE;
            if(result == null) {
                INSTANCE = result = new GlobalCache(env.getConfiguration());
            }
        }
    }
    return result;
}
 
Example 9
Source File: GroupedAggregateRegionObserver.java    From phoenix with Apache License 2.0
GroupByCache newCache(RegionCoprocessorEnvironment env, ImmutableBytesPtr tenantId, byte[] customAnnotations, ServerAggregators aggregators, int estDistVals) {
    Configuration conf = env.getConfiguration();
    boolean spillableEnabled =
            conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
    if (spillableEnabled) {
        return new SpillableGroupByCache(env, tenantId, aggregators, estDistVals);
    }

    return new InMemoryGroupByCache(env, tenantId, customAnnotations, aggregators, estDistVals);
}
 
Example 10
Source File: GroupedAggregateRegionObserver.java    From phoenix with BSD 3-Clause "New" or "Revised" License
GroupByCache newCache(RegionCoprocessorEnvironment env, ImmutableBytesWritable tenantId, ServerAggregators aggregators, int estDistVals) {
    Configuration conf = env.getConfiguration();
    boolean spillableEnabled =
            conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
    if (spillableEnabled) {
        return new SpillableGroupByCache(env, tenantId, aggregators, estDistVals);
    } 
    
    return new InMemoryGroupByCache(env, tenantId, aggregators, estDistVals);
}
 
Example 11
Source File: PhoenixMetaDataCoprocessorHost.java    From phoenix with Apache License 2.0
PhoenixMetaDataCoprocessorHost(RegionCoprocessorEnvironment env) throws IOException {
    super(null);
    this.env = env;
    this.conf = new Configuration();
    for (Entry<String, String> entry : env.getConfiguration()) {
        conf.set(entry.getKey(), entry.getValue());
    }
    boolean accessCheckEnabled = this.conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
            QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
    if (this.conf.get(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY) == null && accessCheckEnabled) {
        this.conf.set(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY, DEFAULT_PHOENIX_META_DATA_COPROCESSOR_CONF_KEY);
    }
    loadSystemCoprocessors(conf, PHOENIX_META_DATA_COPROCESSOR_CONF_KEY);
}
 
Example 12
Source File: TransactionProcessor.java    From phoenix-tephra with Apache License 2.0
protected CacheSupplier<TransactionStateCache> getTransactionStateCacheSupplier(RegionCoprocessorEnvironment env) {
  return new TransactionStateCacheSupplier(env.getConfiguration());
}
 
Example 13
Source File: IndexRebuildRegionScanner.java    From phoenix with Apache License 2.0
@VisibleForTesting
public IndexRebuildRegionScanner(final RegionScanner innerScanner, final Region region, final Scan scan,
                          final RegionCoprocessorEnvironment env,
                          UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) throws IOException {
    super(innerScanner, region, scan, env);
    final Configuration config = env.getConfiguration();
    if (scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_PAGING) == null) {
        partialRebuild = true;
    }
    maxBatchSizeBytes = config.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
            QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
    mutations = new UngroupedAggregateRegionObserver.MutationList(maxBatchSize);
    blockingMemstoreSize = UngroupedAggregateRegionObserver.getBlockingMemstoreSize(region, config);
    clientVersionBytes = scan.getAttribute(BaseScannerRegionObserver.CLIENT_VERSION);
    indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    if (indexMetaData == null) {
        useProto = false;
    }
    familyMap = scan.getFamilyMap();
    if (familyMap.isEmpty()) {
        familyMap = null;
    }
    this.ungroupedAggregateRegionObserver = ungroupedAggregateRegionObserver;
    indexRowKey = scan.getAttribute(BaseScannerRegionObserver.INDEX_ROW_KEY);
    if (indexRowKey != null) {
        setReturnCodeForSingleRowRebuild();
        pageSizeInRows = 1;
    }
    byte[] valueBytes = scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_VERIFY_TYPE);
    if (valueBytes != null) {
        verifyType = IndexTool.IndexVerifyType.fromValue(valueBytes);
        if (verifyType != IndexTool.IndexVerifyType.NONE) {
            verify = true;
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
            byte[] disableLoggingValueBytes =
                scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_DISABLE_LOGGING_VERIFY_TYPE);
            if (disableLoggingValueBytes != null) {
                disableLoggingVerifyType =
                    IndexTool.IndexDisableLoggingType.fromValue(disableLoggingValueBytes);
            }
            verificationOutputRepository =
                new IndexVerificationOutputRepository(indexMaintainer.getIndexTableName()
                    , hTableFactory, disableLoggingVerifyType);
            verificationResult = new IndexToolVerificationResult(scan);
            verificationResultRepository =
                new IndexVerificationResultRepository(indexMaintainer.getIndexTableName(), hTableFactory);
            indexKeyToMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
            dataKeyToMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
            pool = new WaitForCompletionTaskRunner(ThreadPoolManager.getExecutor(
                    new ThreadPoolBuilder("IndexVerify",
                            env.getConfiguration()).setMaxThread(NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY,
                            DEFAULT_CONCURRENT_INDEX_VERIFY_THREADS).setCoreTimeout(
                            INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env));
            nextStartKey = null;
            minTimestamp = scan.getTimeRange().getMin();
        }
    }
}
 
Example 14
Source File: SpillableGroupByCache.java    From phoenix with BSD 3-Clause "New" or "Revised" License
/**
 * Instantiates a loading LRU cache that stores key / Aggregator[] tuples used for group by queries
 *
 * @param env region coprocessor environment providing the tenant cache and configuration
 * @param tenantId tenant whose memory manager backs the cache
 * @param aggs server-side aggregators, used to estimate the size of each cache entry
 * @param estSizeNum estimated number of distinct groups
 */
public SpillableGroupByCache(final RegionCoprocessorEnvironment env, ImmutableBytesWritable tenantId,
        ServerAggregators aggs, final int estSizeNum) {
    curNumCacheElements = 0;
    this.aggregators = aggs;
    this.env = env;

    final int estValueSize = aggregators.getEstimatedByteSize();
    final TenantCache tenantCache = GlobalCache.getTenantCache(env, tenantId);

    // Compute the initial map size
    final Configuration conf = env.getConfiguration();
    final long maxCacheSizeConf = conf.getLong(GROUPBY_MAX_CACHE_SIZE_ATTRIB, DEFAULT_GROUPBY_MAX_CACHE_MAX);
    final int numSpillFilesConf = conf.getInt(GROUPBY_SPILL_FILES_ATTRIB, DEFAULT_GROUPBY_SPILL_FILES);

    final int maxSizeNum = (int)(maxCacheSizeConf / estValueSize);
    final int minSizeNum = (SPGBY_CACHE_MIN_SIZE / estValueSize);

    // use upper and lower bounds for the cache size
    final int maxCacheSize = Math.max(minSizeNum, Math.min(maxSizeNum, estSizeNum));
    final int estSize = GroupedAggregateRegionObserver.sizeOfUnorderedGroupByMap(maxCacheSize, estValueSize);
    try {
        this.chunk = tenantCache.getMemoryManager().allocate(estSize);
    } catch (InsufficientMemoryException ime) {
        logger.error("Requested Map size exceeds memory limit, please decrease max size via config paramter: "
                + GROUPBY_MAX_CACHE_SIZE_ATTRIB);
        throw ime;
    }

    if (logger.isDebugEnabled()) {
        logger.debug("Instantiating LRU groupby cache of element size: " + maxCacheSize);
    }

    // LRU cache implemented as LinkedHashMap with access order
    cache = new LinkedHashMap<ImmutableBytesWritable, Aggregator[]>(maxCacheSize, 0.75f, true) {
        boolean spill = false;
        int cacheSize = maxCacheSize;

        @Override
        protected boolean removeEldestEntry(Map.Entry<ImmutableBytesWritable, Aggregator[]> eldest) {
            if (!spill && size() > cacheSize) { // increase allocation
                cacheSize *= 1.5f;
                int estSize = GroupedAggregateRegionObserver.sizeOfUnorderedGroupByMap(cacheSize, estValueSize);
                try {
                    chunk.resize(estSize);
                } catch (InsufficientMemoryException im) {
                    // Cannot extend Map anymore, start spilling
                    spill = true;
                }
            }

            if (spill) {
                try {
                    if (spillManager == null) {
                        // Lazy instantiation of spillable data
                        // structures
                        //
                        // Only create spill data structs if LRU
                        // cache is too small
                        spillManager = new SpillManager(numSpillFilesConf, aggregators, env.getConfiguration(),
                                new QueryCache());
                    }
                    spillManager.spill(eldest.getKey(), eldest.getValue());
                    // keep track of elements in cache
                    curNumCacheElements--;
                } catch (IOException ioe) {
                    // Ensure that we always close and delete the temp files
                    try {
                        throw new RuntimeException(ioe);
                    } finally {
                        Closeables.closeQuietly(SpillableGroupByCache.this);
                    }
                }
                return true;
            }

            return false;
        }
    };
}
 
Example 15
Source File: PhoenixTransactionalIndexer.java    From phoenix with Apache License 2.0
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
        MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {

    Mutation m = miniBatchOp.getOperation(0);
    if (!codec.isEnabled(m)) {
        return;
    }

    PhoenixIndexMetaData indexMetaData = new PhoenixIndexMetaDataBuilder(c.getEnvironment()).getIndexMetaData(miniBatchOp);
    if (    indexMetaData.getClientVersion() >= MetaDataProtocol.MIN_TX_CLIENT_SIDE_MAINTENANCE
        && !indexMetaData.hasLocalIndexes()) { // Still generate index updates server side for local indexes
        return;
    }
    BatchMutateContext context = new BatchMutateContext(indexMetaData.getClientVersion());
    setBatchMutateContext(c, context);
    
    Collection<Pair<Mutation, byte[]>> indexUpdates = null;
    // get the current span, or just use a null-span to avoid a bunch of if statements
    try (TraceScope scope = Trace.startSpan("Starting to build index updates")) {
        Span current = scope.getSpan();
        if (current == null) {
            current = NullSpan.INSTANCE;
        }

        RegionCoprocessorEnvironment env = c.getEnvironment();
        PhoenixTransactionContext txnContext = indexMetaData.getTransactionContext();
        if (txnContext == null) {
            throw new NullPointerException("Expected to find transaction in metadata for " + env.getRegionInfo().getTable().getNameAsString());
        }
        PhoenixTxIndexMutationGenerator generator = new PhoenixTxIndexMutationGenerator(env.getConfiguration(), indexMetaData,
                env.getRegionInfo().getTable().getName(), 
                env.getRegionInfo().getStartKey(), 
                env.getRegionInfo().getEndKey());
        try (Table htable = env.getConnection().getTable(env.getRegionInfo().getTable())) {
            // get the index updates for all elements in this batch
            indexUpdates = generator.getIndexUpdates(htable, getMutationIterator(miniBatchOp));
        }
        byte[] tableName = c.getEnvironment().getRegionInfo().getTable().getName();
        Iterator<Pair<Mutation, byte[]>> indexUpdatesItr = indexUpdates.iterator();
        List<Mutation> localUpdates = new ArrayList<Mutation>(indexUpdates.size());
        while(indexUpdatesItr.hasNext()) {
            Pair<Mutation, byte[]> next = indexUpdatesItr.next();
            if (Bytes.compareTo(next.getSecond(), tableName) == 0) {
                // These mutations will not go through the preDelete hooks, so we
                // must manually convert them here.
                Mutation mutation = TransactionUtil.convertIfDelete(next.getFirst());
                localUpdates.add(mutation);
                indexUpdatesItr.remove();
            }
        }
        if (!localUpdates.isEmpty()) {
            miniBatchOp.addOperationsFromCP(0,
                localUpdates.toArray(new Mutation[localUpdates.size()]));
        }
        if (!indexUpdates.isEmpty()) {
            context.indexUpdates = indexUpdates;
        }

        current.addTimelineAnnotation("Built index updates, doing preStep");
        TracingUtils.addAnnotation(current, "index update count", context.indexUpdates.size());
    } catch (Throwable t) {
        String msg = "Failed to update index with entries:" + indexUpdates;
        LOGGER.error(msg, t);
        ServerUtil.throwIOException(msg, t);
    }
}
 
Example 16
Source File: GroupedAggregateRegionObserver.java    From phoenix with Apache License 2.0
/**
 * Used for an aggregate query in which the key order does not necessarily match the group by
 * key order. In this case, we must collect all distinct groups within a region into a map,
 * aggregating as we go.
 * @param limit TODO
 */
private RegionScanner scanUnordered(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
        final RegionScanner scanner, final List<Expression> expressions,
        final ServerAggregators aggregators, long limit) throws IOException {
    if (LOGGER.isDebugEnabled()) {
        LOGGER.debug(LogUtil.addCustomAnnotations(
                "Grouped aggregation over unordered rows with scan " + scan
                + ", group by " + expressions + ", aggregators " + aggregators,
                ScanUtil.getCustomAnnotations(scan)));
    }
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Configuration conf = env.getConfiguration();
    int estDistVals = conf.getInt(GROUPBY_ESTIMATED_DISTINCT_VALUES_ATTRIB, DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES);
    byte[] estDistValsBytes = scan.getAttribute(BaseScannerRegionObserver.ESTIMATED_DISTINCT_VALUES);
    if (estDistValsBytes != null) {
        // Allocate 1.5x estimation
        estDistVals = Math.max(MIN_DISTINCT_VALUES,
                        (int) (Bytes.toInt(estDistValsBytes) * 1.5f));
    }
    
    Pair<Integer, Integer> minMaxQualifiers = EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan);
    boolean useQualifierAsIndex = EncodedColumnsUtil.useQualifierAsIndex(EncodedColumnsUtil.getMinMaxQualifiersFromScan(scan));
    final boolean spillableEnabled =
            conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);
    final PTable.QualifierEncodingScheme encodingScheme = EncodedColumnsUtil.getQualifierEncodingScheme(scan);

    GroupByCache groupByCache =
            GroupByCacheFactory.INSTANCE.newCache(
                    env, ScanUtil.getTenantId(scan), ScanUtil.getCustomAnnotations(scan),
                    aggregators, estDistVals);
    boolean success = false;
    try {
        boolean hasMore;
        Tuple result = useQualifierAsIndex ? new PositionBasedMultiKeyValueTuple() : new MultiKeyValueTuple();
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(LogUtil.addCustomAnnotations(
                    "Spillable groupby enabled: " + spillableEnabled,
                    ScanUtil.getCustomAnnotations(scan)));
        }
        Region region = c.getEnvironment().getRegion();
        boolean acquiredLock = false;
        try {
            region.startRegionOperation();
            acquiredLock = true;
            synchronized (scanner) {
                do {
                    List<Cell> results = useQualifierAsIndex ? new EncodedColumnQualiferCellsList(minMaxQualifiers.getFirst(), minMaxQualifiers.getSecond(), encodingScheme) : new ArrayList<Cell>();
                    // Results may be returned even when scanner.nextRaw() returns false,
                    // since that return value only indicates whether there are more
                    // values after the ones returned.
                    hasMore = scanner.nextRaw(results);
                    if (!results.isEmpty()) {
                        result.setKeyValues(results);
                        ImmutableBytesPtr key =
                            TupleUtil.getConcatenatedValue(result, expressions);
                        Aggregator[] rowAggregators = groupByCache.cache(key);
                        // Aggregate values here
                        aggregators.aggregate(rowAggregators, result);
                    }
                } while (hasMore && groupByCache.size() < limit);
            }
        }  finally {
            if (acquiredLock) region.closeRegionOperation();
        }

        RegionScanner regionScanner = groupByCache.getScanner(scanner);

        // Do not sort here, but sort back on the client instead
        // The reason is that if the scan ever extends beyond a region
        // (which can happen if we're basing our parallelization split
        // points on old metadata), we'll get incorrect query results.
        success = true;
        return regionScanner;
    } finally {
        if (!success) {
            Closeables.closeQuietly(groupByCache);
        }
    }
}
 