Java Code Examples for org.apache.hadoop.hbase.client.Scan#getAttribute()

The following examples show how to use org.apache.hadoop.hbase.client.Scan#getAttribute(). Each example lists the project and source file it was taken from.
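Scan attributes are opaque byte[] values keyed by a String name: client code attaches them with Scan#setAttribute(String, byte[]) and server-side code (typically a coprocessor) reads them back with Scan#getAttribute(String), which returns null when the attribute was never set. The minimal sketch below illustrates that round trip; the attribute name "my.custom.flag" is purely illustrative, and only hbase-client is assumed on the classpath.

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanAttributeSketch {
    public static void main(String[] args) {
        Scan scan = new Scan();
        // Attach an arbitrary flag to the scan; attribute values are always byte arrays.
        scan.setAttribute("my.custom.flag", Bytes.toBytes(true));

        // Read the attribute back; getAttribute returns null for attributes that were never set.
        byte[] flag = scan.getAttribute("my.custom.flag");
        boolean enabled = flag != null && Bytes.toBoolean(flag);
        System.out.println("flag enabled: " + enabled);           // flag enabled: true
        System.out.println(scan.getAttribute("missing") == null); // true
    }
}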
Example 1
Source File: IndexHalfStoreFileReader.java    From phoenix with Apache License 2.0
@Override
public boolean passesKeyRangeFilter(Scan scan) {
    if (scan.getAttribute(SCAN_START_ROW_SUFFIX) == null) {
        // Scan from compaction.
        return true;
    }
    byte[] startKey = currentRegion.getStartKey();
    byte[] endKey = currentRegion.getEndKey();
    // If the region start key is not a prefix of the scan start row, we can return empty
    // scanners. This is possible during a merge, where a scan over one of the child regions
    // should not return any results once we go through the merged region.
    int prefixLength = scan.getStartRow().length - scan.getAttribute(SCAN_START_ROW_SUFFIX).length;
    if (Bytes.compareTo(scan.getStartRow(), 0, prefixLength,
        (startKey.length == 0 ? new byte[endKey.length] : startKey), 0,
        (startKey.length == 0 ? endKey.length : startKey.length)) != 0) {
        return false;
    }
    return true;
}
 
Example 2
Source File: ScanUtil.java    From phoenix with Apache License 2.0
/**
 * Prefixes the region start key to the start row/stop row suffix attributes and sets the
 * results as the scan boundaries.
 * @param scan the local index scan whose boundaries are adjusted
 */
public static void setupLocalIndexScan(Scan scan) {
    byte[] prefix = scan.getStartRow().length == 0 ? new byte[scan.getStopRow().length]: scan.getStartRow();
    int prefixLength = scan.getStartRow().length == 0? scan.getStopRow().length: scan.getStartRow().length;
    if(scan.getAttribute(SCAN_START_ROW_SUFFIX)!=null) {
        scan.setStartRow(ScanRanges.prefixKey(scan.getAttribute(SCAN_START_ROW_SUFFIX), 0, prefix, prefixLength));
    }
    if(scan.getAttribute(SCAN_STOP_ROW_SUFFIX)!=null) {
        scan.setStopRow(ScanRanges.prefixKey(scan.getAttribute(SCAN_STOP_ROW_SUFFIX), 0, prefix, prefixLength));
    }
}
 
Example 3
Source File: BaseScannerRegionObserver.java    From phoenix with Apache License 2.0
private static void throwIfScanOutOfRegion(Scan scan, Region region) throws DoNotRetryIOException {
    boolean isLocalIndex = ScanUtil.isLocalIndex(scan);
    byte[] lowerInclusiveScanKey = scan.getStartRow();
    byte[] upperExclusiveScanKey = scan.getStopRow();
    byte[] lowerInclusiveRegionKey = region.getRegionInfo().getStartKey();
    byte[] upperExclusiveRegionKey = region.getRegionInfo().getEndKey();
    boolean isStaleRegionBoundaries;
    if (isLocalIndex) {
        // For local indexes we have to abort any scan that was open during a split.
        // We detect that condition as follows:
        // 1. The scanner's stop row has to always match the region's end key.
        // 2. Phoenix sets the SCAN_ACTUAL_START_ROW attribute to the scan's original start row
        //    We cannot directly compare that with the region's start key, but can enforce that
        //    the original start row still falls within the new region.
        byte[] expectedUpperRegionKey =
                scan.getAttribute(EXPECTED_UPPER_REGION_KEY) == null ? scan.getStopRow() : scan
                        .getAttribute(EXPECTED_UPPER_REGION_KEY);

        byte[] actualStartRow = scan.getAttribute(SCAN_ACTUAL_START_ROW);
        isStaleRegionBoundaries = (expectedUpperRegionKey != null &&
                Bytes.compareTo(upperExclusiveRegionKey, expectedUpperRegionKey) != 0) || 
                (actualStartRow != null && Bytes.compareTo(actualStartRow, lowerInclusiveRegionKey) < 0);
    } else {
        isStaleRegionBoundaries = Bytes.compareTo(lowerInclusiveScanKey, lowerInclusiveRegionKey) < 0 ||
                ( Bytes.compareTo(upperExclusiveScanKey, upperExclusiveRegionKey) > 0 && upperExclusiveRegionKey.length != 0) ||
                (upperExclusiveRegionKey.length != 0 && upperExclusiveScanKey.length == 0);
    }
    if (isStaleRegionBoundaries) {
        Exception cause = new StaleRegionBoundaryCacheException(region.getRegionInfo().getTable().getNameAsString());
        throw new DoNotRetryIOException(cause.getMessage(), cause);
    }
    if(isLocalIndex) {
        ScanUtil.setupLocalIndexScan(scan);
    }
}
 
Example 4
Source File: MobUtils.java    From hbase with Apache License 2.0
/**
 * Indicates whether this is a reference-only scan. The information is set in the scan
 * attribute "hbase.mob.scan.ref.only". If it is a reference-only scan, only the cells with a
 * ref tag are returned.
 * @param scan The current scan.
 * @return True if it is a reference-only scan.
 */
public static boolean isRefOnlyScan(Scan scan) {
  byte[] refOnly = scan.getAttribute(MobConstants.MOB_SCAN_REF_ONLY);
  try {
    return refOnly != null && Bytes.toBoolean(refOnly);
  } catch (IllegalArgumentException e) {
    return false;
  }
}
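
The attribute checked above is normally set by the client that opens the scan. Below is a short, hedged sketch of that producing side, assuming the HBase MOB classes (MobConstants, MobUtils) are available on the classpath:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.mob.MobConstants;
import org.apache.hadoop.hbase.mob.MobUtils;
import org.apache.hadoop.hbase.util.Bytes;

public class RefOnlyScanSketch {
    public static void main(String[] args) {
        Scan scan = new Scan();
        // Ask MOB-aware scanners to return only the reference cells, not the resolved MOB values.
        scan.setAttribute(MobConstants.MOB_SCAN_REF_ONLY, Bytes.toBytes(true));
        // The helper from the example above now reports the scan as reference-only.
        System.out.println(MobUtils.isRefOnlyScan(scan)); // true
    }
}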
 
Example 5
Source File: ScanUtil.java    From phoenix with Apache License 2.0
public static byte[] getActualStartRow(Scan localIndexScan, RegionInfo regionInfo) {
    return localIndexScan.getAttribute(SCAN_START_ROW_SUFFIX) == null ? localIndexScan
            .getStartRow() : ScanRanges.prefixKey(localIndexScan.getAttribute(SCAN_START_ROW_SUFFIX), 0 ,
        regionInfo.getStartKey().length == 0 ? new byte[regionInfo.getEndKey().length]
                : regionInfo.getStartKey(),
        regionInfo.getStartKey().length == 0 ? regionInfo.getEndKey().length : regionInfo
                .getStartKey().length);
}
 
Example 6
Source File: ScanUtil.java    From phoenix with Apache License 2.0
public static ImmutableBytesWritable getTenantId(Scan scan) {
    // The tenant ID, if present, is carried as a scan attribute set by the Phoenix client
    byte[] tenantId = scan.getAttribute(PhoenixRuntime.TENANT_ID_ATTRIB);
    if (tenantId == null) {
        return null;
    }
    return new ImmutableBytesWritable(tenantId);
}
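
Phoenix sets this attribute when a tenant-specific connection issues the query. A hedged sketch of the producing side is shown below, assuming the Phoenix utility classes are available; the tenant value "acme" is purely illustrative:

import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.util.PhoenixRuntime;
import org.apache.phoenix.util.ScanUtil;

public class TenantIdAttributeSketch {
    public static void main(String[] args) {
        Scan scan = new Scan();
        // The attribute name is PhoenixRuntime.TENANT_ID_ATTRIB, as read by the example above.
        scan.setAttribute(PhoenixRuntime.TENANT_ID_ATTRIB, Bytes.toBytes("acme"));
        ImmutableBytesWritable tenantId = ScanUtil.getTenantId(scan);
        System.out.println(tenantId != null); // true
    }
}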
 
Example 7
Source File: ScanUtil.java    From phoenix with Apache License 2.0
public static byte[] getCustomAnnotations(Scan scan) {
	return scan.getAttribute(CUSTOM_ANNOTATIONS);
}
 
Example 8
Source File: EncodedColumnsUtil.java    From phoenix with Apache License 2.0
public static boolean useNewValueColumnQualifier(Scan s) {
    // null check for backward compatibility
    return s.getAttribute(BaseScannerRegionObserver.USE_NEW_VALUE_COLUMN_QUALIFIER) != null;
}
 
Example 9
Source File: GroupedAggregateRegionObserver.java    From phoenix with Apache License 2.0
@Override
protected boolean isRegionObserverFor(Scan scan) {
    return scan.getAttribute(BaseScannerRegionObserver.UNORDERED_GROUP_BY_EXPRESSIONS) != null ||
           scan.getAttribute(BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS) != null;
}
 
Example 10
Source File: ScanUtil.java    From phoenix with Apache License 2.0
public static boolean isAnalyzeTable(Scan scan) {
    return scan.getAttribute((BaseScannerRegionObserver.ANALYZE_TABLE)) != null;
}
 
Example 11
Source File: IndexRebuildRegionScanner.java    From phoenix with Apache License 2.0
@VisibleForTesting
public IndexRebuildRegionScanner(final RegionScanner innerScanner, final Region region, final Scan scan,
                          final RegionCoprocessorEnvironment env,
                          UngroupedAggregateRegionObserver ungroupedAggregateRegionObserver) throws IOException {
    super(innerScanner, region, scan, env);
    final Configuration config = env.getConfiguration();
    if (scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_PAGING) == null) {
        partialRebuild = true;
    }
    maxBatchSizeBytes = config.getLong(MUTATE_BATCH_SIZE_BYTES_ATTRIB,
            QueryServicesOptions.DEFAULT_MUTATE_BATCH_SIZE_BYTES);
    mutations = new UngroupedAggregateRegionObserver.MutationList(maxBatchSize);
    blockingMemstoreSize = UngroupedAggregateRegionObserver.getBlockingMemstoreSize(region, config);
    clientVersionBytes = scan.getAttribute(BaseScannerRegionObserver.CLIENT_VERSION);
    indexMetaData = scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD);
    if (indexMetaData == null) {
        useProto = false;
    }
    familyMap = scan.getFamilyMap();
    if (familyMap.isEmpty()) {
        familyMap = null;
    }
    this.ungroupedAggregateRegionObserver = ungroupedAggregateRegionObserver;
    indexRowKey = scan.getAttribute(BaseScannerRegionObserver.INDEX_ROW_KEY);
    if (indexRowKey != null) {
        setReturnCodeForSingleRowRebuild();
        pageSizeInRows = 1;
    }
    byte[] valueBytes = scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_VERIFY_TYPE);
    if (valueBytes != null) {
        verifyType = IndexTool.IndexVerifyType.fromValue(valueBytes);
        if (verifyType != IndexTool.IndexVerifyType.NONE) {
            verify = true;
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
            byte[] disableLoggingValueBytes =
                scan.getAttribute(BaseScannerRegionObserver.INDEX_REBUILD_DISABLE_LOGGING_VERIFY_TYPE);
            if (disableLoggingValueBytes != null) {
                disableLoggingVerifyType =
                    IndexTool.IndexDisableLoggingType.fromValue(disableLoggingValueBytes);
            }
            verificationOutputRepository =
                new IndexVerificationOutputRepository(indexMaintainer.getIndexTableName()
                    , hTableFactory, disableLoggingVerifyType);
            verificationResult = new IndexToolVerificationResult(scan);
            verificationResultRepository =
                new IndexVerificationResultRepository(indexMaintainer.getIndexTableName(), hTableFactory);
            indexKeyToMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
            dataKeyToMutationMap = Maps.newTreeMap(Bytes.BYTES_COMPARATOR);
            pool = new WaitForCompletionTaskRunner(ThreadPoolManager.getExecutor(
                    new ThreadPoolBuilder("IndexVerify",
                            env.getConfiguration()).setMaxThread(NUM_CONCURRENT_INDEX_VERIFY_THREADS_CONF_KEY,
                            DEFAULT_CONCURRENT_INDEX_VERIFY_THREADS).setCoreTimeout(
                            INDEX_WRITER_KEEP_ALIVE_TIME_CONF_KEY), env));
            nextStartKey = null;
            minTimestamp = scan.getTimeRange().getMin();
        }
    }
}
 
Example 12
Source File: UngroupedAggregateRegionObserver.java    From phoenix with Apache License 2.0
private RegionScanner collectStats(final RegionScanner innerScanner, StatisticsCollector stats,
        final Region region, final Scan scan, Configuration config) throws IOException {
    StatsCollectionCallable callable =
            new StatsCollectionCallable(stats, region, innerScanner, config, scan);
    byte[] asyncBytes = scan.getAttribute(BaseScannerRegionObserver.RUN_UPDATE_STATS_ASYNC_ATTRIB);
    boolean async = false;
    if (asyncBytes != null) {
        async = Bytes.toBoolean(asyncBytes);
    }
    long rowCount = 0; // in case of async, we report 0 as number of rows updated
    StatisticsCollectionRunTracker statsRunTracker =
            StatisticsCollectionRunTracker.getInstance(config);
    final boolean runUpdateStats = statsRunTracker.addUpdateStatsCommandRegion(region.getRegionInfo(),scan.getFamilyMap().keySet());
    if (runUpdateStats) {
        if (!async) {
            rowCount = callable.call();
        } else {
            statsRunTracker.runTask(callable);
        }
    } else {
        rowCount = CONCURRENT_UPDATE_STATS_ROW_COUNT;
        LOGGER.info("UPDATE STATISTICS didn't run because another UPDATE STATISTICS command was already running on the region "
                + region.getRegionInfo().getRegionNameAsString());
    }
    byte[] rowCountBytes = PLong.INSTANCE.toBytes(Long.valueOf(rowCount));
    final Cell aggKeyValue =
            PhoenixKeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY,
                SINGLE_COLUMN, AGG_TIMESTAMP, rowCountBytes, 0, rowCountBytes.length);
    RegionScanner scanner = new BaseRegionScanner(innerScanner) {
        @Override
        public RegionInfo getRegionInfo() {
            return region.getRegionInfo();
        }

        @Override
        public boolean isFilterDone() {
            return true;
        }

        @Override
        public void close() throws IOException {
            // If we ran/scheduled StatsCollectionCallable the delegate
            // scanner is closed there. Otherwise close it here.
            if (!runUpdateStats) {
                super.close();
            }
        }

        @Override
        public boolean next(List<Cell> results) throws IOException {
            results.add(aggKeyValue);
            return false;
        }

        @Override
        public long getMaxResultSize() {
            return scan.getMaxResultSize();
        }
    };
    return scanner;
}
 
Example 13
Source File: UngroupedAggregateRegionObserver.java    From phoenix with Apache License 2.0
@Override
protected boolean isRegionObserverFor(Scan scan) {
    return scan.getAttribute(BaseScannerRegionObserver.UNGROUPED_AGG) != null;
}
 
Example 14
Source File: ScanUtil.java    From phoenix with Apache License 2.0
public static boolean isReversed(Scan scan) {
    return scan.getAttribute(BaseScannerRegionObserver.REVERSE_SCAN) != null;
}
 
Example 15
Source File: GroupedAggregateRegionObserver.java    From phoenix with Apache License 2.0
/**
 * Used for an aggregate query in which the key order does not necessarily match the group by
 * key order. In this case, we must collect all distinct groups within a region into a map,
 * aggregating as we go.
 * @param limit TODO
 */
private RegionScanner scanUnordered(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan,
        final RegionScanner s, final List<Expression> expressions,
        final ServerAggregators aggregators, long limit) throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug(LogUtil.addCustomAnnotations("Grouped aggregation over unordered rows with scan " + scan
                + ", group by " + expressions + ", aggregators " + aggregators, ScanUtil.getCustomAnnotations(scan)));
    }
    RegionCoprocessorEnvironment env = c.getEnvironment();
    Configuration conf = env.getConfiguration();
    int estDistVals = conf.getInt(GROUPBY_ESTIMATED_DISTINCT_VALUES_ATTRIB, DEFAULT_GROUPBY_ESTIMATED_DISTINCT_VALUES);
    byte[] estDistValsBytes = scan.getAttribute(BaseScannerRegionObserver.ESTIMATED_DISTINCT_VALUES);
    if (estDistValsBytes != null) {
        // Allocate 1.5x estimation
        estDistVals = Math.max(MIN_DISTINCT_VALUES, 
                        (int) (Bytes.toInt(estDistValsBytes) * 1.5f));
    }

    final boolean spillableEnabled =
            conf.getBoolean(GROUPBY_SPILLABLE_ATTRIB, DEFAULT_GROUPBY_SPILLABLE);

    GroupByCache groupByCache = 
            GroupByCacheFactory.INSTANCE.newCache(
                    env, ScanUtil.getTenantId(scan), ScanUtil.getCustomAnnotations(scan),
                    aggregators, estDistVals);
    boolean success = false;
    try {
        boolean hasMore;

        MultiKeyValueTuple result = new MultiKeyValueTuple();
        if (logger.isDebugEnabled()) {
            logger.debug(LogUtil.addCustomAnnotations("Spillable groupby enabled: " + spillableEnabled, ScanUtil.getCustomAnnotations(scan)));
        }

        HRegion region = c.getEnvironment().getRegion();
        region.startRegionOperation();
        try {
            do {
                List<Cell> results = new ArrayList<Cell>();
                // Results are potentially returned even when the return value of
                // s.nextRaw(results) is false, since that value only indicates whether
                // there are more rows after the ones just returned.
                hasMore = s.nextRaw(results);
                if (!results.isEmpty()) {
                    result.setKeyValues(results);
                    ImmutableBytesWritable key =
                            TupleUtil.getConcatenatedValue(result, expressions);
                    Aggregator[] rowAggregators = groupByCache.cache(key);
                    // Aggregate values here
                    aggregators.aggregate(rowAggregators, result);
                }
            } while (hasMore && groupByCache.size() < limit);
        } finally {
            region.closeRegionOperation();
        }

        RegionScanner regionScanner = groupByCache.getScanner(s);

        // Do not sort here, but sort back on the client instead
        // The reason is that if the scan ever extends beyond a region
        // (which can happen if we're basing our parallelization split
        // points on old metadata), we'll get incorrect query results.
        success = true;
        return regionScanner;
    } finally {
        if (!success) {
            Closeables.closeQuietly(groupByCache);
        }
    }
}
 
Example 16
Source File: GroupedAggregateRegionObserver.java    From phoenix with Apache License 2.0
/**
 * Replaces the RegionScanner s with a RegionScanner that groups by the key formed by the list
 * of expressions from the scan and returns the aggregated rows of each group. For example,
 * given the following original rows in the RegionScanner:
 *   KEY   COL1
 *   row1  a
 *   row2  b
 *   row3  a
 *   row4  a
 * the following rows will be returned for COUNT(*):
 *   KEY  COUNT
 *   a    3
 *   b    1
 * The client is required to do a sort and a final aggregation, since multiple rows with the
 * same key may be returned from different regions.
 */
@Override
protected RegionScanner doPostScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
        Scan scan, RegionScanner s) throws IOException {
    boolean keyOrdered = false;
    byte[] expressionBytes = scan.getAttribute(BaseScannerRegionObserver.UNORDERED_GROUP_BY_EXPRESSIONS);

    if (expressionBytes == null) {
        expressionBytes = scan.getAttribute(BaseScannerRegionObserver.KEY_ORDERED_GROUP_BY_EXPRESSIONS);
        keyOrdered = true;
    }
    int offset = 0;
    if (ScanUtil.isLocalIndex(scan)) {
        /*
         * For local indexes, we need to set an offset on row key expressions to skip
         * the region start key.
         */
        HRegion region = c.getEnvironment().getRegion();
        offset = region.getStartKey().length != 0 ? region.getStartKey().length:region.getEndKey().length;
        ScanUtil.setRowKeyOffset(scan, offset);
    }
    
    List<Expression> expressions = deserializeGroupByExpressions(expressionBytes, 0);
    ServerAggregators aggregators =
            ServerAggregators.deserialize(scan
                    .getAttribute(BaseScannerRegionObserver.AGGREGATORS), c
                    .getEnvironment().getConfiguration());

    RegionScanner innerScanner = s;
    
    byte[] localIndexBytes = scan.getAttribute(LOCAL_INDEX_BUILD);
    List<IndexMaintainer> indexMaintainers = localIndexBytes == null ? null : IndexMaintainer.deserialize(localIndexBytes);
    TupleProjector tupleProjector = null;
    HRegion dataRegion = null;
    byte[][] viewConstants = null;
    ColumnReference[] dataColumns = IndexUtil.deserializeDataTableColumnsToJoin(scan);
    if (ScanUtil.isLocalIndex(scan)) {
        if (dataColumns != null) {
            tupleProjector = IndexUtil.getTupleProjector(scan, dataColumns);
            dataRegion = IndexUtil.getDataRegion(c.getEnvironment());
            viewConstants = IndexUtil.deserializeViewConstantsFromScan(scan);
        }
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        innerScanner =
                getWrappedScanner(c, innerScanner, offset, scan, dataColumns, tupleProjector, 
                        dataRegion, indexMaintainers == null ? null : indexMaintainers.get(0), viewConstants, tempPtr);
    } 

    final TupleProjector p = TupleProjector.deserializeProjectorFromScan(scan);
    final HashJoinInfo j = HashJoinInfo.deserializeHashJoinFromScan(scan);
    if (p != null || j != null) {
        innerScanner =
                new HashJoinRegionScanner(innerScanner, p, j, ScanUtil.getTenantId(scan),
                        c.getEnvironment());
    }

    long limit = Long.MAX_VALUE;
    byte[] limitBytes = scan.getAttribute(GROUP_BY_LIMIT);
    if (limitBytes != null) {
        limit = PInteger.INSTANCE.getCodec().decodeInt(limitBytes, 0, SortOrder.getDefault());
    }
    if (keyOrdered) { // Optimize by taking advantage that the rows are
                      // already in the required group by key order
        return scanOrdered(c, scan, innerScanner, expressions, aggregators, limit);
    } else { // Otherwise, collect them all up in an in-memory map
        return scanUnordered(c, scan, innerScanner, expressions, aggregators, limit);
    }
}
 
Example 17
Source File: ScanRegionObserver.java    From phoenix with Apache License 2.0
@Override
protected boolean isRegionObserverFor(Scan scan) {
    return scan.getAttribute(BaseScannerRegionObserver.NON_AGGREGATE_QUERY) != null;
}
 
Example 18
Source File: IndexUtil.java    From phoenix with Apache License 2.0
public static void setScanAttributesForIndexReadRepair(Scan scan, PTable table, PhoenixConnection phoenixConnection) throws SQLException {
    if (table.isTransactional() || table.getType() != PTableType.INDEX) {
        return;
    }
    PTable indexTable = table;
    if (indexTable.getIndexType() != PTable.IndexType.GLOBAL) {
        return;
    }
    String schemaName = indexTable.getParentSchemaName().getString();
    String tableName = indexTable.getParentTableName().getString();
    PTable dataTable;
    try {
        dataTable = PhoenixRuntime.getTable(phoenixConnection, SchemaUtil.getTableName(schemaName, tableName));
    } catch (TableNotFoundException e) {
        // The index table is probably being deleted. No need to set the scan attributes
        return;
    }
    // MetaDataClient modifies the index table name for view indexes if the parent view of an index has a child
    // view. Thus, we need to recreate a PTable object with the correct table name for the rest of this code to work
    if (indexTable.getViewIndexId() != null && indexTable.getName().getString().contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) {
        int lastIndexOf = indexTable.getName().getString().lastIndexOf(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR);
        String indexName = indexTable.getName().getString().substring(lastIndexOf + 1);
        indexTable = PhoenixRuntime.getTable(phoenixConnection, indexName);
    }
    if (!dataTable.getIndexes().contains(indexTable)) {
        return;
    }
    if (scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD) == null) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        IndexMaintainer.serialize(dataTable, ptr, Collections.singletonList(indexTable), phoenixConnection);
        scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr));
    }
    scan.setAttribute(BaseScannerRegionObserver.CHECK_VERIFY_COLUMN, TRUE_BYTES);
    scan.setAttribute(BaseScannerRegionObserver.PHYSICAL_DATA_TABLE_NAME, dataTable.getPhysicalName().getBytes());
    IndexMaintainer indexMaintainer = indexTable.getIndexMaintainer(dataTable, phoenixConnection);
    byte[] emptyCF = indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary();
    byte[] emptyCQ = indexMaintainer.getEmptyKeyValueQualifier();
    scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_FAMILY_NAME, emptyCF);
    scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER_NAME, emptyCQ);
    if (scan.getAttribute(BaseScannerRegionObserver.VIEW_CONSTANTS) == null) {
        BaseQueryPlan.serializeViewConstantsIntoScan(scan, dataTable);
    }
    addEmptyColumnToScan(scan, emptyCF, emptyCQ);
}