org.apache.phoenix.iterate.ResultIterator Java Examples

The following examples show how to use org.apache.phoenix.iterate.ResultIterator. Each example is taken from the Apache Phoenix project (Apache License 2.0); the source file it comes from is noted above the code.
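
Before the examples, here is a minimal, self-contained sketch of the pattern most of them follow: pull tuples with next() until it returns null, then close() the iterator in a finally block so the underlying scanners are released. This sketch is not taken from the Phoenix sources; the class and method names are ours.

import java.sql.SQLException;

import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.schema.tuple.Tuple;

public final class ResultIteratorSketch {

    // Drain a ResultIterator and always close it, mirroring Examples #9 and #10 below.
    static long countRows(ResultIterator iterator) throws SQLException {
        long rowCount = 0;
        try {
            for (Tuple tuple = iterator.next(); tuple != null; tuple = iterator.next()) {
                rowCount++; // a real caller would evaluate expressions against the tuple here
            }
        } finally {
            iterator.close();
        }
        return rowCount;
    }
}

ResultIterator also exposes explain(List<String>), which several examples below (for instance #8, #21, and #25) use to render an EXPLAIN plan.
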
Example #1
Source File: SortMergeJoinPlan.java    From phoenix with Apache License 2.0
public BasicJoinIterator(ResultIterator lhsIterator, ResultIterator rhsIterator) {
    this.lhsIterator = lhsIterator;
    this.rhsIterator = rhsIterator;
    this.initialized = false;
    this.lhsTuple = null;
    this.rhsTuple = null;
    this.lhsKey = new JoinKey(lhsKeyExpressions);
    this.rhsKey = new JoinKey(rhsKeyExpressions);
    this.nextLhsTuple = null;
    this.nextRhsTuple = null;
    this.nextLhsKey = new JoinKey(lhsKeyExpressions);
    this.nextRhsKey = new JoinKey(rhsKeyExpressions);
    this.destBitSet = ValueBitSet.newInstance(joinedSchema);
    this.lhsBitSet = ValueBitSet.newInstance(lhsSchema);
    this.rhsBitSet = ValueBitSet.newInstance(rhsSchema);
    lhsBitSet.clear();
    int len = lhsBitSet.getEstimatedLength();
    this.emptyProjectedValue = new byte[len];
    lhsBitSet.toBytes(emptyProjectedValue, 0);
    this.queue = PhoenixQueues.newTupleQueue(spoolingEnabled, thresholdBytes);
    this.queueIterator = null;
}
 
Example #2
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
@Override
protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, PhoenixConnection connection) throws SQLException {
    if (parentContext.getSequenceManager().getSequenceCount() > 0) {
        throw new IllegalStateException("Cannot pipeline upsert when sequence is referenced");
    }
    PhoenixStatement statement = new PhoenixStatement(connection);
    /*
     * We don't want to collect any read metrics within the child context. This is because any read metrics that
     * need to be captured are already getting collected in the parent statement context enclosed in the result
     * iterator being used for reading rows out.
     */
    StatementContext childContext = new StatementContext(statement, false);
    // Clone the row projector as it's not thread safe and would be used simultaneously by
    // multiple threads otherwise.
    return upsertSelect(childContext, tableRef, projector.cloneIfNecessary(), iterator,
            columnIndexes, pkSlotIndexes, useServerTimestamp, false);
}
 
Example #3
Source File: PhoenixResultSet.java    From phoenix with Apache License 2.0
public PhoenixResultSet(ResultIterator resultIterator, RowProjector rowProjector,
        StatementContext ctx) throws SQLException {
    this.rowProjector = rowProjector;
    this.scanner = resultIterator;
    this.context = ctx;
    this.statement = context.getStatement();
    this.readMetricsQueue = context.getReadMetricsQueue();
    this.overAllQueryMetrics = context.getOverallQueryMetrics();
    this.queryLogger = context.getQueryLogger() != null ? context.getQueryLogger() : QueryLogger.NO_OP_INSTANCE;
    this.wildcardIncludesDynamicCols = this.context.getConnection().getQueryServices()
            .getConfiguration().getBoolean(WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB,
                    DEFAULT_WILDCARD_QUERY_DYNAMIC_COLS_ATTRIB);
    if (this.wildcardIncludesDynamicCols) {
        Pair<List<PColumn>, Integer> res = getStaticColsAndStartingPosForDynCols();
        this.staticColumns = res.getFirst();
        this.startPositionForDynamicCols = res.getSecond();
    } else {
        this.staticColumns = null;
        this.startPositionForDynamicCols = 0;
    }
}
 
Example #4
Source File: SortMergeJoinPlan.java    From phoenix with Apache License 2.0
public BasicJoinIterator(ResultIterator lhsIterator, ResultIterator rhsIterator) {
    this.lhsIterator = lhsIterator;
    this.rhsIterator = rhsIterator;
    this.initialized = false;
    this.lhsTuple = null;
    this.rhsTuple = null;
    this.lhsKey = new JoinKey(lhsKeyExpressions);
    this.rhsKey = new JoinKey(rhsKeyExpressions);
    this.nextLhsTuple = null;
    this.nextRhsTuple = null;
    this.nextLhsKey = new JoinKey(lhsKeyExpressions);
    this.nextRhsKey = new JoinKey(rhsKeyExpressions);
    this.destBitSet = ValueBitSet.newInstance(joinedSchema);
    this.lhsBitSet = ValueBitSet.newInstance(lhsSchema);
    this.rhsBitSet = ValueBitSet.newInstance(rhsSchema);
    lhsBitSet.clear();
    int len = lhsBitSet.getEstimatedLength();
    this.emptyProjectedValue = new byte[len];
    lhsBitSet.toBytes(emptyProjectedValue, 0);
    int thresholdBytes = context.getConnection().getQueryServices().getProps().getInt(
            QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
    this.queue = new MappedByteBufferTupleQueue(thresholdBytes);
    this.queueIterator = null;
}
 
Example #5
Source File: AggregatePlan.java    From phoenix with Apache License 2.0
@Override
public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan, String tableName, QueryPlan plan) throws SQLException {
    /**
     * Sort the result tuples by the GroupBy expressions.
     * When orderByReverse is false, if some GroupBy expression is SortOrder.DESC, then sorted results on that expression are DESC, not ASC.
     * When orderByReverse is true, if some GroupBy expression is SortOrder.DESC, then sorted results on that expression are ASC, not DESC.
     */
    OrderByExpression orderByExpression =
            OrderByExpression.createByCheckIfOrderByReverse(
                    RowKeyExpression.INSTANCE,
                    false,
                    true,
                    this.orderBy == OrderBy.REV_ROW_KEY_ORDER_BY);
    long threshold =
            services.getProps().getLong(QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB,
                    QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES);
    boolean spoolingEnabled =
            services.getProps().getBoolean(
                    QueryServices.CLIENT_ORDERBY_SPOOLING_ENABLED_ATTRIB,
                    QueryServicesOptions.DEFAULT_CLIENT_ORDERBY_SPOOLING_ENABLED);
    return new OrderedResultIterator(scanner,
            Collections.<OrderByExpression> singletonList(orderByExpression),
            spoolingEnabled, threshold);
}
 
Example #6
Source File: SortMergeJoinPlan.java    From phoenix with Apache License 2.0
private static SQLException closeIterators(ResultIterator lhsIterator, ResultIterator rhsIterator) {
    SQLException e = null;
    try {
        lhsIterator.close();
    } catch (Throwable e1) {
        e = e1 instanceof SQLException ? (SQLException)e1 : new SQLException(e1);
    }
    try {
        rhsIterator.close();
    } catch (Throwable e2) {
        SQLException e22 = e2 instanceof SQLException ? (SQLException)e2 : new SQLException(e2);
        if (e != null) {
            e.setNextException(e22);
        } else {
            e = e22;
        }
    }
    return e;
}
 
Example #7
Source File: LiteralResultIteratorPlanTest.java    From phoenix with Apache License 2.0
private void testLiteralResultIteratorPlan(Object[][] expectedResult, Integer offset, Integer limit)
        throws SQLException {

    QueryPlan plan = newLiteralResultIterationPlan(offset, limit);
    ResultIterator iter = plan.iterator();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    for (Object[] row : expectedResult) {
        Tuple next = iter.next();
        assertNotNull(next);
        for (int i = 0; i < row.length; i++) {
            PColumn column = table.getColumns().get(i);
            boolean eval = new ProjectedColumnExpression(column, table, column.getName().getString()).evaluate(next,
                    ptr);
            Object o = eval ? column.getDataType().toObject(ptr) : null;
            assertEquals(row[i], o);
        }
    }
    assertNull(iter.next());
}
 
Example #8
Source File: QueryUtil.java    From phoenix with Apache License 2.0
public static String getExplainPlan(ResultIterator iterator) throws SQLException {
    List<String> steps = Lists.newArrayList();
    iterator.explain(steps);
    StringBuilder buf = new StringBuilder();
    for (String step : steps) {
        buf.append(step);
        buf.append('\n');
    }
    if (buf.length() > 0) {
        buf.setLength(buf.length()-1);
    }
    return buf.toString();
}
 
Example #9
Source File: AssertResults.java    From phoenix with Apache License 2.0
public static void assertResults(ResultIterator scanner, AssertingIterator iterator) throws Exception {
    try {
        for (Tuple result = scanner.next(); result != null; result = scanner.next()) {
            iterator.assertNext(result);
        }
        iterator.assertDone();
    } finally {
        scanner.close();
    }
}
 
Example #10
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    ResultIterator iterator = queryPlan.iterator();
    if (parallelIteratorFactory == null) {
        return upsertSelect(new StatementContext(statement, queryPlan.getContext().getScan()), tableRef, projector, iterator, columnIndexes, pkSlotIndexes, useServerTimestamp, false);
    }
    try {
        parallelIteratorFactory.setRowProjector(projector);
        parallelIteratorFactory.setColumnIndexes(columnIndexes);
        parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
        Tuple tuple;
        long totalRowCount = 0;
        StatementContext context = queryPlan.getContext();
        while ((tuple = iterator.next()) != null) { // Runs query
            Cell kv = tuple.getValue(0);
            totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
        }
        // Return the total number of rows that have been updated. In the case of auto commit being off,
        // the mutations will all be in the mutation state of the current connection.
        MutationState mutationState = new MutationState(maxSize, maxSizeBytes, statement.getConnection(), totalRowCount);
        /*
         * All the metrics collected for measuring the reads done by the parallel mutating iterators
         * are included in the ReadMetricHolder of the statement context. Include these metrics in the
         * returned mutation state so they can be published on commit.
         */
        mutationState.setReadMetricQueue(context.getReadMetricsQueue());
        return mutationState;
    } finally {
        iterator.close();
    }
}
 
Example #11
Source File: PhoenixDatabaseMetaData.java    From phoenix with Apache License 2.0
private TenantColumnFilteringIterator(ResultIterator delegate, RowProjector rowProjector) throws SQLException {
    super(delegate);
    this.rowProjector = rowProjector;
    this.columnFamilyIndex = rowProjector.getColumnIndex(COLUMN_FAMILY);
    this.columnNameIndex = rowProjector.getColumnIndex(COLUMN_NAME);
    this.multiTenantIndex = rowProjector.getColumnIndex(MULTI_TENANT);
    this.keySeqIndex = rowProjector.getColumnIndex(KEY_SEQ);
}
 
Example #12
Source File: TraceQueryPlan.java    From phoenix with Apache License 2.0
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
    final PhoenixConnection conn = stmt.getConnection();
    if (conn.getTraceScope() == null && !traceStatement.isTraceOn()) {
        return ResultIterator.EMPTY_ITERATOR;
    }
    return new TraceQueryResultIterator(conn);
}
 
Example #13
Source File: AggregatePlan.java    From phoenix with Apache License 2.0
@Override
public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan) throws SQLException {
    Expression expression = RowKeyExpression.INSTANCE;
    OrderByExpression orderByExpression = new OrderByExpression(expression, false, true);
    int threshold = services.getProps().getInt(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
    return new OrderedResultIterator(scanner, Collections.<OrderByExpression>singletonList(orderByExpression), threshold);
}
 
Example #14
Source File: DeleteCompiler.java    From phoenix with Apache License 2.0
@Override
protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, PhoenixConnection connection) throws SQLException {
    PhoenixStatement statement = new PhoenixStatement(connection);
    /*
     * We don't want to collect any read metrics within the child context. This is because any read metrics that
     * need to be captured are already getting collected in the parent statement context enclosed in the result
     * iterator being used for reading rows out.
     */
    StatementContext context = new StatementContext(statement, false);
    MutationState state = deleteRows(context, iterator, queryPlan, projectedTableRef, otherTableRefs);
    return state;
}
 
Example #15
Source File: SortMergeJoinPlan.java    From phoenix with Apache License 2.0
public SemiAntiJoinIterator(ResultIterator lhsIterator, ResultIterator rhsIterator) {
    if (type != JoinType.Semi && type != JoinType.Anti) {
        throw new IllegalArgumentException("Type " + type + " is not allowed by " + SemiAntiJoinIterator.class.getName());
    }
    this.lhsIterator = lhsIterator;
    this.rhsIterator = rhsIterator;
    this.isSemi = type == JoinType.Semi;
    this.initialized = false;
    this.lhsTuple = null;
    this.rhsTuple = null;
    this.lhsKey = new JoinKey(lhsKeyExpressions);
    this.rhsKey = new JoinKey(rhsKeyExpressions);
}
 
Example #16
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
@Override
protected MutationState mutate(StatementContext context, ResultIterator iterator, PhoenixConnection connection) throws SQLException {
    if (context.getSequenceManager().getSequenceCount() > 0) {
        throw new IllegalStateException("Cannot pipeline upsert when sequence is referenced");
    }
    PhoenixStatement statement = new PhoenixStatement(connection);
    // Clone the row projector as it's not thread safe and would be used simultaneously by
    // multiple threads otherwise.
    return upsertSelect(statement, tableRef, projector.cloneIfNecessary(), iterator, columnIndexes, pkSlotIndexes);
}
 
Example #17
Source File: DeleteCompiler.java    From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    // TODO: share this block of code with UPSERT SELECT
    ImmutableBytesWritable ptr = context.getTempPtr();
    PTable table = dataPlan.getTableRef().getTable();
    table.getIndexMaintainers(ptr, context.getConnection());
    byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
    ServerCache cache = null;
    try {
        if (ptr.getLength() > 0) {
            byte[] uuidValue = ServerCacheClient.generateId();
            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
            context.getScan().setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
            ScanUtil.setClientVersion(context.getScan(), MetaDataProtocol.PHOENIX_VERSION);
        }
        ResultIterator iterator = aggPlan.iterator();
        try {
            Tuple row = iterator.next();
            final long mutationCount = (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
            return new MutationState(maxSize, maxSizeBytes, connection) {
                @Override
                public long getUpdateCount() {
                    return mutationCount;
                }
            };
        } finally {
            iterator.close();
        }
    } finally {
        if (cache != null) {
            cache.close();
        }
    }
}
 
Example #18
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    ImmutableBytesWritable ptr = context.getTempPtr();
    PTable table = tableRef.getTable();
    table.getIndexMaintainers(ptr, context.getConnection());
    byte[] txState = table.isTransactional() ?
            connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;

    ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION);
    if (aggPlan.getTableRef().getTable().isTransactional() 
            || (table.getType() == PTableType.INDEX && table.isTransactional())) {
        scan.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
    }
    if (ptr.getLength() > 0) {
        byte[] uuidValue = ServerCacheClient.generateId();
        scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
        scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
    }
    ResultIterator iterator = aggPlan.iterator();
    try {
        Tuple row = iterator.next();
        final long mutationCount = (Long) aggProjector.getColumnProjector(0).getValue(row,
                PLong.INSTANCE, ptr);
        return new MutationState(maxSize, maxSizeBytes, connection) {
            @Override
            public long getUpdateCount() {
                return mutationCount;
            }
        };
    } finally {
        iterator.close();
    }

}
 
Example #19
Source File: HashCacheClient.java    From phoenix with Apache License 2.0
/**
 * Send the results of scanning through the scanner to all
 * region servers for regions of the table that will use the cache
 * that intersect with the minMaxKeyRange.
 * @param scanner scanner for the table or intermediate results being cached
 * @return client-side {@link ServerCache} representing the added hash cache
 * @throws SQLException 
 * @throws MaxServerCacheSizeExceededException if size of hash cache exceeds max allowed
 * size
 */
public ServerCache addHashCache(
        ScanRanges keyRanges, byte[] cacheId, ResultIterator iterator, long estimatedSize, List<Expression> onExpressions,
        boolean singleValueOnly, boolean usePersistentCache, PTable cacheUsingTable, Expression keyRangeRhsExpression,
        List<Expression> keyRangeRhsValues) throws SQLException {
    /**
     * Serialize and compress hashCacheTable
     */
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    serialize(ptr, iterator, estimatedSize, onExpressions, singleValueOnly, keyRangeRhsExpression, keyRangeRhsValues);
    ServerCache cache = serverCache.addServerCache(keyRanges, cacheId, ptr, ByteUtil.EMPTY_BYTE_ARRAY, new HashCacheFactory(), cacheUsingTable, usePersistentCache, true);
    return cache;
}
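
As the code above suggests, the iterator's contents are first serialized and compressed into the ImmutableBytesWritable ptr by serialize(...), and those bytes are then shipped via serverCache.addServerCache(...) to the region servers whose regions intersect keyRanges. The ServerCache handle returned to the caller is what is eventually closed to release the server-side memory (see the cache.close() call in Example #17).
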
 
Example #20
Source File: CursorFetchPlan.java    From phoenix with Apache License 2.0
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    StatementContext context = delegate.getContext();
    if (resultIterator == null) {
        context.getOverallQueryMetrics().startQuery();
        resultIterator = new CursorResultIterator(
                LookAheadResultIterator.wrap(delegate.iterator(scanGrouper, scan)), cursorName);
    }
    return resultIterator;
}
 
Example #21
Source File: UnionPlan.java    From phoenix with Apache License 2.0
@Override
public ExplainPlan getExplainPlan() throws SQLException {
    List<String> steps = new ArrayList<String>();
    steps.add("UNION ALL OVER " + this.plans.size() + " QUERIES");
    ResultIterator iterator = iterator();
    iterator.explain(steps);
    // Indent plans steps nested under union, except last client-side merge/concat step (if there is one)
    int offset = !orderBy.getOrderByExpressions().isEmpty() && limit != null ? 2 : limit != null ? 1 : 0;
    for (int i = 1 ; i < steps.size()-offset; i++) {
        steps.set(i, "    " + steps.get(i));
    }
    return new ExplainPlan(steps);
}
 
Example #22
Source File: SortMergeJoinPlan.java    From phoenix with Apache License 2.0
public SemiAntiJoinIterator(ResultIterator lhsIterator, ResultIterator rhsIterator) {
    if (joinType != JoinType.Semi && joinType != JoinType.Anti) {
        throw new IllegalArgumentException("Type " + joinType + " is not allowed by " + SemiAntiJoinIterator.class.getName());
    }
    this.lhsIterator = lhsIterator;
    this.rhsIterator = rhsIterator;
    this.isSemi = joinType == JoinType.Semi;
    this.initialized = false;
    this.lhsTuple = null;
    this.rhsTuple = null;
    this.lhsKey = new JoinKey(lhsKeyExpressions);
    this.rhsKey = new JoinKey(rhsKeyExpressions);
}
 
Example #23
Source File: UnnestArrayPlan.java    From phoenix with Apache License 2.0
public UnnestArrayResultIterator(ResultIterator iterator) {
    super(iterator);
    this.elemRefExpression = new UnnestArrayElemRefExpression(arrayExpression);
    this.elemIndexExpression = withOrdinality ? new UnnestArrayElemIndexExpression() : null;
    this.projector = new TupleProjector(withOrdinality ? new Expression[] {elemRefExpression, elemIndexExpression} : new Expression[] {elemRefExpression});
    this.arrayPtr = new ImmutableBytesWritable();
    this.length = 0;
    this.index = 0;
    this.closed = false;
}
 
Example #24
Source File: BaseQueryPlan.java    From phoenix with Apache License 2.0
private ResultIterator getWrappedIterator(final Map<ImmutableBytesPtr,ServerCache> dependencies,
        ResultIterator iterator) {
    ResultIterator wrappedIterator = dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) {
        @Override
        public void close() throws SQLException {
            try {
                super.close();
            } finally {
                SQLCloseables.closeAll(dependencies.values());
            }
        }
    };
    return wrappedIterator;
}
 
Example #25
Source File: BaseQueryPlan.java    From phoenix with Apache License 2.0
@Override
public ExplainPlan getExplainPlan() throws SQLException {
    if (context.getScanRanges() == ScanRanges.NOTHING) {
        return new ExplainPlan(Collections.singletonList("DEGENERATE SCAN OVER " + getTableRef().getTable().getName().getString()));
    }

    ResultIterator iterator = iterator();
    ExplainPlan explainPlan = new ExplainPlan(getPlanSteps(iterator));
    iterator.close();
    return explainPlan;
}
 
Example #26
Source File: UnnestArrayPlanTest.java    From phoenix with Apache License 2.0
private void testUnnestArrays(PArrayDataType arrayType, List<Object[]> arrays, boolean withOrdinality) throws Exception {
    PDataType baseType = PDataType.fromTypeId(arrayType.getSqlType() - PDataType.ARRAY_TYPE_BASE);
    List<Tuple> tuples = toTuples(arrayType, arrays);
    LiteralResultIterationPlan subPlan = new LiteralResultIterationPlan(tuples, CONTEXT, SelectStatement.SELECT_ONE,
            TableRef.EMPTY_TABLE_REF, RowProjector.EMPTY_PROJECTOR, null, null, OrderBy.EMPTY_ORDER_BY, null);
    LiteralExpression dummy = LiteralExpression.newConstant(null, arrayType);
    RowKeyValueAccessor accessor = new RowKeyValueAccessor(Arrays.asList(dummy), 0);
    UnnestArrayPlan plan = new UnnestArrayPlan(subPlan, new RowKeyColumnExpression(dummy, accessor), withOrdinality);
    PName colName = PNameFactory.newName("ELEM");
    PColumn elemColumn = new PColumnImpl(PNameFactory.newName("ELEM"), PNameFactory.newName(VALUE_COLUMN_FAMILY), baseType, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes(),
            HConstants.LATEST_TIMESTAMP);
    colName = PNameFactory.newName("IDX");
    PColumn indexColumn = withOrdinality ? new PColumnImpl(colName, PNameFactory.newName(VALUE_COLUMN_FAMILY), PInteger.INSTANCE, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes(),
            HConstants.LATEST_TIMESTAMP) : null;
    List<PColumn> columns = withOrdinality ? Arrays.asList(elemColumn, indexColumn) : Arrays.asList(elemColumn);
    ProjectedColumnExpression elemExpr = new ProjectedColumnExpression(elemColumn, columns, 0, elemColumn.getName().getString());
    ProjectedColumnExpression indexExpr = withOrdinality ? new ProjectedColumnExpression(indexColumn, columns, 1, indexColumn.getName().getString()) : null;
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    ResultIterator iterator = plan.iterator();
    for (Object[] o : flatten(arrays)) {
        Tuple tuple = iterator.next();
        assertNotNull(tuple);
        assertTrue(elemExpr.evaluate(tuple, ptr));
        Object elem = baseType.toObject(ptr);
        assertEquals(o[0], elem);
        if (withOrdinality) {
            assertTrue(indexExpr.evaluate(tuple, ptr));
            Object index = PInteger.INSTANCE.toObject(ptr);
            assertEquals(o[1], index);
        }
    }
    assertNull(iterator.next());
}
 
Example #27
Source File: PhoenixStatement.java    From phoenix with Apache License 2.0
@SuppressWarnings("unchecked")
@Override
public QueryPlan compilePlan(PhoenixStatement stmt, Sequence.ValueOp seqAction) throws SQLException {
    CompilableStatement compilableStmt = getStatement();
    final StatementPlan plan = compilableStmt.compilePlan(stmt, Sequence.ValueOp.VALIDATE_SEQUENCE);
    List<String> planSteps = plan.getExplainPlan().getPlanSteps();
    List<Tuple> tuples = Lists.newArrayListWithExpectedSize(planSteps.size());
    for (String planStep : planSteps) {
        Tuple tuple = new SingleKeyValueTuple(KeyValueUtil.newKeyValue(PVarchar.INSTANCE.toBytes(planStep), EXPLAIN_PLAN_FAMILY, EXPLAIN_PLAN_COLUMN, MetaDataProtocol.MIN_TABLE_TIMESTAMP, ByteUtil.EMPTY_BYTE_ARRAY));
        tuples.add(tuple);
    }
    final ResultIterator iterator = new MaterializedResultIterator(tuples);
    return new QueryPlan() {

        @Override
        public ParameterMetaData getParameterMetaData() {
            return PhoenixParameterMetaData.EMPTY_PARAMETER_META_DATA;
        }

        @Override
        public ExplainPlan getExplainPlan() throws SQLException {
            return new ExplainPlan(Collections.singletonList("EXPLAIN PLAN"));
        }

        @Override
        public ResultIterator iterator() throws SQLException {
            return iterator;
        }

        @Override
        public long getEstimatedSize() {
            return 0;
        }

        @Override
        public TableRef getTableRef() {
            return null;
        }

        @Override
        public RowProjector getProjector() {
            return EXPLAIN_PLAN_ROW_PROJECTOR;
        }

        @Override
        public Integer getLimit() {
            return null;
        }

        @Override
        public OrderBy getOrderBy() {
            return OrderBy.EMPTY_ORDER_BY;
        }

        @Override
        public GroupBy getGroupBy() {
            return GroupBy.EMPTY_GROUP_BY;
        }

        @Override
        public List<KeyRange> getSplits() {
            return Collections.emptyList();
        }

        @Override
        public List<List<Scan>> getScans() {
            return Collections.emptyList();
        }

        @Override
        public StatementContext getContext() {
            return plan.getContext();
        }

        @Override
        public FilterableStatement getStatement() {
            return null;
        }

        @Override
        public boolean isDegenerate() {
            return false;
        }

        @Override
        public boolean isRowKeyOrdered() {
            return true;
        }
        
    };
}
 
Example #28
Source File: UnionPlan.java    From phoenix with Apache License 2.0
@Override
public ResultIterator iterator() throws SQLException {
    return iterator(DefaultParallelScanGrouper.getInstance());
}
 
Example #29
Source File: SortMergeJoinPlan.java    From phoenix with Apache License 2.0
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
    return iterator(scanGrouper, null);
}