Java Code Examples for org.apache.phoenix.iterate.ResultIterator#next()

The following examples show how to use org.apache.phoenix.iterate.ResultIterator#next(). Each example is taken from the Apache Phoenix project; the source file it comes from is named in the heading above the code.
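A minimal sketch of the common consumption pattern, distilled from the examples below (the QueryPlan variable plan is a stand-in for whatever plan your code has compiled): call next() until it returns null, and close the iterator in a finally block.

ResultIterator iterator = plan.iterator();
try {
    // next() returns the next Tuple, or null once the results are exhausted
    for (Tuple tuple = iterator.next(); tuple != null; tuple = iterator.next()) {
        // process the Tuple here, e.g. evaluate ProjectedColumnExpressions against it
    }
} finally {
    // release any underlying scanner resources
    iterator.close();
}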
Example 1
Source File: LiteralResultIteratorPlanTest.java    From phoenix with Apache License 2.0
private void testLiteralResultIteratorPlan(Object[][] expectedResult, Integer offset, Integer limit)
        throws SQLException {

    QueryPlan plan = newLiteralResultIterationPlan(offset, limit);
    ResultIterator iter = plan.iterator();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    for (Object[] row : expectedResult) {
        Tuple next = iter.next();
        assertNotNull(next);
        for (int i = 0; i < row.length; i++) {
            PColumn column = table.getColumns().get(i);
            boolean eval = new ProjectedColumnExpression(column, table, column.getName().getString()).evaluate(next,
                    ptr);
            Object o = eval ? column.getDataType().toObject(ptr) : null;
            assertEquals(row[i], o);
        }
    }
    assertNull(iter.next());
}
 
Example 2
Source File: AssertResults.java    From phoenix with Apache License 2.0
public static void assertResults(ResultIterator scanner, AssertingIterator iterator) throws Exception {
    try {
        for (Tuple result = scanner.next(); result != null; result = scanner.next()) {
            iterator.assertNext(result);
        }
        iterator.assertDone();
    } finally {
        scanner.close();
    }
}
 
Example 3
Source File: DeleteCompiler.java    From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    // TODO: share this block of code with UPSERT SELECT
    ImmutableBytesWritable ptr = context.getTempPtr();
    PTable table = dataPlan.getTableRef().getTable();
    table.getIndexMaintainers(ptr, context.getConnection());
    byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
    ServerCache cache = null;
    try {
        if (ptr.getLength() > 0) {
            byte[] uuidValue = ServerCacheClient.generateId();
            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
            context.getScan().setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
            ScanUtil.setClientVersion(context.getScan(), MetaDataProtocol.PHOENIX_VERSION);
        }
        ResultIterator iterator = aggPlan.iterator();
        try {
            Tuple row = iterator.next();
            final long mutationCount = (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
            return new MutationState(maxSize, maxSizeBytes, connection) {
                @Override
                public long getUpdateCount() {
                    return mutationCount;
                }
            };
        } finally {
            iterator.close();
        }
    } finally {
        if (cache != null) {
            cache.close();
        }
    }
}
 
Example 4
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    ImmutableBytesWritable ptr = context.getTempPtr();
    PTable table = tableRef.getTable();
    table.getIndexMaintainers(ptr, context.getConnection());
    byte[] txState = table.isTransactional() ?
            connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;

    ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION);
    if (aggPlan.getTableRef().getTable().isTransactional() 
            || (table.getType() == PTableType.INDEX && table.isTransactional())) {
        scan.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
    }
    if (ptr.getLength() > 0) {
        byte[] uuidValue = ServerCacheClient.generateId();
        scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
        scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
    }
    ResultIterator iterator = aggPlan.iterator();
    try {
        Tuple row = iterator.next();
        final long mutationCount = (Long) aggProjector.getColumnProjector(0).getValue(row,
                PLong.INSTANCE, ptr);
        return new MutationState(maxSize, maxSizeBytes, connection) {
            @Override
            public long getUpdateCount() {
                return mutationCount;
            }
        };
    } finally {
        iterator.close();
    }

}
 
Example 5
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    ResultIterator iterator = queryPlan.iterator();
    if (parallelIteratorFactory == null) {
        return upsertSelect(new StatementContext(statement, queryPlan.getContext().getScan()), tableRef, projector, iterator, columnIndexes, pkSlotIndexes, useServerTimestamp, false);
    }
    try {
        parallelIteratorFactory.setRowProjector(projector);
        parallelIteratorFactory.setColumnIndexes(columnIndexes);
        parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
        Tuple tuple;
        long totalRowCount = 0;
        StatementContext context = queryPlan.getContext();
        while ((tuple=iterator.next()) != null) {// Runs query
            Cell kv = tuple.getValue(0);
            totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
        }
        // Return total number of rows that have been updated. In the case of auto commit being off
        // the mutations will all be in the mutation state of the current connection.
        MutationState mutationState = new MutationState(maxSize, maxSizeBytes, statement.getConnection(), totalRowCount);
        /*
         *  All the metrics collected for measuring the reads done by the parallel mutating iterators
         *  is included in the ReadMetricHolder of the statement context. Include these metrics in the
         *  returned mutation state so they can be published on commit.
         */
        mutationState.setReadMetricQueue(context.getReadMetricsQueue());
        return mutationState;
    } finally {
        iterator.close();
    }
}
 
Example 6
Source File: UnnestArrayPlanTest.java    From phoenix with Apache License 2.0
private void testUnnestArrays(PArrayDataType arrayType, List<Object[]> arrays, boolean withOrdinality) throws Exception {
      PDataType baseType = PDataType.fromTypeId(arrayType.getSqlType() - PDataType.ARRAY_TYPE_BASE);
      List<Tuple> tuples = toTuples(arrayType, arrays);
      LiteralResultIterationPlan subPlan = new LiteralResultIterationPlan(tuples, CONTEXT, SelectStatement.SELECT_ONE,
              TableRef.EMPTY_TABLE_REF, RowProjector.EMPTY_PROJECTOR, null, null, OrderBy.EMPTY_ORDER_BY, null);
      LiteralExpression dummy = LiteralExpression.newConstant(null, arrayType);
      RowKeyValueAccessor accessor = new RowKeyValueAccessor(Arrays.asList(dummy), 0);
      UnnestArrayPlan plan = new UnnestArrayPlan(subPlan, new RowKeyColumnExpression(dummy, accessor), withOrdinality);
      PName colName = PNameFactory.newName("ELEM");
      PColumn elemColumn = new PColumnImpl(PNameFactory.newName("ELEM"), PNameFactory.newName(VALUE_COLUMN_FAMILY), baseType, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes(),
          HConstants.LATEST_TIMESTAMP);
      colName = PNameFactory.newName("IDX");
      PColumn indexColumn = withOrdinality ? new PColumnImpl(colName, PNameFactory.newName(VALUE_COLUMN_FAMILY), PInteger.INSTANCE, null, null, true, 0, SortOrder.getDefault(), null, null, false, "", false, false, colName.getBytes(),
          HConstants.LATEST_TIMESTAMP) : null;
      List<PColumn> columns = withOrdinality ? Arrays.asList(elemColumn, indexColumn) : Arrays.asList(elemColumn);
      ProjectedColumnExpression elemExpr = new ProjectedColumnExpression(elemColumn, columns, 0, elemColumn.getName().getString());
      ProjectedColumnExpression indexExpr = withOrdinality ? new ProjectedColumnExpression(indexColumn, columns, 1, indexColumn.getName().getString()) : null;
      ImmutableBytesWritable ptr = new ImmutableBytesWritable();
      ResultIterator iterator = plan.iterator();
      for (Object[] o : flatten(arrays)) {
          Tuple tuple = iterator.next();
          assertNotNull(tuple);
          assertTrue(elemExpr.evaluate(tuple, ptr));
          Object elem = baseType.toObject(ptr);
          assertEquals(o[0], elem);
          if (withOrdinality) {
              assertTrue(indexExpr.evaluate(tuple, ptr));
              Object index = PInteger.INSTANCE.toObject(ptr);
              assertEquals(o[1], index);                
          }
      }
      assertNull(iterator.next());
  }
 
Example 7
Source File: CorrelatePlanTest.java    From phoenix with Apache License 2.0
private void testCorrelatePlan(Object[][] leftRelation, Object[][] rightRelation, int leftCorrelColumn,
        int rightCorrelColumn, JoinType type, Object[][] expectedResult, Integer offset) throws SQLException {
    TableRef leftTable = createProjectedTableFromLiterals(leftRelation[0]);
    TableRef rightTable = createProjectedTableFromLiterals(rightRelation[0]);
    String varName = "$cor0";
    RuntimeContext runtimeContext = new RuntimeContextImpl();
    runtimeContext.defineCorrelateVariable(varName, leftTable);
    QueryPlan leftPlan = newLiteralResultIterationPlan(leftRelation, offset);
    QueryPlan rightPlan = newLiteralResultIterationPlan(rightRelation, offset);
    Expression columnExpr = new ColumnRef(rightTable, rightCorrelColumn).newColumnExpression();
    Expression fieldAccess = new CorrelateVariableFieldAccessExpression(runtimeContext, varName, new ColumnRef(leftTable, leftCorrelColumn).newColumnExpression());
    Expression filter = ComparisonExpression.create(CompareOp.EQUAL, Arrays.asList(columnExpr, fieldAccess), CONTEXT.getTempPtr(), false);
    rightPlan = new ClientScanPlan(CONTEXT, SelectStatement.SELECT_ONE, rightTable, RowProjector.EMPTY_PROJECTOR,
            null, null, filter, OrderBy.EMPTY_ORDER_BY, rightPlan);
    PTable joinedTable = JoinCompiler.joinProjectedTables(leftTable.getTable(), rightTable.getTable(), type);
    CorrelatePlan correlatePlan = new CorrelatePlan(leftPlan, rightPlan, varName, type, false, runtimeContext, joinedTable, leftTable.getTable(), rightTable.getTable(), leftTable.getTable().getColumns().size());
    ResultIterator iter = correlatePlan.iterator();
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    for (Object[] row : expectedResult) {
        Tuple next = iter.next();
        assertNotNull(next);
        for (int i = 0; i < row.length; i++) {
            PColumn column = joinedTable.getColumns().get(i);
            boolean eval = new ProjectedColumnExpression(column, joinedTable, column.getName().getString()).evaluate(next, ptr);
            Object o = eval ? column.getDataType().toObject(ptr) : null;
            assertEquals(row[i], o);
        }
    }
}
 
Example 8
Source File: HashCacheClient.java    From phoenix with Apache License 2.0
private void serialize(ImmutableBytesWritable ptr, ResultIterator iterator, long estimatedSize, List<Expression> onExpressions, boolean singleValueOnly, Expression keyRangeRhsExpression, List<Expression> keyRangeRhsValues) throws SQLException {
    long maxSize = serverCache.getConnection().getQueryServices().getProps().getLong(QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_SIZE);
    estimatedSize = Math.min(estimatedSize, maxSize);
    if (estimatedSize > Integer.MAX_VALUE) {
        throw new IllegalStateException("Estimated size(" + estimatedSize + ") must not be greater than Integer.MAX_VALUE(" + Integer.MAX_VALUE + ")");
    }
    try {
        TrustedByteArrayOutputStream baOut = new TrustedByteArrayOutputStream((int)estimatedSize);
        DataOutputStream out = new DataOutputStream(baOut);
        // Write onExpressions first, for hash key evaluation along with deserialization
        out.writeInt(onExpressions.size());
        for (Expression expression : onExpressions) {
            WritableUtils.writeVInt(out, ExpressionType.valueOf(expression).ordinal());
            expression.write(out);                
        }
        int exprSize = baOut.size() + Bytes.SIZEOF_INT;
        out.writeInt(exprSize * (singleValueOnly ? -1 : 1));
        int nRows = 0;
        out.writeInt(nRows); // In the end will be replaced with total number of rows            
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        for (Tuple result = iterator.next(); result != null; result = iterator.next()) {
            TupleUtil.write(result, out);
            if (baOut.size() > maxSize) {
                throw new MaxServerCacheSizeExceededException("Size of hash cache (" + baOut.size() + " bytes) exceeds the maximum allowed size (" + maxSize + " bytes)");
            }
            // Evaluate key expressions for hash join key range optimization.
            if (keyRangeRhsExpression != null) {
                keyRangeRhsValues.add(evaluateKeyExpression(keyRangeRhsExpression, result, tempPtr));
            }
            nRows++;
        }
        TrustedByteArrayOutputStream sizeOut = new TrustedByteArrayOutputStream(Bytes.SIZEOF_INT);
        DataOutputStream dataOut = new DataOutputStream(sizeOut);
        try {
            dataOut.writeInt(nRows);
            dataOut.flush();
            byte[] cache = baOut.getBuffer();
            // Replace number of rows written above with the correct value.
            System.arraycopy(sizeOut.getBuffer(), 0, cache, exprSize, sizeOut.size());
            // Reallocate to actual size plus compressed buffer size (which is allocated below)
            int maxCompressedSize = Snappy.maxCompressedLength(baOut.size());
            byte[] compressed = new byte[maxCompressedSize]; // size for worst case
            int compressedSize = Snappy.compress(baOut.getBuffer(), 0, baOut.size(), compressed, 0);
            // Last realloc to size of compressed buffer.
            ptr.set(compressed,0,compressedSize);
        } finally {
            dataOut.close();
        }
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    } finally {
        iterator.close();
    }
}
 
Example 9
Source File: DeleteCompiler.java    From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    ResultIterator iterator = bestPlan.iterator();
    try {
        // If we're not doing any pre or post processing, we can produce the delete mutations directly
        // in the parallel threads executed for the scan
        if (!hasPreOrPostProcessing) {
            Tuple tuple;
            long totalRowCount = 0;
            if (parallelIteratorFactory != null) {
                parallelIteratorFactory.setQueryPlan(bestPlan);
                parallelIteratorFactory.setOtherTableRefs(otherTableRefs);
                parallelIteratorFactory.setProjectedTableRef(projectedTableRef);
            }
            while ((tuple=iterator.next()) != null) {// Runs query
                Cell kv = tuple.getValue(0);
                totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
            }
            // Return total number of rows that have been deleted from the table. In the case of auto commit being off
            // the mutations will all be in the mutation state of the current connection. We need to divide by the
            // total number of tables we updated as otherwise the client will get an inflated result.
            int totalTablesUpdateClientSide = 1; // data table is always updated
            PTable bestTable = bestPlan.getTableRef().getTable();
            // global immutable tables are also updated client side (but don't double count the data table)
            if (bestPlan != dataPlan && isMaintainedOnClient(bestTable)) {
                totalTablesUpdateClientSide++;
            }
            for (TableRef otherTableRef : otherTableRefs) {
                PTable otherTable = otherTableRef.getTable();
                // Don't double count the data table here (which morphs when it becomes a projected table, hence this check)
                if (projectedTableRef != otherTableRef && isMaintainedOnClient(otherTable)) {
                    totalTablesUpdateClientSide++;
                }
            }
            MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount/totalTablesUpdateClientSide);

            // set the read metrics accumulated in the parent context so that it can be published when the mutations are committed.
            state.setReadMetricQueue(context.getReadMetricsQueue());

            return state;
        } else {
            // Otherwise, we have to execute the query and produce the delete mutations in the single thread
            // producing the query results.
            return deleteRows(context, iterator, bestPlan, projectedTableRef, otherTableRefs);
        }
    } finally {
        iterator.close();
    }
}
 