Java Code Examples for org.apache.phoenix.iterate.ResultIterator#close()

The following examples show how to use org.apache.phoenix.iterate.ResultIterator#close(). All of the examples are taken from the Apache Phoenix project; the source file each one comes from is noted above its code.
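Before the examples, here is a minimal sketch of the pattern they all follow: obtain a ResultIterator from a compiled QueryPlan, consume it, and close it in a finally block so the underlying resources are released even if iteration fails. The CloseIteratorSketch class, the countRows method, and the plan parameter are illustrative assumptions, not part of the Phoenix API.

import java.sql.SQLException;

import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.schema.tuple.Tuple;

public final class CloseIteratorSketch {
    // Illustrative helper: count the rows produced by an already-compiled plan.
    static long countRows(QueryPlan plan) throws SQLException {
        ResultIterator iterator = plan.iterator();
        try {
            long rows = 0;
            for (Tuple tuple = iterator.next(); tuple != null; tuple = iterator.next()) {
                rows++; // consume every tuple before closing
            }
            return rows;
        } finally {
            iterator.close(); // always release the iterator's resources, even on error
        }
    }
}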
Example 1
Source File: SortMergeJoinPlan.java    From phoenix with Apache License 2.0
private static SQLException closeIterators(ResultIterator lhsIterator, ResultIterator rhsIterator) {
    SQLException e = null;
    try {
        lhsIterator.close();
    } catch (Throwable e1) {
        e = e1 instanceof SQLException ? (SQLException)e1 : new SQLException(e1);
    }
    try {
        rhsIterator.close();
    } catch (Throwable e2) {
        SQLException e22 = e2 instanceof SQLException ? (SQLException)e2 : new SQLException(e2);
        if (e != null) {
            e.setNextException(e22);
        } else {
            e = e22;
        }
    }
    return e;
}
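A hedged sketch of how a plan's close() method might use the helper above: close both child iterators unconditionally, then surface the first failure, with the second failure (if any) already chained via setNextException. The lhsIterator and rhsIterator fields are assumptions standing in for the plan's two inputs.

@Override
public void close() throws SQLException {
    // closeIterators() closes both sides and returns the first exception
    // encountered (with any second exception chained onto it).
    SQLException e = closeIterators(lhsIterator, rhsIterator);
    if (e != null) {
        throw e;
    }
}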
 
Example 2
Source File: AssertResults.java    From phoenix with Apache License 2.0
public static void assertResults(ResultIterator scanner, AssertingIterator iterator) throws Exception {
    try {
        for (Tuple result = scanner.next(); result != null; result = scanner.next()) {
            iterator.assertNext(result);
        }
        iterator.assertDone();
    } finally {
        scanner.close();
    }
}
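A hedged usage sketch for the helper above: drive it with an AssertingIterator that only verifies how many rows were returned. The anonymous implementation and expectedCount are assumptions for illustration; a real test would typically also compare the tuple values.

final int expectedCount = 3; // illustrative expectation
assertResults(scanner, new AssertingIterator() {
    private int seen = 0;
    @Override
    public void assertNext(Tuple result) {
        seen++; // only count rows in this sketch
    }
    @Override
    public void assertDone() {
        org.junit.Assert.assertEquals(expectedCount, seen);
    }
});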
 
Example 3
Source File: DeleteCompiler.java    From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    // TODO: share this block of code with UPSERT SELECT
    ImmutableBytesWritable ptr = context.getTempPtr();
    PTable table = dataPlan.getTableRef().getTable();
    table.getIndexMaintainers(ptr, context.getConnection());
    byte[] txState = table.isTransactional() ? connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;
    ServerCache cache = null;
    try {
        if (ptr.getLength() > 0) {
            byte[] uuidValue = ServerCacheClient.generateId();
            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
            context.getScan().setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
            context.getScan().setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
            ScanUtil.setClientVersion(context.getScan(), MetaDataProtocol.PHOENIX_VERSION);
        }
        ResultIterator iterator = aggPlan.iterator();
        try {
            Tuple row = iterator.next();
            final long mutationCount = (Long) projector.getColumnProjector(0).getValue(row, PLong.INSTANCE, ptr);
            return new MutationState(maxSize, maxSizeBytes, connection) {
                @Override
                public long getUpdateCount() {
                    return mutationCount;
                }
            };
        } finally {
            iterator.close();
        }
    } finally {
        if (cache != null) {
            cache.close();
        }
    }
}
 
Example 4
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    ImmutableBytesWritable ptr = context.getTempPtr();
    PTable table = tableRef.getTable();
    table.getIndexMaintainers(ptr, context.getConnection());
    byte[] txState = table.isTransactional() ?
            connection.getMutationState().encodeTransaction() : ByteUtil.EMPTY_BYTE_ARRAY;

    ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION);
    if (aggPlan.getTableRef().getTable().isTransactional() 
            || (table.getType() == PTableType.INDEX && table.isTransactional())) {
        scan.setAttribute(BaseScannerRegionObserver.TX_STATE, txState);
    }
    if (ptr.getLength() > 0) {
        byte[] uuidValue = ServerCacheClient.generateId();
        scan.setAttribute(PhoenixIndexCodec.INDEX_UUID, uuidValue);
        scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ptr.get());
    }
    ResultIterator iterator = aggPlan.iterator();
    try {
        Tuple row = iterator.next();
        final long mutationCount = (Long) aggProjector.getColumnProjector(0).getValue(row,
                PLong.INSTANCE, ptr);
        return new MutationState(maxSize, maxSizeBytes, connection) {
            @Override
            public long getUpdateCount() {
                return mutationCount;
            }
        };
    } finally {
        iterator.close();
    }

}
 
Example 5
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    ResultIterator iterator = queryPlan.iterator();
    if (parallelIteratorFactory == null) {
        return upsertSelect(new StatementContext(statement, queryPlan.getContext().getScan()), tableRef, projector, iterator, columnIndexes, pkSlotIndexes, useServerTimestamp, false);
    }
    try {
        parallelIteratorFactory.setRowProjector(projector);
        parallelIteratorFactory.setColumnIndexes(columnIndexes);
        parallelIteratorFactory.setPkSlotIndexes(pkSlotIndexes);
        Tuple tuple;
        long totalRowCount = 0;
        StatementContext context = queryPlan.getContext();
        while ((tuple=iterator.next()) != null) {// Runs query
            Cell kv = tuple.getValue(0);
            totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
        }
        // Return total number of rows that have been updated. In the case of auto commit being off
        // the mutations will all be in the mutation state of the current connection.
        MutationState mutationState = new MutationState(maxSize, maxSizeBytes, statement.getConnection(), totalRowCount);
        /*
         *  All the metrics collected for measuring the reads done by the parallel mutating iterators
         *  is included in the ReadMetricHolder of the statement context. Include these metrics in the
         *  returned mutation state so they can be published on commit.
         */
        mutationState.setReadMetricQueue(context.getReadMetricsQueue());
        return mutationState;
    } finally {
        iterator.close();
    }
}
 
Example 6
Source File: BaseQueryPlan.java    From phoenix with Apache License 2.0
@Override
public ExplainPlan getExplainPlan() throws SQLException {
    if (context.getScanRanges() == ScanRanges.NOTHING) {
        return new ExplainPlan(Collections.singletonList("DEGENERATE SCAN OVER " + getTableRef().getTable().getName().getString()));
    }

    ResultIterator iterator = iterator();
    ExplainPlan explainPlan = new ExplainPlan(getPlanSteps(iterator));
    iterator.close();
    return explainPlan;
}
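In the method above, close() is not guarded by a finally block, so the iterator would not be closed if getPlanSteps threw. A hedged, leak-safe variant of the same body (names unchanged; only the try/finally is added):

ResultIterator iterator = iterator();
try {
    return new ExplainPlan(getPlanSteps(iterator));
} finally {
    iterator.close();
}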
 
Example 7
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
private static MutationState upsertSelect(PhoenixStatement statement, 
        TableRef tableRef, RowProjector projector, ResultIterator iterator, int[] columnIndexes,
        int[] pkSlotIndexes) throws SQLException {
    try {
        PhoenixConnection connection = statement.getConnection();
        ConnectionQueryServices services = connection.getQueryServices();
        int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
        boolean isAutoCommit = connection.getAutoCommit();
        byte[][] values = new byte[columnIndexes.length][];
        int rowCount = 0;
        Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutation = Maps.newHashMapWithExpectedSize(batchSize);
        PTable table = tableRef.getTable();
        ResultSet rs = new PhoenixResultSet(iterator, projector, statement);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        while (rs.next()) {
            for (int i = 0; i < values.length; i++) {
                PColumn column = table.getColumns().get(columnIndexes[i]);
                byte[] bytes = rs.getBytes(i+1);
                ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
                Object value = rs.getObject(i+1);
                int rsPrecision = rs.getMetaData().getPrecision(i+1);
                Integer precision = rsPrecision == 0 ? null : rsPrecision;
                int rsScale = rs.getMetaData().getScale(i+1);
                Integer scale = rsScale == 0 ? null : rsScale;
                // We are guaranteed that the two column will have compatible types,
                // as we checked that before.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
                        precision, scale,
                        column.getMaxLength(),column.getScale())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                        .setColumnName(column.getName().getString())
                        .setMessage("value=" + column.getDataType().toStringLiteral(ptr, null)).build().buildException();
                }
                column.getDataType().coerceBytes(ptr, value, column.getDataType(),
                        precision, scale, SortOrder.getDefault(),
                        column.getMaxLength(), column.getScale(), column.getSortOrder());
                values[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(tableRef, mutation, 0, maxSize, connection);
                connection.getMutationState().join(state);
                connection.commit();
                mutation.clear();
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        return new MutationState(tableRef, mutation, rowCount / batchSize * batchSize, maxSize, connection);
    } finally {
        iterator.close();
    }
}
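Note that the helper above closes the iterator it is given in its own finally block, so callers hand over ownership when they pass it in. A hedged caller sketch (the queryPlan, statement, tableRef, projector, columnIndexes, and pkSlotIndexes names are assumptions taken from the surrounding examples):

ResultIterator iterator = queryPlan.iterator();
// No try/finally needed here: upsertSelect() closes the iterator it receives.
MutationState state = upsertSelect(statement, tableRef, projector, iterator,
        columnIndexes, pkSlotIndexes);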
 
Example 8
Source File: HashCacheClient.java    From phoenix with Apache License 2.0
private void serialize(ImmutableBytesWritable ptr, ResultIterator iterator, long estimatedSize, List<Expression> onExpressions, boolean singleValueOnly, Expression keyRangeRhsExpression, List<Expression> keyRangeRhsValues) throws SQLException {
    long maxSize = serverCache.getConnection().getQueryServices().getProps().getLong(QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_SIZE);
    estimatedSize = Math.min(estimatedSize, maxSize);
    if (estimatedSize > Integer.MAX_VALUE) {
        throw new IllegalStateException("Estimated size(" + estimatedSize + ") must not be greater than Integer.MAX_VALUE(" + Integer.MAX_VALUE + ")");
    }
    try {
        TrustedByteArrayOutputStream baOut = new TrustedByteArrayOutputStream((int)estimatedSize);
        DataOutputStream out = new DataOutputStream(baOut);
        // Write onExpressions first, for hash key evaluation along with deserialization
        out.writeInt(onExpressions.size());
        for (Expression expression : onExpressions) {
            WritableUtils.writeVInt(out, ExpressionType.valueOf(expression).ordinal());
            expression.write(out);                
        }
        int exprSize = baOut.size() + Bytes.SIZEOF_INT;
        out.writeInt(exprSize * (singleValueOnly ? -1 : 1));
        int nRows = 0;
        out.writeInt(nRows); // In the end will be replaced with total number of rows            
        ImmutableBytesWritable tempPtr = new ImmutableBytesWritable();
        for (Tuple result = iterator.next(); result != null; result = iterator.next()) {
            TupleUtil.write(result, out);
            if (baOut.size() > maxSize) {
                throw new MaxServerCacheSizeExceededException("Size of hash cache (" + baOut.size() + " bytes) exceeds the maximum allowed size (" + maxSize + " bytes)");
            }
            // Evaluate key expressions for hash join key range optimization.
            if (keyRangeRhsExpression != null) {
                keyRangeRhsValues.add(evaluateKeyExpression(keyRangeRhsExpression, result, tempPtr));
            }
            nRows++;
        }
        TrustedByteArrayOutputStream sizeOut = new TrustedByteArrayOutputStream(Bytes.SIZEOF_INT);
        DataOutputStream dataOut = new DataOutputStream(sizeOut);
        try {
            dataOut.writeInt(nRows);
            dataOut.flush();
            byte[] cache = baOut.getBuffer();
            // Replace number of rows written above with the correct value.
            System.arraycopy(sizeOut.getBuffer(), 0, cache, exprSize, sizeOut.size());
            // Reallocate to actual size plus compressed buffer size (which is allocated below)
            int maxCompressedSize = Snappy.maxCompressedLength(baOut.size());
            byte[] compressed = new byte[maxCompressedSize]; // size for worst case
            int compressedSize = Snappy.compress(baOut.getBuffer(), 0, baOut.size(), compressed, 0);
            // Last realloc to size of compressed buffer.
            ptr.set(compressed,0,compressedSize);
        } finally {
            dataOut.close();
        }
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    } finally {
        iterator.close();
    }
}
 
Example 9
Source File: DeleteCompiler.java    From phoenix with Apache License 2.0
@Override
public MutationState execute() throws SQLException {
    ResultIterator iterator = bestPlan.iterator();
    try {
        // If we're not doing any pre or post processing, we can produce the delete mutations directly
        // in the parallel threads executed for the scan
        if (!hasPreOrPostProcessing) {
            Tuple tuple;
            long totalRowCount = 0;
            if (parallelIteratorFactory != null) {
                parallelIteratorFactory.setQueryPlan(bestPlan);
                parallelIteratorFactory.setOtherTableRefs(otherTableRefs);
                parallelIteratorFactory.setProjectedTableRef(projectedTableRef);
            }
            while ((tuple=iterator.next()) != null) {// Runs query
                Cell kv = tuple.getValue(0);
                totalRowCount += PLong.INSTANCE.getCodec().decodeLong(kv.getValueArray(), kv.getValueOffset(), SortOrder.getDefault());
            }
            // Return total number of rows that have been deleted from the table. In the case of auto commit being off
            // the mutations will all be in the mutation state of the current connection. We need to divide by the
            // total number of tables we updated as otherwise the client will get an inflated result.
            int totalTablesUpdateClientSide = 1; // data table is always updated
            PTable bestTable = bestPlan.getTableRef().getTable();
            // global immutable tables are also updated client side (but don't double count the data table)
            if (bestPlan != dataPlan && isMaintainedOnClient(bestTable)) {
                totalTablesUpdateClientSide++;
            }
            for (TableRef otherTableRef : otherTableRefs) {
                PTable otherTable = otherTableRef.getTable();
                // Don't double count the data table here (which morphs when it becomes a projected table, hence this check)
                if (projectedTableRef != otherTableRef && isMaintainedOnClient(otherTable)) {
                    totalTablesUpdateClientSide++;
                }
            }
            MutationState state = new MutationState(maxSize, maxSizeBytes, connection, totalRowCount/totalTablesUpdateClientSide);

            // set the read metrics accumulated in the parent context so that it can be published when the mutations are committed.
            state.setReadMetricQueue(context.getReadMetricsQueue());

            return state;
        } else {
            // Otherwise, we have to execute the query and produce the delete mutations in the single thread
            // producing the query results.
            return deleteRows(context, iterator, bestPlan, projectedTableRef, otherTableRefs);
        }
    } finally {
        iterator.close();
    }
}
 
Example 10
Source File: BaseQueryPlan.java    From phoenix with Apache License 2.0
private void getEstimates() throws SQLException {
    getEstimatesCalled = true;
    // Initialize a dummy iterator to get the estimates based on stats.
    ResultIterator iterator = iterator();
    iterator.close();
}
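The dummy iterator above exists only to trigger estimate collection from statistics and is closed immediately without being consumed. A hedged, slightly more defensive variant that keeps the close in a finally block in case work is ever added between the two calls:

private void getEstimates() throws SQLException {
    getEstimatesCalled = true;
    // Initialize a dummy iterator to populate the estimates from stats.
    ResultIterator iterator = iterator();
    try {
        // No rows are consumed; the iterator exists only for its side effects.
    } finally {
        iterator.close();
    }
}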