Java Code Examples for org.apache.phoenix.schema.PTable#getIndexType()

The following examples show how to use org.apache.phoenix.schema.PTable#getIndexType(). Each example is taken from the Apache Phoenix project; the original source file is noted above each example.
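
At its core, PTable#getIndexType() reports whether a table is a LOCAL or GLOBAL index (it may be null for a table that is not an index, which is why several examples below compare against a specific IndexType rather than negating the other). A minimal sketch of the common pattern, not taken from the Phoenix source and assuming a PTable dataTable obtained elsewhere (variable names are illustrative only):

for (PTable index : dataTable.getIndexes()) {
    if (index.getIndexType() == PTable.IndexType.LOCAL) {
        // local index: index rows are co-located with the data table's regions
    } else if (index.getIndexType() == PTable.IndexType.GLOBAL) {
        // global index: index rows live in a separate physical table
    }
}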
Example 1
Source File: BaseQueryPlan.java    From phoenix with Apache License 2.0
private void serializeIndexMaintainerIntoScan(Scan scan, PTable dataTable) throws SQLException {
    PName name = context.getCurrentTable().getTable().getName();
    List<PTable> indexes = Lists.newArrayListWithExpectedSize(1);
    for (PTable index : dataTable.getIndexes()) {
        if (index.getName().equals(name) && index.getIndexType() == IndexType.LOCAL) {
            indexes.add(index);
            break;
        }
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    IndexMaintainer.serialize(dataTable, ptr, indexes, context.getConnection());
    scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO, ByteUtil.copyKeyBytesIfNecessary(ptr));
    if (dataTable.isTransactional()) {
        scan.setAttribute(BaseScannerRegionObserver.TX_STATE, context.getConnection().getMutationState().encodeTransaction());
    }
}
 
Example 2
Source File: DefaultParallelScanGrouper.java    From phoenix with Apache License 2.0
@Override
public boolean shouldStartNewScan(QueryPlan plan, List<Scan> scans, byte[] startKey, boolean crossedRegionBoundary) {
  PTable table = plan.getTableRef().getTable();
  boolean startNewScanGroup = false;
  if (!plan.isRowKeyOrdered()) {
    startNewScanGroup = true;
  } else if (crossedRegionBoundary) {
    if (table.getIndexType() == IndexType.LOCAL) {
      startNewScanGroup = true;
    } else if (table.getBucketNum() != null) {
      startNewScanGroup = scans.isEmpty() ||
          ScanUtil.crossesPrefixBoundary(startKey,
              ScanUtil.getPrefix(scans.get(scans.size()-1).getStartRow(), SaltingUtil.NUM_SALTING_BYTES),
              SaltingUtil.NUM_SALTING_BYTES);
    }
  }
  return startNewScanGroup;
}
 
Example 3
Source File: PhoenixRuntime.java    From phoenix with Apache License 2.0
/**
 * Get expression that may be used to evaluate the tenant ID of a given row in a
 * multi-tenant table. Both the SYSTEM.CATALOG table and the SYSTEM.SEQUENCE
 * table are considered multi-tenant.
 * @param conn open Phoenix connection
 * @param fullTableName full table name
 * @return an expression that may be evaluated for a row in the provided table, or
 * null if the table is not a multi-tenant table.
 * @throws SQLException if the table name is not found (as a TableNotFoundException)
 * or if a multi-tenant local index is supplied (as a SQLFeatureNotSupportedException).
 */
public static Expression getTenantIdExpression(Connection conn, String fullTableName) throws SQLException {
    PTable table = getTable(conn, fullTableName);
    // TODO: consider setting MULTI_TENANT = true for SYSTEM.CATALOG and SYSTEM.SEQUENCE
    if (!SchemaUtil.isMetaTable(table) && !SchemaUtil.isSequenceTable(table) && !table.isMultiTenant()) {
        return null;
    }
    if (table.getIndexType() == IndexType.LOCAL) {
        /*
         * With some hackery, we could deduce the tenant ID from a multi-tenant local index,
         * however it's not clear that we'd want to maintain the same prefixing of the region
         * start key, as the region boundaries may end up being different on a cluster being
         * replicated/backed-up to (which is the use case driving the method).
         */
        throw new SQLFeatureNotSupportedException();
    }
    
    int pkPosition = table.getBucketNum() == null ? 0 : 1;
    List<PColumn> pkColumns = table.getPKColumns();
    return new RowKeyColumnExpression(pkColumns.get(pkPosition), new RowKeyValueAccessor(pkColumns, pkPosition));
}
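
A minimal usage sketch for the method above (not part of the Phoenix source; the JDBC URL and the table name MY_MT_TABLE are illustrative):

try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
    // Returns null when MY_MT_TABLE is neither multi-tenant nor SYSTEM.CATALOG / SYSTEM.SEQUENCE
    Expression tenantIdExpr = PhoenixRuntime.getTenantIdExpression(conn, "MY_MT_TABLE");
    if (tenantIdExpr != null) {
        // the expression can be evaluated against rows of MY_MT_TABLE to extract the tenant ID
    }
}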
 
Example 4
Source File: PhoenixRuntime.java    From phoenix with Apache License 2.0
private static Expression getFirstPKColumnExpression(PTable table) throws SQLException {
    if (table.getIndexType() == IndexType.LOCAL) {
        /*
         * With some hackery, we could deduce the tenant ID from a multi-tenant local index,
         * however it's not clear that we'd want to maintain the same prefixing of the region
         * start key, as the region boundaries may end up being different on a cluster being
         * replicated/backed-up to (which is the use case driving the method).
         */
        throw new SQLFeatureNotSupportedException();
    }
    
    // skip salt and viewIndexId columns.
    int pkPosition = (table.getBucketNum() == null ? 0 : 1) + (table.getViewIndexId() == null ? 0 : 1);
    List<PColumn> pkColumns = table.getPKColumns();
    return new RowKeyColumnExpression(pkColumns.get(pkPosition), new RowKeyValueAccessor(pkColumns, pkPosition));
}
 
Example 5
Source File: UpgradeUtil.java    From phoenix with Apache License 2.0
/**
 * Synchronize certain properties across column families of global index tables for a given base table
 * @param cqs ConnectionQueryServices used to get the table descriptor for a PTable
 * @param baseTable base table
 * @param defaultColFam column family to be used for synchronizing properties
 * @param syncedProps Map of properties to be kept in sync as read from the default column family descriptor
 * @param tableDescsToSync set of modified table descriptors
 */
private static void syncGlobalIndexesForTable(ConnectionQueryServices cqs, PTable baseTable, ColumnFamilyDescriptor defaultColFam,
        Map<String, Object> syncedProps, Set<TableDescriptor> tableDescsToSync) throws SQLException {
    for (PTable indexTable: baseTable.getIndexes()) {
        // We already handle local index property synchronization when considering all column families of the base table
        if (indexTable.getIndexType() == IndexType.GLOBAL) {
            addTableDescIfPropsChanged(cqs.getTableDescriptor(indexTable.getPhysicalName().getBytes()),
                    defaultColFam, syncedProps, tableDescsToSync);
        }
    }
}
 
Example 6
Source File: SchemaUtil.java    From phoenix with Apache License 2.0
public static boolean hasGlobalIndex(PTable table) {
    for (PTable index : table.getIndexes()) {
        if (index.getIndexType() == IndexType.GLOBAL) {
            return true;
        }
    }
    return false;
}
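
A short usage sketch (not from the Phoenix source; the open connection conn and the table name are assumed):

PTable table = PhoenixRuntime.getTable(conn, "MY_SCHEMA.MY_TABLE");
if (SchemaUtil.hasGlobalIndex(table)) {
    // at least one index defined on MY_SCHEMA.MY_TABLE has IndexType.GLOBAL
}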
 
Example 7
Source File: IndexScrutinyMapper.java    From phoenix with Apache License 2.0
private long getTableTtl() throws SQLException, IOException {
    PTable pSourceTable = PhoenixRuntime.getTable(connection, qSourceTable);
    if (pSourceTable.getType() == PTableType.INDEX
            && pSourceTable.getIndexType() == PTable.IndexType.LOCAL) {
        return Integer.MAX_VALUE;
    }
    ConnectionQueryServices
            cqsi = connection.unwrap(PhoenixConnection.class).getQueryServices();
    Admin admin = cqsi.getAdmin();
    String physicalTable = getSourceTableName(pSourceTable,
            SchemaUtil.isNamespaceMappingEnabled(null, cqsi.getProps()));
    HTableDescriptor tableDesc = admin.getTableDescriptor(TableName.valueOf(physicalTable));
    return tableDesc.getFamily(SchemaUtil.getEmptyColumnFamily(pSourceTable)).getTimeToLive();
}
 
Example 8
Source File: DeleteCompiler.java    From phoenix with Apache License 2.0
private static boolean isMaintainedOnClient(PTable table) {
    // Test for not being local (rather than being GLOBAL) so that this doesn't fail
    // when tested with our projected table.
    return (table.getIndexType() != IndexType.LOCAL && (table.isTransactional() || table.isImmutableRows())) ||
           (table.getIndexType() == IndexType.LOCAL && (table.isTransactional() &&
            table.getTransactionProvider().getTransactionProvider().isUnsupported(Feature.MAINTAIN_LOCAL_INDEX_ON_SERVER) ) );
}
 
Example 9
Source File: FromCompiler.java    From phoenix with Apache License 2.0
private ProjectedTableColumnResolver(PTable projectedTable, PhoenixConnection conn, Map<String, UDFParseNode> udfParseNodes) throws SQLException {
    super(conn, 0, udfParseNodes, null);
    Preconditions.checkArgument(projectedTable.getType() == PTableType.PROJECTED);
    this.isLocalIndex = projectedTable.getIndexType() == IndexType.LOCAL;
    this.columnRefMap = new HashMap<ColumnRef, Integer>();
    long ts = Long.MAX_VALUE;
    for (int i = projectedTable.getBucketNum() == null ? 0 : 1; i < projectedTable.getColumns().size(); i++) {
        PColumn column = projectedTable.getColumns().get(i);
        ColumnRef colRef = ((ProjectedColumn) column).getSourceColumnRef();
        TableRef tableRef = colRef.getTableRef();
        if (!tables.contains(tableRef)) {
            String alias = tableRef.getTableAlias();
            if (alias != null) {
                this.tableMap.put(alias, tableRef);
            }
            String name = tableRef.getTable().getName().getString();
            if (alias == null || !alias.equals(name)) {
                tableMap.put(name, tableRef);
            }
            tables.add(tableRef);
            if (tableRef.getLowerBoundTimeStamp() < ts) {
                ts = tableRef.getLowerBoundTimeStamp();
            }
        }
        this.columnRefMap.put(new ColumnRef(tableRef, colRef.getColumnPosition()), column.getPosition());
    }
    this.theTableRefs = ImmutableList.of(new TableRef(ParseNodeFactory.createTempAlias(), projectedTable, ts, false));
    
}
 
Example 10
Source File: IndexHalfStoreFileReaderGenerator.java    From phoenix with Apache License 2.0
/**
 * @param env region coprocessor environment
 * @param store local index store
 * @return StoreScanner for new local index data for the passed store, or null if repair is not possible
 * @throws IOException
 */
private InternalScanner getRepairScanner(RegionCoprocessorEnvironment env, Store store) throws IOException {
    //List<KeyValueScanner> scannersForStoreFiles = Lists.newArrayListWithExpectedSize(store.getStorefilesCount());
    Scan scan = new Scan();
    scan.readVersions(store.getColumnFamilyDescriptor().getMaxVersions());
    for (Store s : env.getRegion().getStores()) {
        if (!IndexUtil.isLocalIndexStore(s)) {
            scan.addFamily(s.getColumnFamilyDescriptor().getName());
        }
    }
    try {
        PhoenixConnection conn = QueryUtil.getConnectionOnServer(env.getConfiguration())
                .unwrap(PhoenixConnection.class);
        PTable dataPTable = IndexUtil.getPDataTable(conn, env.getRegion().getTableDescriptor());
        final List<IndexMaintainer> maintainers = Lists
                .newArrayListWithExpectedSize(dataPTable.getIndexes().size());
        for (PTable index : dataPTable.getIndexes()) {
            if (index.getIndexType() == IndexType.LOCAL) {
                maintainers.add(index.getIndexMaintainer(dataPTable, conn));
            }
        }
        return new DataTableLocalIndexRegionScanner(env.getRegion().getScanner(scan), env.getRegion(),
                maintainers, store.getColumnFamilyDescriptor().getName(), env.getConfiguration());
    } catch (SQLException e) {
        throw new IOException(e);
    }
}
 
Example 11
Source File: BaseQueryPlan.java    From phoenix with Apache License 2.0
private void serializeIndexMaintainerIntoScan(Scan scan, PTable dataTable) {
    PName name = context.getCurrentTable().getTable().getName();
    List<PTable> indexes = Lists.newArrayListWithExpectedSize(1);
    for (PTable index : dataTable.getIndexes()) {
        if (index.getName().equals(name) && index.getIndexType() == IndexType.LOCAL) {
            indexes.add(index);
            break;
        }
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    IndexMaintainer.serialize(dataTable, ptr, indexes, context.getConnection());
    scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD, ByteUtil.copyKeyBytesIfNecessary(ptr));
}
 
Example 12
Source File: ScanUtil.java    From phoenix with Apache License 2.0
public static final boolean canQueryBeExecutedSerially(PTable table, OrderBy orderBy, StatementContext context) {
    /*
     * If ordering by columns not on the PK axis, we can't execute a query serially because we
     * need to do a merge sort across all the scans which isn't possible with SerialIterators.
     * Similar reasoning follows for salted and local index tables when ordering rows in a row
     * key order. Serial execution is OK in other cases since SerialIterators will execute scans
     * in the correct order.
     */
    if (!orderBy.getOrderByExpressions().isEmpty()
            || ((table.getBucketNum() != null || table.getIndexType() == IndexType.LOCAL) && shouldRowsBeInRowKeyOrder(
                orderBy, context))) {
        return false;
    }
    return true;
}
 
Example 13
Source File: ProjectionCompiler.java    From phoenix with Apache License 2.0
private static void projectIndexColumnFamily(StatementContext context, String cfName, TableRef tableRef, List<Expression> projectedExpressions, List<ExpressionProjector> projectedColumns) throws SQLException {
    PTable index = tableRef.getTable();
    PhoenixConnection conn = context.getConnection();
    String tableName = index.getParentName().getString();
    PTable table = conn.getMetaDataCache().getTable(new PTableKey(conn.getTenantId(), tableName));
    PColumnFamily pfamily = table.getColumnFamily(cfName);
    for (PColumn column : pfamily.getColumns()) {
        String indexColName = IndexUtil.getIndexColumnName(column);
        PColumn indexColumn = null;
        ColumnRef ref = null;
        try {
            indexColumn = index.getColumn(indexColName);
            ref = new ColumnRef(tableRef, indexColumn.getPosition());
        } catch (ColumnNotFoundException e) {
            if (index.getIndexType() == IndexType.LOCAL) {
                try {
                    ref = new LocalIndexDataColumnRef(context, indexColName);
                    indexColumn = ref.getColumn();
                } catch (ColumnFamilyNotFoundException c) {
                    throw e;
                }
            } else {
                throw e;
            }
        }
        Expression expression = ref.newColumnExpression();
        projectedExpressions.add(expression);
        String colName = column.getName().toString();
        boolean isCaseSensitive = !SchemaUtil.normalizeIdentifier(colName).equals(colName);
        projectedColumns.add(new ExpressionProjector(colName, 
                tableRef.getTableAlias() == null ? table.getName().getString() : tableRef.getTableAlias(), expression, isCaseSensitive));
    }
}
 
Example 14
Source File: ServerBuildIndexCompiler.java    From phoenix with Apache License 2.0
public MutationPlan compile(PTable index) throws SQLException {
    try (final PhoenixStatement statement = new PhoenixStatement(connection)) {
        String query = "SELECT count(*) FROM " + tableName;
        this.plan = statement.compileQuery(query);
        TableRef tableRef = plan.getTableRef();
        Scan scan = plan.getContext().getScan();
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        dataTable = tableRef.getTable();
        if (index.getIndexType() == PTable.IndexType.GLOBAL &&  dataTable.isTransactional()) {
            throw new IllegalArgumentException(
                    "ServerBuildIndexCompiler does not support global indexes on transactional tables");
        }
        IndexMaintainer indexMaintainer = index.getIndexMaintainer(dataTable, connection);
        // By default, we'd use a FirstKeyOnly filter as nothing else needs to be projected for count(*).
        // However, in this case, we need to project all of the data columns that contribute to the index.
        for (ColumnReference columnRef : indexMaintainer.getAllColumns()) {
            if (index.getImmutableStorageScheme() == PTable.ImmutableStorageScheme.SINGLE_CELL_ARRAY_WITH_OFFSETS) {
                scan.addFamily(columnRef.getFamily());
            } else {
                scan.addColumn(columnRef.getFamily(), columnRef.getQualifier());
            }
        }
        IndexMaintainer.serialize(dataTable, ptr, Collections.singletonList(index), plan.getContext().getConnection());
        // Set the scan attributes that UngroupedAggregateRegionObserver will switch on.
        // For local indexes, the BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO attribute, and
        // for global indexes PhoenixIndexCodec.INDEX_PROTO_MD attribute is set to the serialized form of index
        // metadata to build index rows from data table rows. For global indexes, we also need to set (1) the
        // BaseScannerRegionObserver.REBUILD_INDEXES attribute in order to signal UngroupedAggregateRegionObserver
        // that this scan is for building global indexes and (2) the MetaDataProtocol.PHOENIX_VERSION attribute
        // that will be passed as a mutation attribute for the scanned mutations that will be applied on
        // the index table possibly remotely
        if (index.getIndexType() == PTable.IndexType.LOCAL) {
            scan.setAttribute(BaseScannerRegionObserver.LOCAL_INDEX_BUILD_PROTO, ByteUtil.copyKeyBytesIfNecessary(ptr));
        } else {
            scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr));
            scan.setAttribute(BaseScannerRegionObserver.REBUILD_INDEXES, TRUE_BYTES);
            ScanUtil.setClientVersion(scan, MetaDataProtocol.PHOENIX_VERSION);
            scan.setAttribute(BaseScannerRegionObserver.INDEX_REBUILD_PAGING, TRUE_BYTES);
            // Serialize page row size only if we're overriding, else use server side value
            String rebuildPageRowSize =
                    connection.getQueryServices().getProps()
                            .get(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS);
            if (rebuildPageRowSize != null) {
                scan.setAttribute(BaseScannerRegionObserver.INDEX_REBUILD_PAGE_ROWS,
                    Bytes.toBytes(Long.valueOf(rebuildPageRowSize)));
            }
            BaseQueryPlan.serializeViewConstantsIntoScan(scan, dataTable);
            addEmptyColumnToScan(scan, indexMaintainer.getDataEmptyKeyValueCF(), indexMaintainer.getEmptyKeyValueQualifier());
        }
        if (dataTable.isTransactional()) {
            scan.setAttribute(BaseScannerRegionObserver.TX_STATE, connection.getMutationState().encodeTransaction());
        }

        // Go through MutationPlan abstraction so that we can create local indexes
        // with a connectionless connection (which makes testing easier).
        return new RowCountMutationPlan(plan.getContext(), PhoenixStatement.Operation.UPSERT);
    }
}
 
Example 15
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
private static void setValues(byte[][] values, int[] pkSlotIndex, int[] columnIndexes,
        PTable table, MultiRowMutationState mutation,
        PhoenixStatement statement, boolean useServerTimestamp, IndexMaintainer maintainer,
        byte[][] viewConstants, byte[] onDupKeyBytes, int numSplColumns) throws SQLException {
    long columnValueSize = 0;
    Map<PColumn,byte[]> columnValues = Maps.newHashMapWithExpectedSize(columnIndexes.length);
    byte[][] pkValues = new byte[table.getPKColumns().size()][];
    // If the table uses salting, the first byte is the salting byte; set a single-byte
    // placeholder here and fill in the actual salt byte later in PRowImpl.
    if (table.getBucketNum() != null) {
        pkValues[0] = new byte[] {0};
    }
    for(int i = 0; i < numSplColumns; i++) {
        pkValues[i + (table.getBucketNum() != null ? 1 : 0)] = values[i];
    }
    Long rowTimestamp = null; // case when the table doesn't have a row timestamp column
    RowTimestampColInfo rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp);
    for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
        byte[] value = values[j];
        PColumn column = table.getColumns().get(columnIndexes[i]);
        if (SchemaUtil.isPKColumn(column)) {
            pkValues[pkSlotIndex[i]] = value;
            if (SchemaUtil.getPKPosition(table, column) == table.getRowTimestampColPos()) {
                if (!useServerTimestamp) {
                    PColumn rowTimestampCol = table.getPKColumns().get(table.getRowTimestampColPos());
                    rowTimestamp = PLong.INSTANCE.getCodec().decodeLong(value, 0, rowTimestampCol.getSortOrder());
                    if (rowTimestamp < 0) {
                        throw new IllegalDataException("Value of a column designated as ROW_TIMESTAMP cannot be less than zero");
                    }
                    rowTsColInfo = new RowTimestampColInfo(useServerTimestamp, rowTimestamp);
                } 
            }
        } else {
            columnValues.put(column, value);
            columnValueSize += (column.getEstimatedSize() + value.length);
        }
    }
    ImmutableBytesPtr ptr = new ImmutableBytesPtr();
    table.newKey(ptr, pkValues);
    if (table.getIndexType() == IndexType.LOCAL && maintainer != null) {
        byte[] rowKey = maintainer.buildDataRowKey(ptr, viewConstants);
        HRegionLocation region =
                statement.getConnection().getQueryServices()
                        .getTableRegionLocation(table.getParentName().getBytes(), rowKey);
        byte[] regionPrefix =
                region.getRegion().getStartKey().length == 0 ? new byte[region
                        .getRegion().getEndKey().length] : region.getRegion()
                        .getStartKey();
        if (regionPrefix.length != 0) {
            ptr.set(ScanRanges.prefixKey(ptr.get(), 0, ptr.getLength(), regionPrefix,
                regionPrefix.length));
        }
    } 
    mutation.put(ptr, new RowMutationState(columnValues, columnValueSize, statement.getConnection().getStatementExecutionCounter(), rowTsColInfo, onDupKeyBytes));
}
 
Example 16
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
public static MutationState upsertSelect(StatementContext childContext, TableRef tableRef,
        RowProjector projector, ResultIterator iterator, int[] columnIndexes,
        int[] pkSlotIndexes, boolean useServerTimestamp,
        boolean prefixSysColValues) throws SQLException {
    PhoenixStatement statement = childContext.getStatement();
    PhoenixConnection connection = statement.getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    int maxSizeBytes =
            services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB,
                QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE_BYTES);
    int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
    // we automatically flush the mutations when either auto commit is enabled, or
    // the target table is transactional (in that case changes are not visible until we commit)
    final boolean autoFlush = connection.getAutoCommit() || tableRef.getTable().isTransactional();
    int sizeOffset = 0;
    int numSplColumns =
            (tableRef.getTable().isMultiTenant() ? 1 : 0)
                    + (tableRef.getTable().getViewIndexId() != null ? 1 : 0);
    byte[][] values = new byte[columnIndexes.length + numSplColumns][];
    if(prefixSysColValues) {
        int i = 0;
        if(tableRef.getTable().isMultiTenant()) {
            values[i++] = connection.getTenantId().getBytes();
        }
        if(tableRef.getTable().getViewIndexId() != null) {
            values[i++] = PSmallint.INSTANCE.toBytes(tableRef.getTable().getViewIndexId());
        }
    }
    int rowCount = 0;
    MultiRowMutationState mutation = new MultiRowMutationState(batchSize);
    PTable table = tableRef.getTable();
    IndexMaintainer indexMaintainer = null;
    byte[][] viewConstants = null;
    if (table.getIndexType() == IndexType.LOCAL) {
        PTable parentTable =
                statement
                        .getConnection()
                        .getMetaDataCache()
                        .getTableRef(
                            new PTableKey(statement.getConnection().getTenantId(), table
                                    .getParentName().getString())).getTable();
        indexMaintainer = table.getIndexMaintainer(parentTable, connection);
        viewConstants = IndexUtil.getViewConstants(parentTable);
    }
    try (ResultSet rs = new PhoenixResultSet(iterator, projector, childContext)) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        while (rs.next()) {
            for (int i = 0, j = numSplColumns; j < values.length; j++, i++) {
                PColumn column = table.getColumns().get(columnIndexes[i]);
                byte[] bytes = rs.getBytes(i + 1);
                ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
                Object value = rs.getObject(i + 1);
                int rsPrecision = rs.getMetaData().getPrecision(i + 1);
                Integer precision = rsPrecision == 0 ? null : rsPrecision;
                int rsScale = rs.getMetaData().getScale(i + 1);
                Integer scale = rsScale == 0 ? null : rsScale;
                // We are guaranteed that the two columns will have compatible types,
                // as we checked that before.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
                        SortOrder.getDefault(), precision,
                        scale, column.getMaxLength(), column.getScale())) {
                    throw new SQLExceptionInfo.Builder(
                        SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY).setColumnName(
                                column.getName().getString())
                        .setMessage("value=" + column.getDataType()
                                .toStringLiteral(ptr, null)).build()
                        .buildException();
                }
                column.getDataType().coerceBytes(ptr, value, column.getDataType(), 
                        precision, scale, SortOrder.getDefault(), 
                        column.getMaxLength(), column.getScale(), column.getSortOrder(),
                        table.rowKeyOrderOptimizable());
                values[j] = ByteUtil.copyKeyBytesIfNecessary(ptr);
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation, statement,
                    useServerTimestamp, indexMaintainer, viewConstants, null,
                    numSplColumns);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (autoFlush && rowCount % batchSize == 0) {
                MutationState state = new MutationState(tableRef, mutation, 0,
                        maxSize, maxSizeBytes, connection);
                connection.getMutationState().join(state);
                connection.getMutationState().send();
                mutation.clear();
            }
        }

        if (autoFlush) {
            // If auto commit is true, this last batch will be committed upon return
            sizeOffset = rowCount / batchSize * batchSize;
        }
        return new MutationState(tableRef, mutation, sizeOffset, maxSize,
                maxSizeBytes, connection);
    }
}
 
Example 17
Source File: IndexUtil.java    From phoenix with Apache License 2.0
public static void setScanAttributesForIndexReadRepair(Scan scan, PTable table, PhoenixConnection phoenixConnection) throws SQLException {
    if (table.isTransactional() || table.getType() != PTableType.INDEX) {
        return;
    }
    PTable indexTable = table;
    if (indexTable.getIndexType() != PTable.IndexType.GLOBAL) {
        return;
    }
    String schemaName = indexTable.getParentSchemaName().getString();
    String tableName = indexTable.getParentTableName().getString();
    PTable dataTable;
    try {
        dataTable = PhoenixRuntime.getTable(phoenixConnection, SchemaUtil.getTableName(schemaName, tableName));
    } catch (TableNotFoundException e) {
        // This index table must be being deleted. No need to set the scan attributes
        return;
    }
    // MetaDataClient modifies the index table name for view indexes if the parent view of an index has a child
    // view. Thus, we need to recreate a PTable object with the correct table name for the rest of this code to work
    if (indexTable.getViewIndexId() != null && indexTable.getName().getString().contains(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR)) {
        int lastIndexOf = indexTable.getName().getString().lastIndexOf(QueryConstants.CHILD_VIEW_INDEX_NAME_SEPARATOR);
        String indexName = indexTable.getName().getString().substring(lastIndexOf + 1);
        indexTable = PhoenixRuntime.getTable(phoenixConnection, indexName);
    }
    if (!dataTable.getIndexes().contains(indexTable)) {
        return;
    }
    if (scan.getAttribute(PhoenixIndexCodec.INDEX_PROTO_MD) == null) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        IndexMaintainer.serialize(dataTable, ptr, Collections.singletonList(indexTable), phoenixConnection);
        scan.setAttribute(PhoenixIndexCodec.INDEX_PROTO_MD, ByteUtil.copyKeyBytesIfNecessary(ptr));
    }
    scan.setAttribute(BaseScannerRegionObserver.CHECK_VERIFY_COLUMN, TRUE_BYTES);
    scan.setAttribute(BaseScannerRegionObserver.PHYSICAL_DATA_TABLE_NAME, dataTable.getPhysicalName().getBytes());
    IndexMaintainer indexMaintainer = indexTable.getIndexMaintainer(dataTable, phoenixConnection);
    byte[] emptyCF = indexMaintainer.getEmptyKeyValueFamily().copyBytesIfNecessary();
    byte[] emptyCQ = indexMaintainer.getEmptyKeyValueQualifier();
    scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_FAMILY_NAME, emptyCF);
    scan.setAttribute(BaseScannerRegionObserver.EMPTY_COLUMN_QUALIFIER_NAME, emptyCQ);
    if (scan.getAttribute(BaseScannerRegionObserver.VIEW_CONSTANTS) == null) {
        BaseQueryPlan.serializeViewConstantsIntoScan(scan, dataTable);
    }
    addEmptyColumnToScan(scan, emptyCF, emptyCQ);
}
 
Example 18
Source File: SchemaUtil.java    From phoenix with Apache License 2.0
public static byte[] getEmptyColumnFamily(PTable table) {
    List<PColumnFamily> families = table.getColumnFamilies();
    return families.isEmpty()
            ? (table.getDefaultFamilyName() == null
                ? (table.getIndexType() == IndexType.LOCAL
                    ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES
                    : QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES)
                : table.getDefaultFamilyName().getBytes())
            : families.get(0).getName().getBytes();
}
 
Example 19
Source File: SchemaUtil.java    From phoenix with Apache License 2.0
public static String getEmptyColumnFamilyAsString(PTable table) {
    List<PColumnFamily> families = table.getColumnFamilies();
    return families.isEmpty()
            ? (table.getDefaultFamilyName() == null
                ? (table.getIndexType() == IndexType.LOCAL
                    ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY
                    : QueryConstants.DEFAULT_COLUMN_FAMILY)
                : table.getDefaultFamilyName().getString())
            : families.get(0).getName().getString();
}
 
Example 20
Source File: SchemaUtil.java    From phoenix with Apache License 2.0
public static ImmutableBytesPtr getEmptyColumnFamilyPtr(PTable table) {
    List<PColumnFamily> families = table.getColumnFamilies();
    return families.isEmpty()
            ? (table.getDefaultFamilyName() == null
                ? (table.getIndexType() == IndexType.LOCAL
                    ? QueryConstants.DEFAULT_LOCAL_INDEX_COLUMN_FAMILY_BYTES_PTR
                    : QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES_PTR)
                : table.getDefaultFamilyName().getBytesPtr())
            : families.get(0).getName().getBytesPtr();
}