Java Code Examples for org.apache.phoenix.schema.PTable#getPKColumns()

The following examples show how to use org.apache.phoenix.schema.PTable#getPKColumns(). Each example is taken from an open source project; the source file, project, and license are noted above the code.
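
Nearly every example below follows the same idiom: compute the number of internal leading row key columns (the salt byte for salted tables, the tenant ID for tenant-specific connections to multi-tenant tables, and the view index ID for view indexes) and skip that many entries of the list returned by getPKColumns(). The helper below distills that idiom; it is a minimal sketch, and PkColumnUtil and getUserPkColumns are illustrative names, not part of the Phoenix API.

import java.util.List;

import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PTable;

public final class PkColumnUtil {

    private PkColumnUtil() {
    }

    // Returns the user-visible PK columns, skipping the internal leading
    // columns: the salt byte, the tenant ID (only when the connection is
    // tenant-specific and the table is multi-tenant), and the view index ID.
    public static List<PColumn> getUserPkColumns(PTable table, PhoenixConnection conn) {
        int offset = (table.getBucketNum() == null ? 0 : 1)
                + (table.isMultiTenant() && conn.getTenantId() != null ? 1 : 0)
                + (table.getViewIndexId() == null ? 0 : 1);
        List<PColumn> pkColumns = table.getPKColumns();
        return pkColumns.subList(offset, pkColumns.size());
    }
}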
Example 1
Source File: PhoenixRuntime.java    From phoenix with Apache License 2.0
private static Expression getFirstPKColumnExpression(PTable table) throws SQLException {
    if (table.getIndexType() == IndexType.LOCAL) {
        /*
         * With some hackery, we could deduce the tenant ID from a multi-tenant local index,
         * however it's not clear that we'd want to maintain the same prefixing of the region
         * start key, as the region boundaries may end up being different on a cluster being
         * replicated/backed-up to (which is the use case driving the method).
         */
        throw new SQLFeatureNotSupportedException();
    }
    
    // Skip the salt and viewIndexId columns.
    int pkPosition = (table.getBucketNum() == null ? 0 : 1) + (table.getViewIndexId() == null ? 0 : 1);
    List<PColumn> pkColumns = table.getPKColumns();
    return new RowKeyColumnExpression(pkColumns.get(pkPosition), new RowKeyValueAccessor(pkColumns, pkPosition));
}
 
Example 2
Source File: PhoenixRuntime.java    From phoenix with Apache License 2.0
private static List<PColumn> getPkColumns(PTable ptable, Connection conn) throws SQLException {
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    List<PColumn> pkColumns = ptable.getPKColumns();
    
    // Skip the salting column and the view index id column if present.
    // Skip the tenant id column too if the connection is tenant specific and the table used by the query plan is multi-tenant
    int offset = (ptable.getBucketNum() == null ? 0 : 1) + (ptable.isMultiTenant() && pConn.getTenantId() != null ? 1 : 0) + (ptable.getViewIndexId() == null ? 0 : 1);
    
    // get a sublist of pkColumns by skipping the offset columns.
    pkColumns = pkColumns.subList(offset, pkColumns.size());
    
    if (ptable.getType() == PTableType.INDEX) {
        // index tables have the same schema name as their parent/data tables.
        String fullDataTableName = ptable.getParentName().getString();
        
        // Get the corresponding columns of the data table.
        List<PColumn> dataColumns = IndexUtil.getDataColumns(fullDataTableName, pkColumns, pConn);
        pkColumns = dataColumns;
    }
    return pkColumns;
}
 
Example 3
Source File: PhoenixRuntime.java    From phoenix with Apache License 2.0
@Deprecated
private static List<PColumn> getPkColumns(PTable ptable, Connection conn, boolean forDataTable) throws SQLException {
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    List<PColumn> pkColumns = ptable.getPKColumns();
    
    // Skip the salting column and the view index id column if present.
    // Skip the tenant id column too if the connection is tenant specific and the table used by the query plan is multi-tenant
    int offset = (ptable.getBucketNum() == null ? 0 : 1) + (ptable.isMultiTenant() && pConn.getTenantId() != null ? 1 : 0) + (ptable.getViewIndexId() == null ? 0 : 1);
    
    // get a sublist of pkColumns by skipping the offset columns.
    pkColumns = pkColumns.subList(offset, pkColumns.size());
    
    if (ptable.getType() == PTableType.INDEX && forDataTable) {
        // index tables have the same schema name as their parent/data tables.
        String fullDataTableName = ptable.getParentName().getString();
        
        // Get the corresponding columns of the data table.
        List<PColumn> dataColumns = IndexUtil.getDataColumns(fullDataTableName, pkColumns, pConn);
        pkColumns = dataColumns;
    }
    return pkColumns;
}
 
Example 4
Source File: IndexUtil.java    From phoenix with Apache License 2.0
public static byte[][] getViewConstants(PTable dataTable) {
    if (dataTable.getType() != PTableType.VIEW && dataTable.getType() != PTableType.PROJECTED) return null;
    int dataPosOffset = (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0);
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    List<byte[]> viewConstants = new ArrayList<byte[]>();
    List<PColumn> dataPkColumns = dataTable.getPKColumns();
    for (int i = dataPosOffset; i < dataPkColumns.size(); i++) {
        PColumn dataPKColumn = dataPkColumns.get(i);
        if (dataPKColumn.getViewConstant() != null) {
            if (IndexUtil.getViewConstantValue(dataPKColumn, ptr)) {
                viewConstants.add(ByteUtil.copyKeyBytesIfNecessary(ptr));
            } else {
                throw new IllegalStateException();
            }
        }
    }
    return viewConstants.isEmpty() ? null : viewConstants
            .toArray(new byte[viewConstants.size()][]);
}
 
Example 5
Source File: PhoenixRuntime.java    From phoenix with Apache License 2.0
/**
 * Get an expression that may be used to evaluate the tenant ID of a given row in a
 * multi-tenant table. Both the SYSTEM.CATALOG table and the SYSTEM.SEQUENCE
 * table are considered multi-tenant.
 * @param conn open Phoenix connection
 * @param fullTableName full table name
 * @return an expression that may be evaluated for a row in the provided table, or
 * null if the table is not a multi-tenant table.
 * @throws SQLException thrown as a TableNotFoundException if the table name is not
 * found, or as a SQLFeatureNotSupportedException if a multi-tenant local index is
 * supplied.
 */
public static Expression getTenantIdExpression(Connection conn, String fullTableName) throws SQLException {
    PTable table = getTable(conn, fullTableName);
    // TODO: consider setting MULTI_TENANT = true for SYSTEM.CATALOG and SYSTEM.SEQUENCE
    if (!SchemaUtil.isMetaTable(table) && !SchemaUtil.isSequenceTable(table) && !table.isMultiTenant()) {
        return null;
    }
    if (table.getIndexType() == IndexType.LOCAL) {
        /*
         * With some hackery, we could deduce the tenant ID from a multi-tenant local index,
         * however it's not clear that we'd want to maintain the same prefixing of the region
         * start key, as the region boundaries may end up being different on a cluster being
         * replicated/backed-up to (which is the use case driving the method).
         */
        throw new SQLFeatureNotSupportedException();
    }
    
    int pkPosition = table.getBucketNum() == null ? 0 : 1;
    List<PColumn> pkColumns = table.getPKColumns();
    return new RowKeyColumnExpression(pkColumns.get(pkPosition), new RowKeyValueAccessor(pkColumns, pkPosition));
}
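
The returned expression can then be evaluated against a raw row from the table's underlying HBase table. A hedged sketch, assuming org.apache.phoenix.schema.tuple.ResultTuple as the Tuple wrapper for an HBase Result; the helper name tenantIdOf is illustrative:

// Decode the tenant ID from a raw HBase Result of a multi-tenant table;
// returns null if the table is not multi-tenant or the row has no tenant ID.
static Object tenantIdOf(Connection conn, String fullTableName, Result row) throws SQLException {
    Expression tenantIdExpr = PhoenixRuntime.getTenantIdExpression(conn, fullTableName);
    if (tenantIdExpr == null) {
        return null;
    }
    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
    if (!tenantIdExpr.evaluate(new ResultTuple(row), ptr)) {
        return null;
    }
    return tenantIdExpr.getDataType().toObject(ptr.get(), ptr.getOffset(), ptr.getLength());
}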
 
Example 6
Source File: IndexHalfStoreFileReaderGenerator.java    From phoenix with Apache License 2.0
private byte[][] getViewConstants(PTable dataTable) {
    int dataPosOffset = (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0);
    byte[][] viewConstants = null;
    int nViewConstants = 0;
    if (dataTable.getType() == PTableType.VIEW) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        List<PColumn> dataPkColumns = dataTable.getPKColumns();
        for (int i = dataPosOffset; i < dataPkColumns.size(); i++) {
            PColumn dataPKColumn = dataPkColumns.get(i);
            if (dataPKColumn.getViewConstant() != null) {
                nViewConstants++;
            }
        }
        if (nViewConstants > 0) {
            viewConstants = new byte[nViewConstants][];
            int j = 0;
            for (int i = dataPosOffset; i < dataPkColumns.size(); i++) {
                PColumn dataPkColumn = dataPkColumns.get(i);
                if (dataPkColumn.getViewConstant() != null) {
                    if (IndexUtil.getViewConstantValue(dataPkColumn, ptr)) {
                        viewConstants[j++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                    } else {
                        throw new IllegalStateException();
                    }
                }
            }
        }
    }
    return viewConstants;
}
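
This is the same view-constant extraction as in Example 4, except that it counts the matching columns in a first pass so the array can be sized exactly, instead of accumulating results in a list.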
 
Example 7
Source File: ExpressionUtil.java    From phoenix with Apache License 2.0
/**
 * Create {@link RowKeyColumnExpression}s from a {@link PTable}.
 * The second element of the returned pair is the row key column offset that must be
 * skipped when creating OrderBys, because for salted, multi-tenant, or view-index
 * tables some leading row key columns should be skipped.
 * @param table
 * @param phoenixConnection
 * @return
 */
public static Pair<List<RowKeyColumnExpression>,Integer> getRowKeyColumnExpressionsFromTable(PTable table, PhoenixConnection phoenixConnection) {
    int pkPositionOffset = getRowKeyColumnOffset(table, phoenixConnection);
    List<PColumn> pkColumns = table.getPKColumns();
    if(pkPositionOffset >= pkColumns.size()) {
        return new Pair<List<RowKeyColumnExpression>,Integer>(Collections.<RowKeyColumnExpression> emptyList(), 0);
    }
    List<RowKeyColumnExpression> rowKeyColumnExpressions = new ArrayList<RowKeyColumnExpression>(pkColumns.size() - pkPositionOffset);
    for(int index = pkPositionOffset; index < pkColumns.size(); index++) {
        RowKeyColumnExpression rowKeyColumnExpression =
                new RowKeyColumnExpression(pkColumns.get(index), new RowKeyValueAccessor(pkColumns, index));
        rowKeyColumnExpressions.add(rowKeyColumnExpression);
    }
    return new Pair<List<RowKeyColumnExpression>,Integer>(rowKeyColumnExpressions, pkPositionOffset);
}
 
Example 8
Source File: SchemaUtil.java    From phoenix with Apache License 2.0
/**
 * Estimate the max key length in bytes of the PK for a given table
 * @param table the table
 * @return the max PK length
 */
public static int estimateKeyLength(PTable table) {
    int maxKeyLength = 0;
    // Calculate the max length of a key (each part must currently be of a fixed width)
    int i = 0;
    List<PColumn> columns = table.getPKColumns();
    while (i < columns.size()) {
        PColumn keyColumn = columns.get(i++);
        PDataType type = keyColumn.getDataType();
        Integer maxLength = keyColumn.getMaxLength();
        maxKeyLength += !type.isFixedWidth() ? VAR_LENGTH_ESTIMATE : maxLength == null ? type.getByteSize() : maxLength;
    }
    return maxKeyLength;
}
 
Example 9
Source File: AlterMultiTenantTableWithViewsIT.java    From phoenix with Apache License 2.0
private int getIndexOfPkColumn(PhoenixConnection conn, String columnName, String tableName) throws SQLException {
    String normalizedTableName = SchemaUtil.normalizeIdentifier(tableName);
    PTable table = conn.getTable(new PTableKey(conn.getTenantId(), normalizedTableName));
    List<PColumn> pkCols = table.getPKColumns();
    String normalizedColumnName = SchemaUtil.normalizeIdentifier(columnName);
    int i = 0;
    for (PColumn pkCol : pkCols) {
        if (pkCol.getName().getString().equals(normalizedColumnName)) {
            return i;
        }
        i++;
    }
    return -1;
}
 
Example 10
Source File: IndexExpressionParseNodeRewriter.java    From phoenix with Apache License 2.0
public IndexExpressionParseNodeRewriter(PTable index, String alias, PhoenixConnection connection, Map<String, UDFParseNode> udfParseNodes) throws SQLException {
    indexedParseNodeToColumnParseNodeMap = Maps.newHashMapWithExpectedSize(index.getColumns().size());
    NamedTableNode tableNode = NamedTableNode.create(alias,
            TableName.create(index.getParentSchemaName().getString(), index.getParentTableName().getString()),
            Collections.<ColumnDef> emptyList());
    ColumnResolver dataResolver = FromCompiler.getResolver(tableNode, connection, udfParseNodes);
    StatementContext context = new StatementContext(new PhoenixStatement(connection), dataResolver);
    IndexStatementRewriter rewriter = new IndexStatementRewriter(dataResolver, null, true);
    ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
    int indexPosOffset = (index.getBucketNum() == null ? 0 : 1) + (index.isMultiTenant() ? 1 : 0) + (index.getViewIndexId() == null ? 0 : 1);
    List<PColumn> pkColumns = index.getPKColumns();
    for (int i = indexPosOffset; i < pkColumns.size(); ++i) {
        PColumn column = pkColumns.get(i);
        String expressionStr = IndexUtil.getIndexColumnExpressionStr(column);
        ParseNode expressionParseNode = SQLParser.parseCondition(expressionStr);
        String colName = "\"" + column.getName().getString() + "\"";
        Expression dataExpression = expressionParseNode.accept(expressionCompiler);
        PDataType expressionDataType = dataExpression.getDataType();
        ParseNode indexedParseNode = expressionParseNode.accept(rewriter);
        PDataType indexColType = IndexUtil.getIndexColumnDataType(dataExpression.isNullable(), expressionDataType);
        ParseNode columnParseNode = new ColumnParseNode(alias != null ? TableName.create(null, alias) : null, colName, null);
        if (indexColType != expressionDataType) {
            columnParseNode = NODE_FACTORY.cast(columnParseNode, expressionDataType, null, null);
        }
        indexedParseNodeToColumnParseNodeMap.put(indexedParseNode, columnParseNode);
    }
}
 
Example 11
Source File: AlterMultiTenantTableWithViewsIT.java    From phoenix with Apache License 2.0
private boolean checkColumnPartOfPk(PhoenixConnection conn, String columnName, String tableName) throws SQLException {
    String normalizedTableName = SchemaUtil.normalizeIdentifier(tableName);
    PTable table = conn.getTable(new PTableKey(conn.getTenantId(), normalizedTableName));
    List<PColumn> pkCols = table.getPKColumns();
    String normalizedColumnName = SchemaUtil.normalizeIdentifier(columnName);
    for (PColumn pkCol : pkCols) {
        if (pkCol.getName().getString().equals(normalizedColumnName)) {
            return true;
        }
    }
    return false;
}
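
Since this helper lives in the same test class as getIndexOfPkColumn (Example 9 above), it could simply delegate to it rather than repeating the loop; a possible simplification:

private boolean checkColumnPartOfPk(PhoenixConnection conn, String columnName, String tableName) throws SQLException {
    return getIndexOfPkColumn(conn, columnName, tableName) != -1;
}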
 
Example 12
Source File: PhoenixRuntime.java    From phoenix with Apache License 2.0
/**
 * Encode the primary key values from the table as a byte array. The values must
 * be in the same order as the primary key constraint. If the connection and
 * table are both tenant-specific, the tenant ID column must not be present in
 * the values.
 * @param conn an open connection
 * @param fullTableName the full table name
 * @param values the values of the primary key columns ordered in the same order
 *  as the primary key constraint
 * @return the encoded byte array
 * @throws SQLException if the table cannot be found or an incorrect number of
 *  values is provided
 * @see #decodePK(Connection, String, byte[]) to decode the byte[] back to the
 *  values
 */
@Deprecated
public static byte[] encodePK(Connection conn, String fullTableName, Object[] values) throws SQLException {
    PTable table = getTable(conn, fullTableName);
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    int offset = (table.getBucketNum() == null ? 0 : 1) + (table.isMultiTenant() && pconn.getTenantId() != null ? 1 : 0);
    List<PColumn> pkColumns = table.getPKColumns();
    if (pkColumns.size() - offset != values.length) {
        throw new SQLException("Expected " + (pkColumns.size() - offset) + " but got " + values.length);
    }
    PDataType type = null;
    TrustedByteArrayOutputStream output = new TrustedByteArrayOutputStream(table.getRowKeySchema().getEstimatedValueLength());
    try {
        for (int i = offset; i < pkColumns.size(); i++) {
            if (type != null && !type.isFixedWidth()) {
                output.write(QueryConstants.SEPARATOR_BYTE);
            }
            type = pkColumns.get(i).getDataType();

            //for fixed width data types like CHAR and BINARY, we need to pad values to be of max length.
            Object paddedObj = type.pad(values[i - offset], pkColumns.get(i).getMaxLength());
            byte[] value = type.toBytes(paddedObj);
            output.write(value);
        }
        return output.toByteArray();
    } finally {
        try {
            output.close();
        } catch (IOException e) {
            throw new RuntimeException(e); // Impossible
        }
    }
}
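
A round-trip sketch using the matching (and likewise deprecated) decodePK, which the @see tag above references; the table name and values are illustrative and assume an open Connection conn:

// Encode the PK values of a hypothetical two-column primary key, then decode them back.
byte[] key = PhoenixRuntime.encodePK(conn, "MY_SCHEMA.MY_TABLE", new Object[] { "id-1", 42 });
Object[] values = PhoenixRuntime.decodePK(conn, "MY_SCHEMA.MY_TABLE", key);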
 
Example 13
Source File: BaseQueryPlan.java    From phoenix with Apache License 2.0
private void serializeViewConstantsIntoScan(Scan scan, PTable dataTable) {
    int dataPosOffset = (dataTable.getBucketNum() != null ? 1 : 0) + (dataTable.isMultiTenant() ? 1 : 0);
    int nViewConstants = 0;
    if (dataTable.getType() == PTableType.VIEW) {
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        List<PColumn> dataPkColumns = dataTable.getPKColumns();
        for (int i = dataPosOffset; i < dataPkColumns.size(); i++) {
            PColumn dataPKColumn = dataPkColumns.get(i);
            if (dataPKColumn.getViewConstant() != null) {
                nViewConstants++;
            }
        }
        if (nViewConstants > 0) {
            byte[][] viewConstants = new byte[nViewConstants][];
            int j = 0;
            for (int i = dataPosOffset; i < dataPkColumns.size(); i++) {
                PColumn dataPkColumn = dataPkColumns.get(i);
                if (dataPkColumn.getViewConstant() != null) {
                    if (IndexUtil.getViewConstantValue(dataPkColumn, ptr)) {
                        viewConstants[j++] = ByteUtil.copyKeyBytesIfNecessary(ptr);
                    } else {
                        throw new IllegalStateException();
                    }
                }
            }
            serializeViewConstantsIntoScan(viewConstants, scan);
        }
    }
}
 
Example 14
Source File: IndexExpressionParseNodeRewriter.java    From phoenix with Apache License 2.0
public IndexExpressionParseNodeRewriter(PTable index, PhoenixConnection connection) throws SQLException {
    indexedParseNodeToColumnParseNodeMap = Maps.newHashMapWithExpectedSize(index.getColumns().size());
    NamedTableNode tableNode = NamedTableNode.create(null,
            TableName.create(index.getParentSchemaName().getString(), index.getParentTableName().getString()),
            Collections.<ColumnDef> emptyList());
    ColumnResolver dataResolver = FromCompiler.getResolver(tableNode, connection);
    StatementContext context = new StatementContext(new PhoenixStatement(connection), dataResolver);
    IndexStatementRewriter rewriter = new IndexStatementRewriter(dataResolver, null);
    ExpressionCompiler expressionCompiler = new ExpressionCompiler(context);
    ColumnParseNodeVisitor columnParseNodeVisitor = new ColumnParseNodeVisitor();
    int indexPosOffset = (index.getBucketNum() == null ? 0 : 1) + (index.isMultiTenant() ? 1 : 0) + (index.getViewIndexId() == null ? 0 : 1);
    List<PColumn> pkColumns = index.getPKColumns();
    for (int i = indexPosOffset; i < pkColumns.size(); ++i) {
        PColumn column = pkColumns.get(i);
        String expressionStr = IndexUtil.getIndexColumnExpressionStr(column);
        ParseNode expressionParseNode = SQLParser.parseCondition(expressionStr);
        columnParseNodeVisitor.reset();
        expressionParseNode.accept(columnParseNodeVisitor);
        String colName = column.getName().getString();
        if (columnParseNodeVisitor.isParseNodeCaseSensitive()) {
            // force the column name to be case sensitive by surrounding it with double quotes
            colName = "\"" + colName + "\"";
        }

        Expression dataExpression = expressionParseNode.accept(expressionCompiler);
        PDataType expressionDataType = dataExpression.getDataType();
        ParseNode indexedParseNode = expressionParseNode.accept(rewriter);
        PDataType indexColType = IndexUtil.getIndexColumnDataType(dataExpression.isNullable(), expressionDataType);
        ParseNode columnParseNode = new ColumnParseNode(null, colName, null);
        if (indexColType != expressionDataType) {
            columnParseNode = NODE_FACTORY.cast(columnParseNode, expressionDataType, null, null);
        }
        indexedParseNodeToColumnParseNodeMap.put(indexedParseNode, columnParseNode);
    }
}
 
Example 15
Source File: PhoenixDatabaseMetaData.java    From phoenix with Apache License 2.0
@Override
public ResultSet getPrimaryKeys(String catalog, String schemaName, String tableName)
        throws SQLException {
    if (tableName == null || tableName.length() == 0) {
        return emptyResultSet;
    }
    String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
    PTable table = PhoenixRuntime.getTableNoCache(connection, fullTableName);
    boolean isSalted = table.getBucketNum() != null;
    boolean tenantColSkipped = false;
    List<PColumn> pkColumns = table.getPKColumns();
    List<PColumn> sortedPkColumns =
            Lists.newArrayList(pkColumns.subList(isSalted ? 1 : 0, pkColumns.size()));
    // sort the columns by name
    Collections.sort(sortedPkColumns, new Comparator<PColumn>() {
        @Override public int compare(PColumn c1, PColumn c2) {
            return c1.getName().getString().compareTo(c2.getName().getString());
        }
    });

    try {
        List<Tuple> tuples = Lists.newArrayListWithExpectedSize(10);
        ResultSet rs = getTables(catalog, schemaName, tableName, null);
        while (rs.next()) {
            String tenantId = rs.getString(TABLE_CAT);
            for (PColumn column : sortedPkColumns) {
                String columnName = column.getName().getString();
                // generate row key
                // TENANT_ID, TABLE_SCHEM, TABLE_NAME, COLUMN_NAME are row key columns
                byte[] rowKey =
                        SchemaUtil.getColumnKey(tenantId, schemaName, tableName, columnName, null);

                // add one cell for each column info
                List<Cell> cells = Lists.newArrayListWithCapacity(8);
                // KEY_SEQ_COLUMN
                byte[] keySeqBytes = ByteUtil.EMPTY_BYTE_ARRAY;
                int pkPos = pkColumns.indexOf(column);
                if (pkPos != -1) {
                    short keySeq =
                            (short) (pkPos + 1 - (isSalted ? 1 : 0) - (tenantColSkipped ? 1 : 0));
                    keySeqBytes = PSmallint.INSTANCE.toBytes(keySeq);
                }
                cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, KEY_SEQ_BYTES,
                    MetaDataProtocol.MIN_TABLE_TIMESTAMP, keySeqBytes));
                // PK_NAME
                cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, PK_NAME_BYTES,
                    MetaDataProtocol.MIN_TABLE_TIMESTAMP, table.getPKName() != null
                            ? table.getPKName().getBytes() : ByteUtil.EMPTY_BYTE_ARRAY));
                // ASC_OR_DESC
                char sortOrder = column.getSortOrder() == SortOrder.ASC ? 'A' : 'D';
                cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES,
                    ASC_OR_DESC_BYTES, MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                    Bytes.toBytes(sortOrder)));
                // DATA_TYPE
                cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, DATA_TYPE_BYTES,
                    MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                    PInteger.INSTANCE.toBytes(column.getDataType().getResultSetSqlType())));
                // TYPE_NAME
                cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES,
                    Bytes.toBytes(TYPE_NAME), MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                    column.getDataType().getSqlTypeNameBytes()));
                // COLUMN_SIZE
                cells.add(
                    PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, COLUMN_SIZE_BYTES,
                        MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                        column.getMaxLength() != null
                                ? PInteger.INSTANCE.toBytes(column.getMaxLength())
                                : ByteUtil.EMPTY_BYTE_ARRAY));
                // TYPE_ID
                cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES,
                    Bytes.toBytes(TYPE_ID), MetaDataProtocol.MIN_TABLE_TIMESTAMP,
                    PInteger.INSTANCE.toBytes(column.getDataType().getSqlType())));
                // VIEW_CONSTANT
                cells.add(PhoenixKeyValueUtil.newKeyValue(rowKey, TABLE_FAMILY_BYTES, VIEW_CONSTANT_BYTES,
                    MetaDataProtocol.MIN_TABLE_TIMESTAMP, column.getViewConstant() != null
                            ? column.getViewConstant() : ByteUtil.EMPTY_BYTE_ARRAY));
                Collections.sort(cells, new CellComparatorImpl());
                Tuple tuple = new MultiKeyValueTuple(cells);
                tuples.add(tuple);
            }
        }
        return new PhoenixResultSet(new MaterializedResultIterator(tuples),
                GET_PRIMARY_KEYS_ROW_PROJECTOR,
                new StatementContext(new PhoenixStatement(connection), false));
    } finally {
        if (connection.getAutoCommit()) {
            connection.commit();
        }
    }
}
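
Applications reach this implementation through the standard JDBC metadata API; a minimal usage sketch (the connection URL, schema, and table name are illustrative):

try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
    DatabaseMetaData md = conn.getMetaData();
    try (ResultSet rs = md.getPrimaryKeys(null, "MY_SCHEMA", "MY_TABLE")) {
        while (rs.next()) {
            // KEY_SEQ is 1-based and already excludes the salt byte
            System.out.println(rs.getString("COLUMN_NAME") + " KEY_SEQ=" + rs.getShort("KEY_SEQ"));
        }
    }
}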
 
Example 16
Source File: PhoenixMetadata.java    From presto with Apache License 2.0
private Map<String, Object> getTableProperties(ConnectorSession session, JdbcTableHandle handle)
{
    ImmutableMap.Builder<String, Object> properties = ImmutableMap.builder();

    try (PhoenixConnection connection = phoenixClient.getConnection(JdbcIdentity.from(session));
            HBaseAdmin admin = connection.getQueryServices().getAdmin()) {
        String schemaName = toPhoenixSchemaName(Optional.ofNullable(handle.getSchemaName())).orElse(null);
        PTable table = getTable(connection, SchemaUtil.getTableName(schemaName, handle.getTableName()));

        boolean salted = table.getBucketNum() != null;
        StringJoiner joiner = new StringJoiner(",");
        List<PColumn> pkColumns = table.getPKColumns();
        for (PColumn pkColumn : pkColumns.subList(salted ? 1 : 0, pkColumns.size())) {
            joiner.add(pkColumn.getName().getString());
        }
        properties.put(PhoenixTableProperties.ROWKEYS, joiner.toString());

        if (table.getBucketNum() != null) {
            properties.put(PhoenixTableProperties.SALT_BUCKETS, table.getBucketNum());
        }
        if (table.isWALDisabled()) {
            properties.put(PhoenixTableProperties.DISABLE_WAL, table.isWALDisabled());
        }
        if (table.isImmutableRows()) {
            properties.put(PhoenixTableProperties.IMMUTABLE_ROWS, table.isImmutableRows());
        }

        String defaultFamilyName = QueryConstants.DEFAULT_COLUMN_FAMILY;
        if (table.getDefaultFamilyName() != null) {
            defaultFamilyName = table.getDefaultFamilyName().getString();
            properties.put(PhoenixTableProperties.DEFAULT_COLUMN_FAMILY, defaultFamilyName);
        }

        HTableDescriptor tableDesc = admin.getTableDescriptor(table.getPhysicalName().getBytes());

        HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
        for (HColumnDescriptor columnFamily : columnFamilies) {
            if (columnFamily.getNameAsString().equals(defaultFamilyName)) {
                if (!"NONE".equals(columnFamily.getBloomFilterType().toString())) {
                    properties.put(PhoenixTableProperties.BLOOMFILTER, columnFamily.getBloomFilterType().toString());
                }
                if (columnFamily.getMaxVersions() != 1) {
                    properties.put(PhoenixTableProperties.VERSIONS, columnFamily.getMaxVersions());
                }
                if (columnFamily.getMinVersions() > 0) {
                    properties.put(PhoenixTableProperties.MIN_VERSIONS, columnFamily.getMinVersions());
                }
                if (!columnFamily.getCompression().toString().equals("NONE")) {
                    properties.put(PhoenixTableProperties.COMPRESSION, columnFamily.getCompression().toString());
                }
                if (columnFamily.getTimeToLive() < FOREVER) {
                    properties.put(PhoenixTableProperties.TTL, columnFamily.getTimeToLive());
                }
                break;
            }
        }
    }
    catch (IOException | SQLException e) {
        throw new PrestoException(PHOENIX_METADATA_ERROR, "Couldn't get Phoenix table properties", e);
    }
    return properties.build();
}
 
Example 17
Source File: AppendOnlySchemaIT.java    From phoenix with Apache License 2.0
private void testAddColumns(boolean sameClient) throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    try (Connection conn1 = DriverManager.getConnection(getUrl(), props);
            Connection conn2 = sameClient ? conn1 : DriverManager.getConnection(getUrl(), props)) {

        String metricTableName = generateUniqueName();
        String viewName = generateUniqueName();
        String metricIdSeqTableName = generateUniqueName();

        // create sequence for auto partition
        conn1.createStatement().execute("CREATE SEQUENCE " + metricIdSeqTableName + " CACHE 1");
        // create base table
        conn1.createStatement().execute("CREATE TABLE " + metricTableName + " (metricId INTEGER NOT NULL, metricVal1 DOUBLE, CONSTRAINT PK PRIMARY KEY(metricId))"
                + " APPEND_ONLY_SCHEMA = true, UPDATE_CACHE_FREQUENCY=1, AUTO_PARTITION_SEQ=" + metricIdSeqTableName);
        // create view
        String ddl =
                "CREATE VIEW IF NOT EXISTS "
                        + viewName + "( hostName varchar NOT NULL,"
                        + " CONSTRAINT HOSTNAME_PK PRIMARY KEY (hostName))"
                        + " AS SELECT * FROM " + metricTableName
                        + " UPDATE_CACHE_FREQUENCY=300000";
        conn1.createStatement().execute(ddl);
        
        conn1.createStatement().execute("UPSERT INTO " + viewName + "(hostName, metricVal1) VALUES('host1', 1.0)");
        conn1.commit();

        // execute ddl that creates that same view with an additional pk column and regular column
        // and also changes the order of the pk columns (which is not respected since we only 
        // allow appending columns)
        ddl =
                "CREATE VIEW IF NOT EXISTS "
                        + viewName + "( instanceName varchar, hostName varchar, metricVal2 double, metricVal1 double"
                        + " CONSTRAINT HOSTNAME_PK PRIMARY KEY (instancename, hostName))"
                        + " AS SELECT * FROM " + metricTableName
                        + " UPDATE_CACHE_FREQUENCY=300000";
        conn2.createStatement().execute(ddl);

        conn2.createStatement().execute(
            "UPSERT INTO " + viewName + "(hostName, instanceName, metricVal1, metricval2) VALUES('host2', 'instance2', 21.0, 22.0)");
        conn2.commit();
        
        conn1.createStatement().execute("UPSERT INTO " + viewName + "(hostName, metricVal1) VALUES('host3', 3.0)");
        conn1.commit();
        
        // verify data exists
        ResultSet rs = conn2.createStatement().executeQuery("SELECT * from " + viewName);
        
        // verify the two columns were added correctly
        PTable table =
                conn2.unwrap(PhoenixConnection.class).getTable(new PTableKey(null, viewName));
        List<PColumn> pkColumns = table.getPKColumns();
        assertEquals(3, pkColumns.size());
        // even though the second create view statement changed the order of the pk, the original order is maintained
        PColumn metricId = pkColumns.get(0);
        assertEquals("METRICID", metricId.getName().getString());
        assertFalse(metricId.isNullable());
        PColumn hostName = pkColumns.get(1);
        assertEquals("HOSTNAME", hostName.getName().getString());
        // hostName is not nullable even though the second create statement changed it to nullable,
        // since we only allow appending columns
        assertFalse(hostName.isNullable());
        PColumn instanceName = pkColumns.get(2);
        assertEquals("INSTANCENAME", instanceName.getName().getString());
        assertTrue(instanceName.isNullable());
        List<PColumn> columns = table.getColumns();
        assertEquals("METRICID", columns.get(0).getName().getString());
        assertEquals("METRICVAL1", columns.get(1).getName().getString());
        assertEquals("HOSTNAME", columns.get(2).getName().getString());
        assertEquals("INSTANCENAME", columns.get(3).getName().getString());
        assertEquals("METRICVAL2", columns.get(4).getName().getString());
        
        // verify the data
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(1.0, rs.getDouble(2), 1e-6);
        assertEquals("host1", rs.getString(3));
        assertEquals(null, rs.getString(4));
        assertEquals(0.0, rs.getDouble(5), 1e-6);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(21.0, rs.getDouble(2), 1e-6);
        assertEquals("host2", rs.getString(3));
        assertEquals("instance2", rs.getString(4));
        assertEquals(22.0, rs.getDouble(5), 1e-6);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertEquals(3.0, rs.getDouble(2), 1e-6);
        assertEquals("host3", rs.getString(3));
        assertEquals(null, rs.getString(4));
        assertEquals(0.0, rs.getDouble(5), 1e-6);
        assertFalse(rs.next());
    }
}