Java Code Examples for org.apache.phoenix.util.SchemaUtil#getTableKey()

The following examples show how to use org.apache.phoenix.util.SchemaUtil#getTableKey(). You can vote up the examples you like or vote down the ones you don't, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: ConnectionQueryServicesImpl.java    From phoenix with Apache License 2.0 6 votes vote down vote up
@Override
public MetaDataMutationResult updateIndexState(final List<Mutation> tableMetaData, String parentTableName) throws SQLException {
    // Decode the var-char components (tenant/schema/table) of the first
    // mutation's row key; only the schema and table slots are read below.
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata);
    // NOTE(review): the tenant id is deliberately passed as an empty byte
    // array instead of rowKeyMetadata[TENANT_ID_INDEX] — confirm that index
    // state rows are always keyed under the global (empty) tenant.
    byte[] tableKey = SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX], rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX]);
    // Route an UpdateIndexState RPC to the SYSTEM.CATALOG region owning
    // tableKey, forwarding each metadata mutation as a serialized proto.
    return metaDataCoprocessorExec(tableKey,
            new Batch.Call<MetaDataService, MetaDataResponse>() {
                @Override
                public MetaDataResponse call(MetaDataService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<MetaDataResponse> rpcCallback =
                            new BlockingRpcCallback<MetaDataResponse>();
                    UpdateIndexStateRequest.Builder builder = UpdateIndexStateRequest.newBuilder();
                    for (Mutation m : tableMetaData) {
                        MutationProto mp = ProtobufUtil.toProto(m);
                        builder.addTableMetadataMutations(mp.toByteString());
                    }
                    instance.updateIndexState(controller, builder.build(), rpcCallback);
                    // Re-throw any server-side failure captured by the controller.
                    if(controller.getFailedOn() != null) {
                        throw controller.getFailedOn();
                    }
                    return rpcCallback.get();
                }
            });
}
 
Example 2
Source File: MetaDataEndpointImpl.java    From phoenix with Apache License 2.0 6 votes vote down vote up
/**
 * Endpoint RPC: evicts a single table's {@code PTable} entry from this
 * region server's global metadata cache. The cache key is the
 * SYSTEM.CATALOG row key built from (tenantId, schemaName, tableName).
 * Failures are logged and reported back through the controller.
 */
@Override
public void clearTableFromCache(RpcController controller, ClearTableFromCacheRequest request,
        RpcCallback<ClearTableFromCacheResponse> done) {
    byte[] schemaName = request.getSchemaName().toByteArray();
    byte[] tableName = request.getTableName().toByteArray();
    try {
        byte[] tenantId = request.getTenantId().toByteArray();
        byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
        ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(key);
        Cache<ImmutableBytesPtr, PTable> metaDataCache =
                GlobalCache.getInstance(this.env).getMetaDataCache();
        metaDataCache.invalidate(cacheKey);
    } catch (Throwable t) {
        // Fixed: this log line previously said "incrementTableTimeStamp failed"
        // (a copy/paste from a sibling method), misattributing failures.
        logger.error("clearTableFromCache failed", t);
        ProtobufUtil.setControllerException(controller,
            ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
 
Example 3
Source File: ConnectionQueryServicesImpl.java    From phoenix with Apache License 2.0 5 votes vote down vote up
@Override
public MetaDataMutationResult getTable(final PName tenantId, final byte[] schemaBytes, final byte[] tableBytes,
        final long tableTimestamp, final long clientTimestamp) throws SQLException {
    // A null tenant means the global tenant; the row key uses empty bytes.
    final byte[] tenantIdBytes = tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes();
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
    // Issue a GetTable RPC against the SYSTEM.CATALOG region owning tableKey.
    // Both timestamps are forwarded so the server can decide whether the
    // client's cached copy of the table is still current.
    return metaDataCoprocessorExec(tableKey,
        new Batch.Call<MetaDataService, MetaDataResponse>() {
            @Override
            public MetaDataResponse call(MetaDataService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<MetaDataResponse> rpcCallback =
                        new BlockingRpcCallback<MetaDataResponse>();
                GetTableRequest.Builder builder = GetTableRequest.newBuilder();
                builder.setTenantId(HBaseZeroCopyByteString.wrap(tenantIdBytes));
                builder.setSchemaName(HBaseZeroCopyByteString.wrap(schemaBytes));
                builder.setTableName(HBaseZeroCopyByteString.wrap(tableBytes));
                builder.setTableTimestamp(tableTimestamp);
                builder.setClientTimestamp(clientTimestamp);

               instance.getTable(controller, builder.build(), rpcCallback);
               // Re-throw any server-side failure captured by the controller.
               if(controller.getFailedOn() != null) {
                   throw controller.getFailedOn();
               }
               return rpcCallback.get();
            }
        });
}
 
Example 4
Source File: MetaDataEndpointImpl.java    From phoenix with Apache License 2.0 5 votes vote down vote up
/**
 * Resolves an index table by its SYSTEM.CATALOG row key and appends it to
 * the caller-supplied list. Throws (via {@link ServerUtil}) when no index
 * row is visible at the given client timestamp.
 */
private void addIndexToTable(PName tenantId, PName schemaName, PName indexName, PName tableName, long clientTimeStamp, List<PTable> indexes) throws IOException, SQLException {
    // Null tenant maps to the global (empty) tenant in the row key.
    byte[] tenantIdBytes = tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes();
    byte[] indexKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName.getBytes(), indexName.getBytes());
    PTable resolvedIndex = doGetTable(indexKey, clientTimeStamp);
    if (resolvedIndex != null) {
        indexes.add(resolvedIndex);
        return;
    }
    ServerUtil.throwIOException("Index not found", new TableNotFoundException(schemaName.getString(), indexName.getString()));
}
 
Example 5
Source File: ConnectionQueryServicesImpl.java    From phoenix with Apache License 2.0 4 votes vote down vote up
@Override
public MetaDataMutationResult dropTable(final List<Mutation> tableMetaData, final PTableType tableType, final boolean cascade) throws SQLException {
    // Decode tenant/schema/table from the first mutation's row key to build
    // the SYSTEM.CATALOG key that routes the coprocessor call.
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantIdBytes, schemaBytes, tableBytes);
    // Execute the DropTable RPC on the owning SYSTEM.CATALOG region,
    // forwarding the metadata mutations plus table type and cascade flag.
    final MetaDataMutationResult result =  metaDataCoprocessorExec(tableKey,
            new Batch.Call<MetaDataService, MetaDataResponse>() {
                @Override
                public MetaDataResponse call(MetaDataService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<MetaDataResponse> rpcCallback =
                            new BlockingRpcCallback<MetaDataResponse>();
                    DropTableRequest.Builder builder = DropTableRequest.newBuilder();
                    for (Mutation m : tableMetaData) {
                        MutationProto mp = ProtobufUtil.toProto(m);
                        builder.addTableMetadataMutations(mp.toByteString());
                    }
                    builder.setTableType(tableType.getSerializedValue());
                    builder.setCascade(cascade);

                    instance.dropTable(controller, builder.build(), rpcCallback);
                    // Re-throw any server-side failure captured by the controller.
                    if(controller.getFailedOn() != null) {
                        throw controller.getFailedOn();
                    }
                    return rpcCallback.get();
                }
            });

    final MutationCode code = result.getMutationCode();
    switch(code) {
    // NOTE(review): TABLE_ALREADY_EXISTS appears to be the success code for
    // a drop here (post-drop cleanup only runs under it) — confirm against
    // the MutationCode contract.
    case TABLE_ALREADY_EXISTS:
        ReadOnlyProps props = this.getProps();
        boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
        if (dropMetadata) {
            dropTables(result.getTableNamesToDelete());
        }
        invalidateTables(result.getTableNamesToDelete());
        if (tableType == PTableType.TABLE) {
            // For base tables, also tear down auxiliary physical tables
            // (view-index and local-index) and drop cached stats.
            byte[] physicalName = SchemaUtil.getTableNameAsBytes(schemaBytes, tableBytes);
            long timestamp = MetaDataUtil.getClientTimeStamp(tableMetaData);
            ensureViewIndexTableDropped(physicalName, timestamp);
            ensureLocalIndexTableDropped(physicalName, timestamp);
            tableStatsCache.invalidate(new ImmutableBytesPtr(physicalName));
        }
        break;
    default:
        break;
    }
      return result;
}
 
Example 6
Source File: ConnectionQueryServicesImpl.java    From phoenix with Apache License 2.0 4 votes vote down vote up
@Override
public MetaDataMutationResult dropColumn(final List<Mutation> tableMetaData, PTableType tableType) throws SQLException {
    // Decode tenant/schema/table from the first mutation's row key to build
    // the SYSTEM.CATALOG key that routes the coprocessor call.
    byte[][] rowKeyMetadata = new byte[3][];
    SchemaUtil.getVarChars(tableMetaData.get(0).getRow(), rowKeyMetadata);
    byte[] tenantIdBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
    byte[] schemaBytes = rowKeyMetadata[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
    byte[] tableBytes = rowKeyMetadata[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
    // Execute the DropColumn RPC on the owning SYSTEM.CATALOG region.
    MetaDataMutationResult result = metaDataCoprocessorExec(tableKey,
        new Batch.Call<MetaDataService, MetaDataResponse>() {
            @Override
            public MetaDataResponse call(MetaDataService instance) throws IOException {
                ServerRpcController controller = new ServerRpcController();
                BlockingRpcCallback<MetaDataResponse> rpcCallback =
                        new BlockingRpcCallback<MetaDataResponse>();
                DropColumnRequest.Builder builder = DropColumnRequest.newBuilder();
                for (Mutation m : tableMetaData) {
                    MutationProto mp = ProtobufUtil.toProto(m);
                    builder.addTableMetadataMutations(mp.toByteString());
                }
                instance.dropColumn(controller, builder.build(), rpcCallback);
                // Re-throw any server-side failure captured by the controller.
                if(controller.getFailedOn() != null) {
                    throw controller.getFailedOn();
                }
                return rpcCallback.get();
            }
        });
    final MutationCode code = result.getMutationCode();
    switch(code) {
    // NOTE(review): TABLE_ALREADY_EXISTS appears to signal success here —
    // only then are dropped physical tables removed and caches invalidated.
    case TABLE_ALREADY_EXISTS:
        final ReadOnlyProps props = this.getProps();
        final boolean dropMetadata = props.getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
        if (dropMetadata) {
            dropTables(result.getTableNamesToDelete());
        }
        invalidateTables(result.getTableNamesToDelete());
        break;
    default:
        break;
    }
    return result;

}
 
Example 7
Source File: MetaDataEndpointImpl.java    From phoenix with Apache License 2.0 4 votes vote down vote up
@Override
public void getTable(RpcController controller, GetTableRequest request,
        RpcCallback<MetaDataResponse> done) {
    // Endpoint RPC: look up a table definition by its SYSTEM.CATALOG row key
    // and return it only if the client's cached copy is stale.
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    byte[] tenantId = request.getTenantId().toByteArray();
    byte[] schemaName = request.getSchemaName().toByteArray();
    byte[] tableName = request.getTableName().toByteArray();
    byte[] key = SchemaUtil.getTableKey(tenantId, schemaName, tableName);
    long tableTimeStamp = request.getTableTimestamp();

    try {
        // TODO: check that key is within region.getStartKey() and region.getEndKey()
        // and return special code to force client to lookup region from meta.
        HRegion region = env.getRegion();
        // Reject the call if the key does not belong to this region; a
        // non-null result encodes the error to return to the client.
        MetaDataMutationResult result = checkTableKeyInRegion(key, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }

        long currentTime = EnvironmentEdgeManager.currentTimeMillis();
        PTable table = doGetTable(key, request.getClientTimestamp());
        if (table == null) {
            builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_NOT_FOUND);
            builder.setMutationTime(currentTime);
            done.run(builder.build());
            return;
        }
        builder.setReturnCode(MetaDataProtos.MutationCode.TABLE_ALREADY_EXISTS);
        builder.setMutationTime(currentTime);

        // Only ship the full PTable when the client's timestamp differs from
        // the server copy — otherwise the response confirms freshness only.
        if (table.getTimeStamp() != tableTimeStamp) {
            builder.setTable(PTableImpl.toProto(table));
        }
        done.run(builder.build());
        return;
    } catch (Throwable t) {
    	logger.error("getTable failed", t);
        ProtobufUtil.setControllerException(controller,
            ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
 
Example 8
Source File: MetaDataEndpointImpl.java    From phoenix with Apache License 2.0 4 votes vote down vote up
@Override
public void dropTable(RpcController controller, DropTableRequest request,
        RpcCallback<MetaDataResponse> done) {
    // Endpoint RPC: drop a table (or view/index) by applying the client's
    // metadata deletions under row locks on this SYSTEM.CATALOG region.
    MetaDataResponse.Builder builder = MetaDataResponse.newBuilder();
    boolean isCascade = request.getCascade();
    byte[][] rowKeyMetaData = new byte[3][];
    String tableType = request.getTableType();
    byte[] schemaName = null;
    byte[] tableName = null;

    try {
        List<Mutation> tableMetadata = ProtobufUtil.getMutations(request);
        MetaDataUtil.getTenantIdAndSchemaAndTableName(tableMetadata, rowKeyMetaData);
        byte[] tenantIdBytes = rowKeyMetaData[PhoenixDatabaseMetaData.TENANT_ID_INDEX];
        schemaName = rowKeyMetaData[PhoenixDatabaseMetaData.SCHEMA_NAME_INDEX];
        tableName = rowKeyMetaData[PhoenixDatabaseMetaData.TABLE_NAME_INDEX];
        // Disallow deletion of a system table
        if (tableType.equals(PTableType.SYSTEM.getSerializedValue())) {
            builder.setReturnCode(MetaDataProtos.MutationCode.UNALLOWED_TABLE_MUTATION);
            builder.setMutationTime(EnvironmentEdgeManager.currentTimeMillis());
            done.run(builder.build());
            return;
        }
        List<byte[]> tableNamesToDelete = Lists.newArrayList();
        // When dropping an index, the lock is taken on the parent table's
        // row while the drop itself targets the index's row.
        byte[] parentTableName = MetaDataUtil.getParentTableName(tableMetadata);
        byte[] lockTableName = parentTableName == null ? tableName : parentTableName;
        byte[] lockKey = SchemaUtil.getTableKey(tenantIdBytes, schemaName, lockTableName);
        byte[] key =
                parentTableName == null ? lockKey : SchemaUtil.getTableKey(tenantIdBytes,
                    schemaName, tableName);

        HRegion region = env.getRegion();
        // Reject the call if the key does not belong to this region.
        MetaDataMutationResult result = checkTableKeyInRegion(key, region);
        if (result != null) {
            done.run(MetaDataMutationResult.toProto(result));
            return;
        }
        List<RowLock> locks = Lists.newArrayList();

        try {
            // Lock the parent row first, then the table row if distinct
            // (reference comparison is safe: key aliases lockKey when equal).
            acquireLock(region, lockKey, locks);
            if (key != lockKey) {
                acquireLock(region, key, locks);
            }
            List<ImmutableBytesPtr> invalidateList = new ArrayList<ImmutableBytesPtr>();
            result =
                    doDropTable(key, tenantIdBytes, schemaName, tableName, parentTableName,
                        PTableType.fromSerializedValue(tableType), tableMetadata,
                        invalidateList, locks, tableNamesToDelete, isCascade);
            if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
                done.run(MetaDataMutationResult.toProto(result));
                return;
            }
            Cache<ImmutableBytesPtr,PTable> metaDataCache = GlobalCache.getInstance(this.env).getMetaDataCache();
            // Commit the list of deletion.
            region.mutateRowsWithLocks(tableMetadata, Collections.<byte[]> emptySet());
            long currentTime = MetaDataUtil.getClientTimeStamp(tableMetadata);
            // Replace cached entries with deleted-table markers so concurrent
            // readers observe the drop instead of stale definitions.
            for (ImmutableBytesPtr ckey : invalidateList) {
                metaDataCache.put(ckey, newDeletedTableMarker(currentTime));
            }
            if (parentTableName != null) {
                // The parent's cached definition still references the dropped
                // child (e.g. an index), so evict it too.
                ImmutableBytesPtr parentCacheKey = new ImmutableBytesPtr(lockKey);
                metaDataCache.invalidate(parentCacheKey);
            }
            done.run(MetaDataMutationResult.toProto(result));
            return;
        } finally {
            region.releaseRowLocks(locks);
        }
    } catch (Throwable t) {
      logger.error("dropTable failed", t);
        ProtobufUtil.setControllerException(controller,
            ServerUtil.createIOException(SchemaUtil.getTableName(schemaName, tableName), t));
    }
}
 
Example 9
Source File: MetaDataClient.java    From phoenix with Apache License 2.0 4 votes vote down vote up
// Client-side DROP TABLE/VIEW/INDEX: builds the metadata delete mutations,
// sends them to the query services, then performs local cleanup (connection
// cache removal, auxiliary index tables, stats) based on the result code.
private MutationState dropTable(String schemaName, String tableName, String parentTableName, PTableType tableType,
        boolean ifExists, boolean cascade) throws SQLException {
    connection.rollback();
    // Auto-commit is toggled during cleanup; restore it in the finally block.
    boolean wasAutoCommit = connection.getAutoCommit();
    try {
        PName tenantId = connection.getTenantId();
        String tenantIdStr = tenantId == null ? null : tenantId.getString();
        byte[] key = SchemaUtil.getTableKey(tenantIdStr, schemaName, tableName);
        Long scn = connection.getSCN();
        // With no SCN set, delete at the latest timestamp.
        long clientTimeStamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
        List<Mutation> tableMetaData = Lists.newArrayListWithExpectedSize(2);
        Delete tableDelete = new Delete(key, clientTimeStamp);
        tableMetaData.add(tableDelete);
        boolean hasViewIndexTable = false;
        boolean hasLocalIndexTable = false;
        if (parentTableName != null) {
            // Dropping a child (e.g. an index): also delete the parent link row.
            byte[] linkKey = MetaDataUtil.getParentLinkKey(tenantIdStr, schemaName, parentTableName, tableName);
            Delete linkDelete = new Delete(linkKey, clientTimeStamp);
            tableMetaData.add(linkDelete);
        } else {
            hasViewIndexTable = MetaDataUtil.hasViewIndexTable(connection, schemaName, tableName);
            hasLocalIndexTable = MetaDataUtil.hasLocalIndexTable(connection, schemaName, tableName);
        }

        MetaDataMutationResult result = connection.getQueryServices().dropTable(tableMetaData, tableType, cascade);
        MutationCode code = result.getMutationCode();
        switch (code) {
        case TABLE_NOT_FOUND:
            // Only an error when IF EXISTS was not specified.
            if (!ifExists) { throw new TableNotFoundException(schemaName, tableName); }
            break;
        case NEWER_TABLE_FOUND:
            throw new NewerTableAlreadyExistsException(schemaName, tableName, result.getTable());
        case UNALLOWED_TABLE_MUTATION:
            throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_MUTATE_TABLE)

            .setSchemaName(schemaName).setTableName(tableName).build().buildException();
        default:
            // Drop succeeded: remove the table from the connection's cache.
            connection.removeTable(tenantId, SchemaUtil.getTableName(schemaName, tableName), parentTableName,
                    result.getMutationTime());

            if (result.getTable() != null && tableType != PTableType.VIEW) {
                connection.setAutoCommit(true);
                PTable table = result.getTable();
                boolean dropMetaData = result.getTable().getViewIndexId() == null &&
                        connection.getQueryServices().getProps().getBoolean(DROP_METADATA_ATTRIB, DEFAULT_DROP_METADATA);
                long ts = (scn == null ? result.getMutationTime() : scn);
                // Create empty table and schema - they're only used to get the name from
                // PName name, PTableType type, long timeStamp, long sequenceNumber, List<PColumn> columns
                List<TableRef> tableRefs = Lists.newArrayListWithExpectedSize(2 + table.getIndexes().size());
                // All multi-tenant tables have a view index table, so no need to check in that case
                if (tableType == PTableType.TABLE
                        && (table.isMultiTenant() || hasViewIndexTable || hasLocalIndexTable)) {

                    MetaDataUtil.deleteViewIndexSequences(connection, table.getPhysicalName());
                    if (hasViewIndexTable) {
                        // Build a placeholder PTable naming the view-index
                        // physical table so its data can be deleted below.
                        String viewIndexSchemaName = null;
                        String viewIndexTableName = null;
                        if (schemaName != null) {
                            viewIndexSchemaName = MetaDataUtil.getViewIndexTableName(schemaName);
                            viewIndexTableName = tableName;
                        } else {
                            viewIndexTableName = MetaDataUtil.getViewIndexTableName(tableName);
                        }
                        PTable viewIndexTable = new PTableImpl(null, viewIndexSchemaName, viewIndexTableName, ts,
                                table.getColumnFamilies());
                        tableRefs.add(new TableRef(null, viewIndexTable, ts, false));
                    }
                    if (hasLocalIndexTable) {
                        // Same placeholder approach for the local-index table.
                        String localIndexSchemaName = null;
                        String localIndexTableName = null;
                        if (schemaName != null) {
                            localIndexSchemaName = MetaDataUtil.getLocalIndexTableName(schemaName);
                            localIndexTableName = tableName;
                        } else {
                            localIndexTableName = MetaDataUtil.getLocalIndexTableName(tableName);
                        }
                        PTable localIndexTable = new PTableImpl(null, localIndexSchemaName, localIndexTableName,
                                ts, Collections.<PColumnFamily> emptyList());
                        tableRefs.add(new TableRef(null, localIndexTable, ts, false));
                    }
                }
                tableRefs.add(new TableRef(null, table, ts, false));
                // TODO: Let the standard mutable secondary index maintenance handle this?
                for (PTable index : table.getIndexes()) {
                    tableRefs.add(new TableRef(null, index, ts, false));
                }
                deleteFromStatsTable(tableRefs, ts);
                if (!dropMetaData) {
                    // HBase data is kept: issue deletes over the rows instead,
                    // preserving the ability to query at earlier timestamps.
                    MutationPlan plan = new PostDDLCompiler(connection).compile(tableRefs, null, null,
                            Collections.<PColumn> emptyList(), ts);
                    // Delete everything in the column. You'll still be able to do queries at earlier timestamps
                    return connection.getQueryServices().updateData(plan);
                }
            }
            break;
        }
        return new MutationState(0, connection);
    } finally {
        connection.setAutoCommit(wasAutoCommit);
    }
}
 
Example 10
Source File: TableInfo.java    From phoenix with Apache License 2.0 4 votes vote down vote up
// Row key prefix for this table in SYSTEM.CATALOG: built from the tenant id,
// schema and table name via SchemaUtil.getTableKey.
public byte[] getRowKeyPrefix() {
    return SchemaUtil.getTableKey(tenantId, schema, name);
}
 
Example 11
Source File: InvalidIndexStateClientSideIT.java    From phoenix with Apache License 2.0 4 votes vote down vote up
@Test
public void testCachedConnections() throws Throwable {
    // Creates a table with one index, forces the index into PENDING_DISABLE,
    // then calls the GetTable coprocessor endpoint pretending to be an older
    // (4.13) client and verifies the index state is presented as DISABLE.
    final String schemaName = generateUniqueName();
    final String tableName = generateUniqueName();
    final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
    final String indexName = generateUniqueName();
    final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
    final Connection conn = DriverManager.getConnection(getUrl());

    // create table and indices
    String createTableSql =
            "CREATE TABLE " + fullTableName
                    + "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, v2 INTEGER, v3 INTEGER)";
    conn.createStatement().execute(createTableSql);
    conn.createStatement()
            .execute("CREATE INDEX " + indexName + " ON " + fullTableName + "(v1)");
    conn.commit();
    PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
    ConnectionQueryServices queryServices = phoenixConn.getQueryServices();
    Table metaTable =
            phoenixConn.getQueryServices()
                    .getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    long ts = EnvironmentEdgeManager.currentTimeMillis();
    // Push the index into PENDING_DISABLE directly through SYSTEM.CATALOG.
    MutationCode code =
            IndexUtil
                    .updateIndexState(fullIndexName, ts, metaTable, PIndexState.PENDING_DISABLE)
                    .getMutationCode();
    assertEquals(MutationCode.TABLE_ALREADY_EXISTS, code);
    ts = EnvironmentEdgeManager.currentTimeMillis();

    final byte[] schemaBytes = PVarchar.INSTANCE.toBytes(schemaName);
    final byte[] tableBytes = PVarchar.INSTANCE.toBytes(tableName);
    PName tenantId = phoenixConn.getTenantId();
    final long tableTimestamp = HConstants.LATEST_TIMESTAMP;
    long tableResolvedTimestamp = HConstants.LATEST_TIMESTAMP;
    final long resolvedTimestamp = tableResolvedTimestamp;
    final byte[] tenantIdBytes =
            tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes();
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
    // GetTable RPC stamped with a 4.13 client version.
    Batch.Call<MetaDataService, MetaDataResponse> callable =
            new Batch.Call<MetaDataService, MetaDataResponse>() {
                @Override
                public MetaDataResponse call(MetaDataService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<MetaDataResponse> rpcCallback =
                            new BlockingRpcCallback<MetaDataResponse>();
                    GetTableRequest.Builder builder = GetTableRequest.newBuilder();
                    builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                    builder.setSchemaName(ByteStringer.wrap(schemaBytes));
                    builder.setTableName(ByteStringer.wrap(tableBytes));
                    builder.setTableTimestamp(tableTimestamp);
                    builder.setClientTimestamp(resolvedTimestamp);
                    builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION,
                        13, PHOENIX_PATCH_NUMBER));
                    instance.getTable(controller, builder.build(), rpcCallback);
                    if (controller.getFailedOn() != null) {
                        throw controller.getFailedOn();
                    }
                    return rpcCallback.get();
                }
            };
    int version = VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, 13, PHOENIX_PATCH_NUMBER);
    LOGGER.info("Client version: " + version);
    Table ht =
            queryServices.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    // Fixed: the coprocessor call was previously wrapped in
    // `catch (Exception e) { LOGGER.error(...) }`, which swallowed any RPC
    // failure and let the test pass spuriously. The method already declares
    // `throws Throwable`, so failures now propagate and fail the test.
    try {
        final Map<byte[], MetaDataResponse> results =
                ht.coprocessorService(MetaDataService.class, tableKey, tableKey, callable);

        assert (results.size() == 1);
        MetaDataResponse result = results.values().iterator().next();
        assert (result.getTable().getIndexesCount() == 1);
        // An old client must see PENDING_DISABLE as DISABLE.
        assert (PIndexState.valueOf(result.getTable().getIndexes(0).getIndexState())
                .equals(PIndexState.DISABLE));
    } finally {
        Closeables.closeQuietly(ht);
    }

}