Java Code Examples for org.apache.phoenix.query.ConnectionQueryServices#getTable()

The following examples show how to use org.apache.phoenix.query.ConnectionQueryServices#getTable(). All of them are taken from the Apache Phoenix project; the source file is noted above each example.
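Every example follows the same basic pattern: unwrap the JDBC connection to a PhoenixConnection, fetch its ConnectionQueryServices, and ask for the HBase table backing a physical table name. Here is a minimal sketch of that pattern, distilled from the examples below; the JDBC URL and the table name MY_TABLE are placeholders, and the return type of getTable() depends on the Phoenix version (older releases return HTableInterface, newer ones org.apache.hadoop.hbase.client.Table).

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;

public class GetTableSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; point this at a real cluster or a test mini-cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost")) {
            ConnectionQueryServices services =
                    conn.unwrap(PhoenixConnection.class).getQueryServices();
            // getTable() expects the *physical* HBase table name as bytes,
            // e.g. from PTable#getPhysicalName(), not the logical Phoenix name.
            try (Table table = services.getTable(Bytes.toBytes("MY_TABLE"))) {
                System.out.println("Opened " + table.getName());
            } // close the handle when done, as the examples below do
        }
    }
}

The examples differ mainly in what they do with the returned handle: raw scans and puts, coprocessor calls, row counting, or a simple existence check.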
Example 1
Source File: ServerCacheClient.java (from phoenix, Apache License 2.0)
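Here getTable() resolves the physical HBase table backing a PTable so that a hash-join server cache can be added on the region hosting the given start key; the handle is closed quietly in the finally block.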
public boolean addServerCache(byte[] startkeyOfRegion, ServerCache cache, HashCacheFactory cacheFactory,
        byte[] txState, PTable pTable) throws Exception {
    Table table = null;
    boolean success = true;
    byte[] cacheId = cache.getId();
    try {
        ConnectionQueryServices services = connection.getQueryServices();

        byte[] tableName = pTable.getPhysicalName().getBytes();
        table = services.getTable(tableName);
        HRegionLocation tableRegionLocation = services.getTableRegionLocation(tableName, startkeyOfRegion);
        if (cache.isExpired(tableRegionLocation)) {
            return false;
        }
        if (cache.addServer(tableRegionLocation) || services.getProps().getBoolean(HASH_JOIN_SERVER_CACHE_RESEND_PER_SERVER, false)) {
            success = addServerCache(table, startkeyOfRegion, pTable, cacheId, cache.getCachePtr(), cacheFactory,
                    txState, false);
        }
        return success;
    } finally {
        Closeables.closeQuietly(table);
    }
}
 
Example 2
Source File: ImmutableIndexExtendedIT.java (from phoenix, Apache License 2.0)
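This test casts the result of getTable() to HTable and scans it directly, counting the rows whose empty-column value matches the given bytes.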
private static int getRowCountForEmptyColValue(Connection conn, String tableName,
        byte[] valueBytes) throws IOException, SQLException {

    PTable table = PhoenixRuntime.getTable(conn, tableName);
    byte[] emptyCF = SchemaUtil.getEmptyColumnFamily(table);
    byte[] emptyCQ = EncodedColumnsUtil.getEmptyKeyValueInfo(table).getFirst();
    ConnectionQueryServices queryServices =
            conn.unwrap(PhoenixConnection.class).getQueryServices();
    HTable htable = (HTable) queryServices.getTable(table.getPhysicalName().getBytes());
    Scan scan = new Scan();
    scan.addColumn(emptyCF, emptyCQ);
    ResultScanner resultScanner = htable.getScanner(scan);
    int count = 0;

    for (Result result = resultScanner.next(); result != null; result = resultScanner.next()) {
        if (Bytes.compareTo(result.getValue(emptyCF, emptyCQ), 0, valueBytes.length,
                valueBytes, 0, valueBytes.length) == 0) {
            ++count;
        }
    }
    return count;
}
 
Example 3
Source File: IndexToolForDeleteBeforeRebuildIT.java (from phoenix, Apache License 2.0)
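The test resolves the global index's physical table through getTable() and counts its HBase rows before and after running the IndexTool.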
/**
 * Test delete before rebuild.
 */
@Test
public void testDeleteBeforeRebuildForGlobalIndex() throws Exception {
    conn.createStatement().execute(String.format(INDEX_GLOBAL_DDL, globalIndexName, dataTableFullName));
    String globalIndexUpsert = String.format(UPSERT_SQL, globalIndexFullName);
    PreparedStatement stmt = conn.prepareStatement(globalIndexUpsert);
    upsertRow(stmt, "tenantID1", 11, "name11", 99911);
    conn.commit();

    ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices();
    PTable physicalTable = PhoenixRuntime.getTable(conn, globalIndexFullName);
    Table hIndexTable = queryServices.getTable(physicalTable.getPhysicalName().getBytes());
    int count = getUtility().countRows(hIndexTable);
    // Confirm index has rows.
    assertEquals(4, count);

    runIndexTool(schemaName, dataTableName, globalIndexName, 0);

    count = getUtility().countRows(hIndexTable);

    // Confirm index has all the data rows
    assertEquals(3, count);
}
 
Example 4
Source File: BaseStatsCollectorIT.java (from phoenix, Apache License 2.0)
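Here getTable() opens the SYSTEM.STATS table (resolved via SchemaUtil.getPhysicalName) in a try-with-resources block so that guide posts can be read for each column family.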
private void verifyGuidePostGenerated(ConnectionQueryServices queryServices,
        String tableName, String[] familyNames,
        long guidePostWidth, boolean emptyGuidePostExpected) throws Exception {
    try (Table statsHTable =
            queryServices.getTable(
                    SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES,
                            queryServices.getProps()).getName())) {
        for (String familyName : familyNames) {
            GuidePostsInfo gps =
                    StatisticsUtil.readStatistics(statsHTable,
                            new GuidePostsKey(Bytes.toBytes(tableName), Bytes.toBytes(familyName)),
                            HConstants.LATEST_TIMESTAMP);
            assertTrue(emptyGuidePostExpected ? gps.isEmptyGuidePost() : !gps.isEmptyGuidePost());
            assertTrue(gps.getByteCounts()[0] >= guidePostWidth);
            assertTrue(gps.getGuidePostTimestamps()[0] > 0);
        }
    }
}
 
Example 5
Source File: FromCompiler.java (from phoenix, Apache License 2.0)
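When resolving a mapped VIEW, getTable() serves purely as an existence check for the underlying HBase table; the handle is closed again immediately.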
public static ColumnResolver getResolverForCreation(final CreateTableStatement statement, final PhoenixConnection connection)
        throws SQLException {
    TableName baseTable = statement.getBaseTableName();
    if (baseTable == null) {
        return EMPTY_TABLE_RESOLVER;
    }
    NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.<ColumnDef>emptyList());
    // Always use non-tenant-specific connection here
    try {
        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true);
        return visitor;
    } catch (TableNotFoundException e) {
        // Used for mapped VIEW, since we won't be able to resolve that.
        // Instead, we create a table with just the dynamic columns.
        // A tenant-specific connection may not create a mapped VIEW.
        if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) {
            ConnectionQueryServices services = connection.getQueryServices();
            byte[] fullTableName = SchemaUtil.getTableNameAsBytes(baseTable.getSchemaName(), baseTable.getTableName());
            HTableInterface htable = null;
            try {
                htable = services.getTable(fullTableName);
            } catch (UnsupportedOperationException ignore) {
                throw e; // For Connectionless
            } finally {
                if (htable != null) Closeables.closeQuietly(htable);
            }
            tableNode = NamedTableNode.create(null, baseTable, statement.getColumnDefs());
            return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp());
        }
        throw e;
    }
}
 
Example 6
Source File: DynamicColumnIT.java (from phoenix, Apache License 2.0)
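The test obtains an HTableInterface from getTable() and inserts raw HBase cells before the corresponding Phoenix table is created on top of them.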
@SuppressWarnings("deprecation")
private static void initTableValues() throws Exception {
    ConnectionQueryServices services = driver.getConnectionQueryServices(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES));
    HTableInterface hTable = services.getTable(SchemaUtil.getTableNameAsBytes(HBASE_DYNAMIC_COLUMNS_SCHEMA_NAME,HBASE_DYNAMIC_COLUMNS));
    try {
        // Insert rows using standard HBase mechanism with standard HBase "types"
        List<Row> mutations = new ArrayList<Row>();
        byte[] dv = Bytes.toBytes("DV");
        byte[] first = Bytes.toBytes("F");
        byte[] f1v1 = Bytes.toBytes("F1V1");
        byte[] f1v2 = Bytes.toBytes("F1V2");
        byte[] f2v1 = Bytes.toBytes("F2V1");
        byte[] f2v2 = Bytes.toBytes("F2V2");
        byte[] key = Bytes.toBytes("entry1");

        Put put = new Put(key);
        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
        put.add(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
        put.add(FAMILY_NAME, f1v1, Bytes.toBytes("f1value1"));
        put.add(FAMILY_NAME, f1v2, Bytes.toBytes("f1value2"));
        put.add(FAMILY_NAME2, f2v1, Bytes.toBytes("f2value1"));
        put.add(FAMILY_NAME2, f2v2, Bytes.toBytes("f2value2"));
        mutations.add(put);

        hTable.batch(mutations);

    } finally {
        hTable.close();
    }
    // Create Phoenix table after HBase table was created through the native APIs
    // The timestamp of the table creation must be later than the timestamp of the data
    ensureTableCreated(getUrl(), HBASE_DYNAMIC_COLUMNS);
}
 
Example 7
Source File: IndexVerificationOutputRepository.java (from phoenix, Apache License 2.0)
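This constructor keeps two handles obtained from getTable(): the index verification output table and the index table itself.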
@VisibleForTesting
public IndexVerificationOutputRepository(byte[] indexName, Connection conn) throws SQLException {
    ConnectionQueryServices queryServices =
        conn.unwrap(PhoenixConnection.class).getQueryServices();
    outputTable = queryServices.getTable(OUTPUT_TABLE_NAME_BYTES);
    indexTable = queryServices.getTable(indexName);
}
 
Example 8
Source File: TestUtil.java (from phoenix, Apache License 2.0)
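getTable() supplies the raw Table handle used to count the cells of a single row.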
public static void assertRawCellCount(Connection conn, TableName tableName,
                                      byte[] row, int expectedCellCount)
        throws SQLException, IOException {
    ConnectionQueryServices cqs = conn.unwrap(PhoenixConnection.class).getQueryServices();
    Table table = cqs.getTable(tableName.getName());
    CellCount cellCount = getCellCount(table, true);
    int count = cellCount.getCellCount(Bytes.toString(row));
    assertEquals(expectedCellCount, count);
}
 
Example 9
Source File: ServerCacheClient.java (from phoenix, Apache License 2.0)
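This older variant uses the HTableInterface returned by getTable() to invoke the ServerCachingService coprocessor on every region server that still holds the cache.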
/**
 * Remove the cached table from all region servers
 * @param cacheId unique identifier for the hash join (returned from {@link #addHashCache(HTable, Scan, Set)})
 * @param servers list of servers upon which table was cached (filled in by {@link #addHashCache(HTable, Scan, Set)})
 * @throws SQLException
 * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
 */
private void removeServerCache(final byte[] cacheId, Set<HRegionLocation> servers) throws SQLException {
	ConnectionQueryServices services = connection.getQueryServices();
	Throwable lastThrowable = null;
	TableRef cacheUsingTableRef = cacheUsingTableRefMap.get(Bytes.mapKey(cacheId));
	byte[] tableName = cacheUsingTableRef.getTable().getPhysicalName().getBytes();
	HTableInterface iterateOverTable = services.getTable(tableName);
	try {
		List<HRegionLocation> locations = services.getAllTableRegions(tableName);
		Set<HRegionLocation> remainingOnServers = new HashSet<HRegionLocation>(servers);
		/**
		 * Allow for the possibility that the region we based where to send our cache has split and been
		 * relocated to another region server *after* we sent it, but before we removed it. To accommodate
		 * this, we iterate through the current metadata boundaries and remove the cache once for each
		 * server that we originally sent to.
		 */
		if (LOG.isDebugEnabled()) {LOG.debug(addCustomAnnotations("Removing Cache " + cacheId + " from servers.", connection));}
		for (HRegionLocation entry : locations) {
			if (remainingOnServers.contains(entry)) {  // Call once per server
				try {
					byte[] key = entry.getRegionInfo().getStartKey();
					iterateOverTable.coprocessorService(ServerCachingService.class, key, key, 
							new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {
						@Override
						public RemoveServerCacheResponse call(ServerCachingService instance) throws IOException {
							ServerRpcController controller = new ServerRpcController();
							BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback =
									new BlockingRpcCallback<RemoveServerCacheResponse>();
							RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest.newBuilder();
							if(connection.getTenantId() != null){
								builder.setTenantId(HBaseZeroCopyByteString.wrap(connection.getTenantId().getBytes()));
							}
							builder.setCacheId(HBaseZeroCopyByteString.wrap(cacheId));
							instance.removeServerCache(controller, builder.build(), rpcCallback);
							if(controller.getFailedOn() != null) {
								throw controller.getFailedOn();
							}
							return rpcCallback.get(); 
						}
					});
					remainingOnServers.remove(entry);
				} catch (Throwable t) {
					lastThrowable = t;
					LOG.error(addCustomAnnotations("Error trying to remove hash cache for " + entry, connection), t);
				}
			}
		}
		if (!remainingOnServers.isEmpty()) {
			LOG.warn(addCustomAnnotations("Unable to remove hash cache for " + remainingOnServers, connection), lastThrowable);
		}
	} finally {
		Closeables.closeQuietly(iterateOverTable);
	}
}
 
Example 10
Source File: FromCompiler.java (from phoenix, Apache License 2.0)
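A newer version of Example 5: the physical name is resolved with namespace mapping taken into account, and getTable() again acts as an existence probe for the mapped VIEW's HBase table.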
public static ColumnResolver getResolverForCreation(final CreateTableStatement statement, final PhoenixConnection connection)
        throws SQLException {
    TableName baseTable = statement.getBaseTableName();
    String schemaName;
    if (baseTable == null) {
        if (SchemaUtil.isSchemaCheckRequired(statement.getTableType(), connection.getQueryServices().getProps())) {
            schemaName = statement.getTableName().getSchemaName();
            if (schemaName != null) {
                new SchemaResolver(connection, statement.getTableName().getSchemaName(), true);
            } else if (connection.getSchema() != null) {
                // To ensure schema set through properties or connection string exists before creating table
                new SchemaResolver(connection, connection.getSchema(), true);
            }
        }
        return EMPTY_TABLE_RESOLVER;
    }
    NamedTableNode tableNode = NamedTableNode.create(null, baseTable, Collections.<ColumnDef>emptyList());
    // Always use non-tenant-specific connection here
    try {
        SingleTableColumnResolver visitor = new SingleTableColumnResolver(connection, tableNode, true);
        return visitor;
    } catch (TableNotFoundException e) {
        // Used for mapped VIEW, since we won't be able to resolve that.
        // Instead, we create a table with just the dynamic columns.
        // A tenant-specific connection may not create a mapped VIEW.
        if (connection.getTenantId() == null && statement.getTableType() == PTableType.VIEW) {
            ConnectionQueryServices services = connection.getQueryServices();
            boolean isNamespaceMapped = SchemaUtil.isNamespaceMappingEnabled(statement.getTableType(), connection.getQueryServices().getProps());
            byte[] fullTableName = SchemaUtil.getPhysicalHBaseTableName(
                baseTable.getSchemaName(), baseTable.getTableName(), isNamespaceMapped).getBytes();
            Table htable = null;
            try {
                htable = services.getTable(fullTableName);
            } catch (UnsupportedOperationException ignore) {
                throw e; // For Connectionless
            } finally {
                if (htable != null) Closeables.closeQuietly(htable);
            }
            tableNode = NamedTableNode.create(null, baseTable, statement.getColumnDefs());
            return new SingleTableColumnResolver(connection, tableNode, e.getTimeStamp(), new HashMap<String, UDFParseNode>(1), isNamespaceMapped);
        }
        throw e;
    }
}
 
Example 11
Source File: ServerCacheClient.java (from phoenix, Apache License 2.0)
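The newer variant of Example 9: it uses the Table API and encodes the tenant id for multi-tenant tables before issuing the coprocessor call.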
/**
 * Remove the cached table from all region servers
 * @throws SQLException
 * @throws IllegalStateException if hashed table cannot be removed on any region server on which it was added
 */
private void removeServerCache(final ServerCache cache, Set<HRegionLocation> remainingOnServers) throws SQLException {
    Table iterateOverTable = null;
    final byte[] cacheId = cache.getId();
    try {
        ConnectionQueryServices services = connection.getQueryServices();
        Throwable lastThrowable = null;
        final PTable cacheUsingTable = cacheUsingTableMap.get(Bytes.mapKey(cacheId));
        byte[] tableName = cacheUsingTable.getPhysicalName().getBytes();
        iterateOverTable = services.getTable(tableName);

        List<HRegionLocation> locations = services.getAllTableRegions(tableName);
        /**
         * Allow for the possibility that the region we based where to send our cache has split and been relocated
         * to another region server *after* we sent it, but before we removed it. To accommodate this, we iterate
         * through the current metadata boundaries and remove the cache once for each server that we originally sent
         * to.
         */
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug(addCustomAnnotations(
                    "Removing Cache " + cacheId + " from servers.", connection));
        }
        for (HRegionLocation entry : locations) {
         // Call once per server
            if (remainingOnServers.contains(entry)) { 
                try {
                    byte[] key = getKeyInRegion(entry.getRegion().getStartKey());
                    iterateOverTable.coprocessorService(ServerCachingService.class, key, key,
                            new Batch.Call<ServerCachingService, RemoveServerCacheResponse>() {
                                @Override
                                public RemoveServerCacheResponse call(ServerCachingService instance)
                                        throws IOException {
                                    ServerRpcController controller = new ServerRpcController();
                                    BlockingRpcCallback<RemoveServerCacheResponse> rpcCallback = new BlockingRpcCallback<RemoveServerCacheResponse>();
                                    RemoveServerCacheRequest.Builder builder = RemoveServerCacheRequest
                                            .newBuilder();
                                    final byte[] tenantIdBytes;
                                    if (cacheUsingTable.isMultiTenant()) {
                                        try {
                                            tenantIdBytes = connection.getTenantId() == null ? null
                                                    : ScanUtil.getTenantIdBytes(cacheUsingTable.getRowKeySchema(),
                                                            cacheUsingTable.getBucketNum() != null,
                                                            connection.getTenantId(),
                                                            cacheUsingTable.getViewIndexId() != null);
                                        } catch (SQLException e) {
                                            throw new IOException(e);
                                        }
                                    } else {
                                        tenantIdBytes = connection.getTenantId() == null ? null
                                                : connection.getTenantId().getBytes();
                                    }
                                    if (tenantIdBytes != null) {
                                        builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                                    }
                                    builder.setCacheId(ByteStringer.wrap(cacheId));
                                    instance.removeServerCache(controller, builder.build(), rpcCallback);
                                    if (controller.getFailedOn() != null) { throw controller.getFailedOn(); }
                                    return rpcCallback.get();
                                }
                            });
                    remainingOnServers.remove(entry);
                } catch (Throwable t) {
                    lastThrowable = t;
                    LOGGER.error(addCustomAnnotations(
                            "Error trying to remove hash cache for " + entry,
                            connection), t);
                }
            }
        }
        if (!remainingOnServers.isEmpty()) {
            LOGGER.warn(addCustomAnnotations("Unable to remove hash cache for "
                            + remainingOnServers, connection),
                    lastThrowable);
        }
    } finally {
        cacheUsingTableMap.remove(Bytes.mapKey(cacheId));
        Closeables.closeQuietly(iterateOverTable);
    }
}
 
Example 12
Source File: DynamicColumnIT.java (from phoenix, Apache License 2.0)
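The HBase-2 counterpart of Example 6: the table is created through Admin, populated via the Table returned by getTable(), and only then mapped as a Phoenix table.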
@Before
public void initTable() throws Exception {
    tableName = generateUniqueName();
    try (PhoenixConnection pconn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class)) {
        ConnectionQueryServices services = pconn.getQueryServices();
        try (Admin admin = services.getAdmin()) {
            TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName));
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES));
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME_A));
            builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(FAMILY_NAME_B));
            admin.createTable(builder.build());
        }

        try (Table hTable = services.getTable(Bytes.toBytes(tableName))) {
            // Insert rows using standard HBase mechanism with standard HBase "types"
            List<Row> mutations = new ArrayList<Row>();
            byte[] dv = Bytes.toBytes("DV");
            byte[] first = Bytes.toBytes("F");
            byte[] f1v1 = Bytes.toBytes("F1V1");
            byte[] f1v2 = Bytes.toBytes("F1V2");
            byte[] f2v1 = Bytes.toBytes("F2V1");
            byte[] f2v2 = Bytes.toBytes("F2V2");
            byte[] key = Bytes.toBytes("entry1");

            Put put = new Put(key);
            put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, dv, Bytes.toBytes("default"));
            put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, first, Bytes.toBytes("first"));
            put.addColumn(FAMILY_NAME_A, f1v1, Bytes.toBytes("f1value1"));
            put.addColumn(FAMILY_NAME_A, f1v2, Bytes.toBytes("f1value2"));
            put.addColumn(FAMILY_NAME_B, f2v1, Bytes.toBytes("f2value1"));
            put.addColumn(FAMILY_NAME_B, f2v2, Bytes.toBytes("f2value2"));
            mutations.add(put);

            hTable.batch(mutations, null);

            // Create Phoenix table after HBase table was created through the native APIs
            // The timestamp of the table creation must be later than the timestamp of the data
            pconn.createStatement().execute("create table " + tableName + 
            "   (entry varchar not null," +
            "    F varchar," +
            "    A.F1v1 varchar," +
            "    A.F1v2 varchar," +
            "    B.F2v1 varchar" +
            "    CONSTRAINT pk PRIMARY KEY (entry)) COLUMN_ENCODED_BYTES=NONE");
        }

    }
}
 
Example 13
Source File: IndexRebuildTaskIT.java (from phoenix, Apache License 2.0)
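The test opens the view index's physical table with getTable() to verify its row count before truncation and again after the IndexRebuildTask has repopulated it.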
@Test
public void testIndexRebuildTask() throws Throwable {
    String baseTable = generateUniqueName();
    String viewName = generateUniqueName();
    Connection conn = null;
    Connection tenantConn = null;
    try {
        conn = DriverManager.getConnection(getUrl());
        conn.setAutoCommit(false);
        Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
        props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, TENANT1);

        tenantConn = DriverManager.getConnection(getUrl(), props);
        String ddlFormat =
                "CREATE TABLE IF NOT EXISTS " + baseTable + "  ("
                        + " %s PK2 VARCHAR NOT NULL, V1 VARCHAR, V2 VARCHAR "
                        + " CONSTRAINT NAME_PK PRIMARY KEY (%s PK2)" + " ) %s";
        conn.createStatement().execute(generateDDL(ddlFormat));
        conn.commit();
        // Create a view
        String viewDDL = "CREATE VIEW " + viewName + " AS SELECT * FROM " + baseTable;
        tenantConn.createStatement().execute(viewDDL);

        // Create index
        String indexName = generateUniqueName();
        String idxSDDL = String.format("CREATE INDEX %s ON %s (V1)", indexName, viewName);

        tenantConn.createStatement().execute(idxSDDL);

        // Insert rows
        int numOfValues = 1000;
        for (int i=0; i < numOfValues; i++){
            tenantConn.createStatement().execute(
                    String.format("UPSERT INTO %s VALUES('%s', '%s', '%s')", viewName, String.valueOf(i), "y",
                            "z"));
        }
        tenantConn.commit();

        waitForIndexRebuild(conn, indexName, PIndexState.ACTIVE);
        String viewIndexTableName = MetaDataUtil.getViewIndexPhysicalName(baseTable);
        ConnectionQueryServices queryServices = conn.unwrap(PhoenixConnection.class).getQueryServices();

        Table indexHTable = queryServices.getTable(Bytes.toBytes(viewIndexTableName));
        int count = getUtility().countRows(indexHTable);
        assertEquals(numOfValues, count);

        // Altering the index to UNUSABLE makes its status INACTIVE.
        // Altering it to DISABLE instead would make the IndexTool fail when
        // setting the state back to ACTIVE, due to an invalid state transition.
        tenantConn.createStatement().execute(
                String.format("ALTER INDEX %s ON %s UNUSABLE", indexName, viewName));
        tenantConn.commit();

        // Remove index contents and try again
        Admin admin = queryServices.getAdmin();
        TableName tableName = TableName.valueOf(viewIndexTableName);
        admin.disableTable(tableName);
        admin.truncateTable(tableName, false);

        count = getUtility().countRows(indexHTable);
        assertEquals(0, count);

        String data = "{\"IndexName\":\"" + indexName + "\"}";

        // Run IndexRebuildTask
        TaskRegionObserver.SelfHealingTask task =
                new TaskRegionObserver.SelfHealingTask(
                        TaskRegionEnvironment, QueryServicesOptions.DEFAULT_TASK_HANDLING_MAX_INTERVAL_MS);

        Timestamp startTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis());
        Task.addTask(conn.unwrap(PhoenixConnection.class), PTable.TaskType.INDEX_REBUILD,
                TENANT1, null, viewName,
                PTable.TaskStatus.CREATED.toString(), data, null, startTs, null, true);
        task.run();

        // Check task status and other column values.
        waitForTaskState(conn, PTable.TaskType.INDEX_REBUILD, viewName, PTable.TaskStatus.COMPLETED);

        // See that index is rebuilt and confirm index has rows
        count = getUtility().countRows(indexHTable);
        assertEquals(numOfValues, count);
    } finally {
        if (conn != null) {
            conn.createStatement().execute("DELETE " + " FROM " + PhoenixDatabaseMetaData.SYSTEM_TASK_NAME
                    + " WHERE TABLE_NAME ='" + viewName  + "'");
            conn.commit();
            conn.close();
        }
        if (tenantConn != null) {
            tenantConn.close();
        }
    }
}
 
Example 14
Source File: InvalidIndexStateClientSideIT.java (from phoenix, Apache License 2.0)
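Here getTable() opens SYSTEM.CATALOG twice: once to update the index state directly, and once to issue a getTable coprocessor request that simulates an older client version.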
@Test
public void testCachedConnections() throws Throwable {
    final String schemaName = generateUniqueName();
    final String tableName = generateUniqueName();
    final String fullTableName = SchemaUtil.getTableName(schemaName, tableName);
    final String indexName = generateUniqueName();
    final String fullIndexName = SchemaUtil.getTableName(schemaName, indexName);
    final Connection conn = DriverManager.getConnection(getUrl());

    // create table and indices
    String createTableSql =
            "CREATE TABLE " + fullTableName
                    + "(org_id VARCHAR NOT NULL PRIMARY KEY, v1 INTEGER, v2 INTEGER, v3 INTEGER)";
    conn.createStatement().execute(createTableSql);
    conn.createStatement()
            .execute("CREATE INDEX " + indexName + " ON " + fullTableName + "(v1)");
    conn.commit();
    PhoenixConnection phoenixConn = conn.unwrap(PhoenixConnection.class);
    ConnectionQueryServices queryServices = phoenixConn.getQueryServices();
    Table metaTable =
            phoenixConn.getQueryServices()
                    .getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    long ts = EnvironmentEdgeManager.currentTimeMillis();
    MutationCode code =
            IndexUtil
                    .updateIndexState(fullIndexName, ts, metaTable, PIndexState.PENDING_DISABLE)
                    .getMutationCode();
    assertEquals(MutationCode.TABLE_ALREADY_EXISTS, code);
    ts = EnvironmentEdgeManager.currentTimeMillis();

    final byte[] schemaBytes = PVarchar.INSTANCE.toBytes(schemaName);
    final byte[] tableBytes = PVarchar.INSTANCE.toBytes(tableName);
    PName tenantId = phoenixConn.getTenantId();
    final long tableTimestamp = HConstants.LATEST_TIMESTAMP;
    long tableResolvedTimestamp = HConstants.LATEST_TIMESTAMP;
    final long resolvedTimestamp = tableResolvedTimestamp;
    final byte[] tenantIdBytes =
            tenantId == null ? ByteUtil.EMPTY_BYTE_ARRAY : tenantId.getBytes();
    byte[] tableKey = SchemaUtil.getTableKey(tenantIdBytes, schemaBytes, tableBytes);
    Batch.Call<MetaDataService, MetaDataResponse> callable =
            new Batch.Call<MetaDataService, MetaDataResponse>() {
                @Override
                public MetaDataResponse call(MetaDataService instance) throws IOException {
                    ServerRpcController controller = new ServerRpcController();
                    BlockingRpcCallback<MetaDataResponse> rpcCallback =
                            new BlockingRpcCallback<MetaDataResponse>();
                    GetTableRequest.Builder builder = GetTableRequest.newBuilder();
                    builder.setTenantId(ByteStringer.wrap(tenantIdBytes));
                    builder.setSchemaName(ByteStringer.wrap(schemaBytes));
                    builder.setTableName(ByteStringer.wrap(tableBytes));
                    builder.setTableTimestamp(tableTimestamp);
                    builder.setClientTimestamp(resolvedTimestamp);
                    builder.setClientVersion(VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION,
                        13, PHOENIX_PATCH_NUMBER));
                    instance.getTable(controller, builder.build(), rpcCallback);
                    if (controller.getFailedOn() != null) {
                        throw controller.getFailedOn();
                    }
                    return rpcCallback.get();
                }
            };
    int version = VersionUtil.encodeVersion(PHOENIX_MAJOR_VERSION, 13, PHOENIX_PATCH_NUMBER);
    LOGGER.info("Client version: " + version);
    Table ht =
            queryServices.getTable(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES);
    try {
        final Map<byte[], MetaDataResponse> results =
                ht.coprocessorService(MetaDataService.class, tableKey, tableKey, callable);

        assert (results.size() == 1);
        MetaDataResponse result = results.values().iterator().next();
        assert (result.getTable().getIndexesCount() == 1);
        assert (PIndexState.valueOf(result.getTable().getIndexes(0).getIndexState())
                .equals(PIndexState.DISABLE));
    } catch (Exception e) {
        LOGGER.error("Exception Occurred: " + e);

    } finally {
        Closeables.closeQuietly(ht);
    }

}
 
Example 15
Source File: IndexToolIT.java (from phoenix, Apache License 2.0)
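After truncating and rebuilding a tenant view index, the test counts its rows through the Table returned by getTable().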
@Test
public void testIndexToolWithTenantId() throws Exception {
    if (!useTenantId) { return; }
    String tenantId = generateUniqueName();
    String schemaName = generateUniqueName();
    String dataTableName = generateUniqueName();
    String viewTenantName = generateUniqueName();
    String indexNameGlobal = generateUniqueName();
    String indexNameTenant = generateUniqueName();
    String viewIndexTableName = "_IDX_" + dataTableName;

    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection connGlobal = DriverManager.getConnection(getUrl(), props);
    props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, tenantId);
    Connection connTenant = DriverManager.getConnection(getUrl(), props);
    String createTblStr = "CREATE TABLE %s (TENANT_ID VARCHAR(15) NOT NULL,ID INTEGER NOT NULL"
            + ", NAME VARCHAR, CONSTRAINT PK_1 PRIMARY KEY (TENANT_ID, ID)) MULTI_TENANT=true";
    String createViewStr = "CREATE VIEW %s AS SELECT * FROM %s";

    String upsertQueryStr = "UPSERT INTO %s (TENANT_ID, ID, NAME) VALUES('%s' , %d, '%s')";
    String createIndexStr = "CREATE INDEX %s ON %s (NAME) ";

    try {
        String tableStmtGlobal = String.format(createTblStr, dataTableName);
        connGlobal.createStatement().execute(tableStmtGlobal);

        String viewStmtTenant = String.format(createViewStr, viewTenantName, dataTableName);
        connTenant.createStatement().execute(viewStmtTenant);

        String idxStmtTenant = String.format(createIndexStr, indexNameTenant, viewTenantName);
        connTenant.createStatement().execute(idxStmtTenant);

        connTenant.createStatement()
                .execute(String.format(upsertQueryStr, viewTenantName, tenantId, 1, "x"));
        connTenant.commit();

        runIndexTool(true, false, "", viewTenantName, indexNameTenant,
                tenantId, 0, new String[0]);

        String selectSql = String.format("SELECT ID FROM %s WHERE NAME='x'", viewTenantName);
        ResultSet rs = connTenant.createStatement().executeQuery("EXPLAIN " + selectSql);
        String actualExplainPlan = QueryUtil.getExplainPlan(rs);
        assertExplainPlan(false, actualExplainPlan, "", viewIndexTableName);
        rs = connTenant.createStatement().executeQuery(selectSql);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertFalse(rs.next());

        // Remove from tenant view index and build.
        ConnectionQueryServices queryServices = connGlobal.unwrap(PhoenixConnection.class).getQueryServices();
        Admin admin = queryServices.getAdmin();
        TableName tableName = TableName.valueOf(viewIndexTableName);
        admin.disableTable(tableName);
        admin.truncateTable(tableName, false);

        runIndexTool(true, false, "", viewTenantName, indexNameTenant,
                tenantId, 0, new String[0]);

        Table htable = queryServices.getTable(Bytes.toBytes(viewIndexTableName));
        int count = getUtility().countRows(htable);
        // Confirm index has rows
        assertTrue(count == 1);

        selectSql = String.format("SELECT /*+ INDEX(%s) */ COUNT(*) FROM %s",
                indexNameTenant, viewTenantName);
        rs = connTenant.createStatement().executeQuery(selectSql);
        assertTrue(rs.next());
        assertEquals(1, rs.getInt(1));
        assertFalse(rs.next());

        String idxStmtGlobal =
                String.format(createIndexStr, indexNameGlobal, dataTableName);
        connGlobal.createStatement().execute(idxStmtGlobal);

        // run the index MR job this time with tenant id.
        // We expect it to return -1 because indexTable is not correct for this tenant.
        runIndexTool(true, false, schemaName, dataTableName, indexNameGlobal,
                tenantId, -1, new String[0]);

    } finally {
        connGlobal.close();
        connTenant.close();
    }
}
 
Example 16
Source File: TestUtil.java (from phoenix, Apache License 2.0)
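getTable() provides the handle for the raw scan that polls for the deleted marker row until major compaction has removed it.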
/**
 * Runs a major compaction, and then waits until the compaction is complete before returning.
 *
 * @param tableName name of the table to be compacted
 */
public static void doMajorCompaction(Connection conn, String tableName) throws Exception {

    tableName = SchemaUtil.normalizeIdentifier(tableName);

    // We simply write a marker row, request a major compaction, and then wait until the marker
    // row is gone
    PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
    PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), tableName));
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    MutationState mutationState = pconn.getMutationState();
    if (table.isTransactional()) {
        mutationState.startTransaction(table.getTransactionProvider());
    }
    try (Table htable = mutationState.getHTable(table)) {
        byte[] markerRowKey = Bytes.toBytes("TO_DELETE");
       
        Put put = new Put(markerRowKey);
        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.put(put);
        Delete delete = new Delete(markerRowKey);
        delete.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_VALUE_BYTES);
        htable.delete(delete);
        htable.close();
        if (table.isTransactional()) {
            mutationState.commit();
        }
    
        Admin hbaseAdmin = services.getAdmin();
        hbaseAdmin.flush(TableName.valueOf(tableName));
        hbaseAdmin.majorCompact(TableName.valueOf(tableName));
        hbaseAdmin.close();
    
        boolean compactionDone = false;
        while (!compactionDone) {
            Thread.sleep(6000L);
            Scan scan = new Scan();
            scan.setStartRow(markerRowKey);
            scan.setStopRow(Bytes.add(markerRowKey, new byte[] { 0 }));
            scan.setRaw(true);
    
            try (Table htableForRawScan = services.getTable(Bytes.toBytes(tableName))) {
                ResultScanner scanner = htableForRawScan.getScanner(scan);
                List<Result> results = Lists.newArrayList(scanner);
                LOGGER.info("Results: " + results);
                compactionDone = results.isEmpty();
                scanner.close();
            }
            LOGGER.info("Compaction done: " + compactionDone);
            
            // need to run compaction after the next txn snapshot has been written so that compaction can remove deleted rows
            if (!compactionDone && table.isTransactional()) {
                hbaseAdmin = services.getAdmin();
                hbaseAdmin.flush(TableName.valueOf(tableName));
                hbaseAdmin.majorCompact(TableName.valueOf(tableName));
                hbaseAdmin.close();
            }
        }
    }
}