Java Code Examples for org.apache.phoenix.jdbc.PhoenixConnection#commit()

The following examples show how to use org.apache.phoenix.jdbc.PhoenixConnection#commit(). All of them come from the Apache Phoenix project; the source file and license are noted above each example.
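Before the project examples, the basic pattern is worth seeing in isolation. The following is a minimal sketch, not taken from any project below; the JDBC URL and table name (jdbc:phoenix:localhost:2181, MY_TABLE) are placeholders you would replace with your own.

import java.sql.DriverManager;
import java.sql.PreparedStatement;

import org.apache.phoenix.jdbc.PhoenixConnection;

public class CommitSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL and table; adjust for your cluster and schema.
        try (PhoenixConnection conn = DriverManager
                .getConnection("jdbc:phoenix:localhost:2181")
                .unwrap(PhoenixConnection.class)) {
            // Phoenix connections default to autoCommit=false, so mutations are
            // buffered client-side until commit() sends them to HBase in a batch.
            try (PreparedStatement ps = conn.prepareStatement(
                    "UPSERT INTO MY_TABLE (ID, VAL) VALUES (?, ?)")) {
                ps.setInt(1, 1);
                ps.setString(2, "v1");
                ps.executeUpdate();
            }
            conn.commit();
        }
    }
}

Every example below ends the same way: mutations buffered on a PhoenixConnection are flushed to the server by a single commit() call.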
Example 1
Source File: OrphanViewTool.java    From phoenix with Apache License 2.0
private void forcefullyDropView(PhoenixConnection phoenixConnection,
                                Key key) throws Exception {
    String deleteRowsFromCatalog = "DELETE FROM " + SYSTEM_CATALOG_NAME +
            " WHERE " + TENANT_ID + (key.getTenantId() == null ? " IS NULL" : " = '" + key.getTenantId() + "'") + " AND " +
            TABLE_SCHEM + (key.getSchemaName() == null ? " IS NULL " : " = '" + key.getSchemaName() + "'") + " AND " +
            TABLE_NAME + " = '" + key.getTableName() + "'";
    String deleteRowsFromChildLink = "DELETE FROM " + SYSTEM_CHILD_LINK_NAME +
            " WHERE " + COLUMN_NAME + (key.getTenantId() == null ? " IS NULL" : " = '" + key.getTenantId() + "'") + " AND " +
            COLUMN_FAMILY + " = '" + (key.getSchemaName() == null ? key.getTableName() : key.getSchemaName() + "." + key.getTableName()) + "'";
    try {
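        // With autoCommit off (the Phoenix default), both DELETEs are buffered
        // on the connection and flushed together by the commit() below.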
        phoenixConnection.createStatement().execute(deleteRowsFromCatalog);
        phoenixConnection.createStatement().execute(deleteRowsFromChildLink);
        phoenixConnection.commit();
    } catch (SQLException e) {
        throw new IOException(e);
    }
}
 
Example 2
Source File: OrphanViewTool.java    From phoenix with Apache License 2.0
private void removeLink(PhoenixConnection phoenixConnection, Key src, Key dst, PTable.LinkType linkType) throws Exception {
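    // PHYSICAL_TABLE and PARENT_TABLE links are stored in SYSTEM.CATALOG, while
    // child links live in SYSTEM.CHILD_LINK, hence the table choice below.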
    String deleteQuery = "DELETE FROM " +
            ((linkType == PTable.LinkType.PHYSICAL_TABLE || linkType == PTable.LinkType.PARENT_TABLE) ? SYSTEM_CATALOG_NAME : SYSTEM_CHILD_LINK_NAME) +
            " WHERE " + TENANT_ID + (src.getTenantId() == null ? " IS NULL" : " = '" + src.getTenantId() + "'") + " AND " +
            TABLE_SCHEM + (src.getSchemaName() == null ? " IS NULL " : " = '" + src.getSchemaName() + "'") + " AND " +
            TABLE_NAME + " = '" + src.getTableName() + "' AND " +
            COLUMN_NAME + (dst.getTenantId() == null ? " IS NULL" : " = '" + dst.getTenantId() + "'") + " AND " +
            COLUMN_FAMILY + " = '" + (dst.getSchemaName() == null ? dst.getTableName() : dst.getSchemaName() + "." +
            dst.getTableName()) + "'";
    phoenixConnection.createStatement().execute(deleteQuery);
    phoenixConnection.commit();
}
 
Example 3
Source File: MutatingParallelIteratorFactory.java    From phoenix with Apache License 2.0
@Override
public PeekingResultIterator newIterator(StatementContext context, ResultIterator iterator, Scan scan) throws SQLException {
    final PhoenixConnection connection = new PhoenixConnection(this.connection);
    MutationState state = mutate(context, iterator, connection);
    long totalRowCount = state.getUpdateCount();
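    // In auto-commit mode the new mutations are joined into the connection's
    // state and committed immediately; the MutationState created below is then
    // empty and only carries the row count forward.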
    if (connection.getAutoCommit()) {
        connection.getMutationState().join(state);
        connection.commit();
        ConnectionQueryServices services = connection.getQueryServices();
        int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        state = new MutationState(maxSize, connection, totalRowCount);
    }
    final MutationState finalState = state;
    byte[] value = PLong.INSTANCE.toBytes(totalRowCount);
    KeyValue keyValue = KeyValueUtil.newKeyValue(UNGROUPED_AGG_ROW_KEY, SINGLE_COLUMN_FAMILY, SINGLE_COLUMN, AGG_TIMESTAMP, value, 0, value.length);
    final Tuple tuple = new SingleKeyValueTuple(keyValue);
    return new PeekingResultIterator() {
        private boolean done = false;
        
        @Override
        public Tuple next() throws SQLException {
            if (done) {
                return null;
            }
            done = true;
            return tuple;
        }

        @Override
        public void explain(List<String> planSteps) {
        }

        @Override
        public void close() throws SQLException {
            try {
                // Join the child mutation states in close, since this is called in a single threaded manner
                // after the parallel results have been processed.
                if (!connection.getAutoCommit()) {
                    MutatingParallelIteratorFactory.this.connection.getMutationState().join(finalState);
                }
            } finally {
                connection.close();
            }
        }

        @Override
        public Tuple peek() throws SQLException {
            return done ? null : tuple;
        }
    };
}
 
Example 4
Source File: UpsertCompiler.java    From phoenix with Apache License 2.0
private static MutationState upsertSelect(PhoenixStatement statement, 
        TableRef tableRef, RowProjector projector, ResultIterator iterator, int[] columnIndexes,
        int[] pkSlotIndexes) throws SQLException {
    try {
        PhoenixConnection connection = statement.getConnection();
        ConnectionQueryServices services = connection.getQueryServices();
        int maxSize = services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
        int batchSize = Math.min(connection.getMutateBatchSize(), maxSize);
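        // Cap each commit batch at the client's maximum mutation size
        // (phoenix.mutate.maxSize) so a batch never exceeds the configured limit.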
        boolean isAutoCommit = connection.getAutoCommit();
        byte[][] values = new byte[columnIndexes.length][];
        int rowCount = 0;
        Map<ImmutableBytesPtr,Map<PColumn,byte[]>> mutation = Maps.newHashMapWithExpectedSize(batchSize);
        PTable table = tableRef.getTable();
        ResultSet rs = new PhoenixResultSet(iterator, projector, statement);
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        while (rs.next()) {
            for (int i = 0; i < values.length; i++) {
                PColumn column = table.getColumns().get(columnIndexes[i]);
                byte[] bytes = rs.getBytes(i+1);
                ptr.set(bytes == null ? ByteUtil.EMPTY_BYTE_ARRAY : bytes);
                Object value = rs.getObject(i+1);
                int rsPrecision = rs.getMetaData().getPrecision(i+1);
                Integer precision = rsPrecision == 0 ? null : rsPrecision;
                int rsScale = rs.getMetaData().getScale(i+1);
                Integer scale = rsScale == 0 ? null : rsScale;
                // We are guaranteed that the two columns will have compatible types,
                // as we checked that before.
                if (!column.getDataType().isSizeCompatible(ptr, value, column.getDataType(),
                        precision, scale,
                        column.getMaxLength(),column.getScale())) {
                    throw new SQLExceptionInfo.Builder(SQLExceptionCode.DATA_EXCEEDS_MAX_CAPACITY)
                        .setColumnName(column.getName().getString())
                        .setMessage("value=" + column.getDataType().toStringLiteral(ptr, null)).build().buildException();
                }
                column.getDataType().coerceBytes(ptr, value, column.getDataType(),
                        precision, scale, SortOrder.getDefault(),
                        column.getMaxLength(), column.getScale(), column.getSortOrder());
                values[i] = ByteUtil.copyKeyBytesIfNecessary(ptr);
            }
            setValues(values, pkSlotIndexes, columnIndexes, table, mutation);
            rowCount++;
            // Commit a batch if auto commit is true and we're at our batch size
            if (isAutoCommit && rowCount % batchSize == 0) {
                MutationState state = new MutationState(tableRef, mutation, 0, maxSize, connection);
                connection.getMutationState().join(state);
                connection.commit();
                mutation.clear();
            }
        }
        // If auto commit is true, this last batch will be committed upon return
        return new MutationState(tableRef, mutation, rowCount / batchSize * batchSize, maxSize, connection);
    } finally {
        iterator.close();
    }
}
 
Example 5
Source File: QueryDatabaseMetaDataIT.java    From phoenix with Apache License 2.0
@Test
public void testCreateOnExistingTable() throws Exception {
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    String tableName = MDTEST_NAME;
    String schemaName = MDTEST_SCHEMA_NAME;
    byte[] cfA = Bytes.toBytes(SchemaUtil.normalizeIdentifier("a"));
    byte[] cfB = Bytes.toBytes(SchemaUtil.normalizeIdentifier("b"));
    byte[] cfC = Bytes.toBytes("c");
    byte[][] familyNames = new byte[][] {cfB, cfC};
    byte[] htableName = SchemaUtil.getTableNameAsBytes(schemaName, tableName);
    HBaseAdmin admin = pconn.getQueryServices().getAdmin();
    try {
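        // Drop any HBase table left over from a previous run; on a clean
        // cluster these calls throw TableNotFoundException, which is ignored.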
        admin.disableTable(htableName);
        admin.deleteTable(htableName);
        admin.enableTable(htableName);
    } catch (org.apache.hadoop.hbase.TableNotFoundException e) {
    }
    
    @SuppressWarnings("deprecation")
    HTableDescriptor descriptor = new HTableDescriptor(htableName);
    for (byte[] familyName : familyNames) {
        HColumnDescriptor columnDescriptor = new HColumnDescriptor(familyName);
        descriptor.addFamily(columnDescriptor);
    }
    admin.createTable(descriptor);
        
    long ts = nextTimestamp();
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 5));
    PhoenixConnection conn1 = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
    ensureTableCreated(getUrl(), tableName, null, ts);
    
    descriptor = admin.getTableDescriptor(htableName);
    assertEquals(3,descriptor.getColumnFamilies().length);
    HColumnDescriptor cdA = descriptor.getFamily(cfA);
    assertNotEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdA.getKeepDeletedCellsAsEnum());
    assertEquals(DataBlockEncoding.NONE, cdA.getDataBlockEncoding()); // Overridden using WITH
    assertEquals(1, cdA.getMaxVersions()); // Overridden using WITH
    HColumnDescriptor cdB = descriptor.getFamily(cfB);
    // Allow KEEP_DELETED_CELLS to be false for VIEW
    assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdB.getKeepDeletedCellsAsEnum());
    assertEquals(DataBlockEncoding.NONE, cdB.getDataBlockEncoding()); // Should keep the original value.
    // CF c should stay the same since it's not a Phoenix cf.
    HColumnDescriptor cdC = descriptor.getFamily(cfC);
    assertNotNull("Column family not found", cdC);
    assertEquals(HColumnDescriptor.DEFAULT_KEEP_DELETED, cdC.getKeepDeletedCellsAsEnum());
    assertFalse(SchemaUtil.DEFAULT_DATA_BLOCK_ENCODING == cdC.getDataBlockEncoding());
    assertTrue(descriptor.hasCoprocessor(UngroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(GroupedAggregateRegionObserver.class.getName()));
    assertTrue(descriptor.hasCoprocessor(ServerCachingEndpointImpl.class.getName()));
    admin.close();
     
    int rowCount = 5;
    String upsert = "UPSERT INTO " + tableName + "(id,col1,col2) VALUES(?,?,?)";
    PreparedStatement ps = conn1.prepareStatement(upsert);
    for (int i = 0; i < rowCount; i++) {
        ps.setString(1, Integer.toString(i));
        ps.setInt(2, i+1);
        ps.setInt(3, i+2);
        ps.execute();
    }
    conn1.commit();
    conn1.close();
    
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(ts + 6));
    Connection conn2 = DriverManager.getConnection(getUrl(), props);
    String query = "SELECT count(1) FROM " + tableName;
    ResultSet rs = conn2.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals(rowCount, rs.getLong(1));
    
    query = "SELECT id, col1,col2 FROM " + tableName;
    rs = conn2.createStatement().executeQuery(query);
    for (int i = 0; i < rowCount; i++) {
        assertTrue(rs.next());
        assertEquals(Integer.toString(i),rs.getString(1));
        assertEquals(i+1, rs.getInt(2));
        assertEquals(i+2, rs.getInt(3));
    }
    assertFalse(rs.next());
    conn2.close();
}
 
Example 6
Source File: LocalIndexIT.java    From phoenix with Apache License 2.0
@Test
public void testLocalIndexAutomaticRepair() throws Exception {
    if (isNamespaceMapped) { return; }
    PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
    try (Table metaTable = conn.getQueryServices().getTable(TableName.META_TABLE_NAME.getName());
            Admin admin = conn.getQueryServices().getAdmin();) {
        Statement statement = conn.createStatement();
        final String tableName = "T_AUTO_MATIC_REPAIR";
        String indexName = "IDX_T_AUTO_MATIC_REPAIR";
        String indexName1 = "IDX_T_AUTO_MATIC_REPAIR_1";
        statement.execute("create table " + tableName + " (id integer not null,fn varchar,"
                + "cf1.ln varchar constraint pk primary key(id)) split on (400,800,1200,1600)");
        statement.execute("create local index " + indexName + " on " + tableName + "  (fn,cf1.ln)");
        statement.execute("create local index " + indexName1 + " on " + tableName + "  (fn)");
        for (int i = 0; i < 2000; i++) {
            statement.execute("upsert into " + tableName + "  values(" + i + ",'fn" + i + "','ln" + i + "')");
        }
        conn.commit();
        ResultSet rs = statement.executeQuery("SELECT COUNT(*) FROM " + indexName);
        assertTrue(rs.next());
        assertEquals(2000, rs.getLong(1));
        List<RegionInfo> tableRegions = admin.getRegions(TableName.valueOf(tableName));
        admin.disableTable(TableName.valueOf(tableName));
        copyLocalIndexHFiles(config, tableRegions.get(0), tableRegions.get(1), false);
        copyLocalIndexHFiles(config, tableRegions.get(3), tableRegions.get(0), false);
        admin.enableTable(TableName.valueOf(tableName));

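        // The copied HFiles duplicate local index data across regions, so the
        // raw count exceeds the expected 4000 until major compaction triggers
        // the automatic repair checked below.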
        int count = getCount(conn, tableName, "L#0");
        assertTrue(count > 4000);
        admin.majorCompact(TableName.valueOf(tableName));
        int tryCount = 5;// need to wait for rebuilding of corrupted local index region
        while (tryCount-- > 0 && count != 4000) {
            Thread.sleep(15000);
            count = getCount(conn, tableName, "L#0");
        }
        assertEquals(4000, count);
        rs = statement.executeQuery("SELECT COUNT(*) FROM " + indexName1);
        assertTrue(rs.next());
        assertEquals(2000, rs.getLong(1));
        rs = statement.executeQuery("SELECT COUNT(*) FROM " + indexName);
        assertTrue(rs.next());
        assertEquals(2000, rs.getLong(1));
        statement.execute("DROP INDEX " + indexName1 + " ON " + tableName);
        admin.majorCompact(TableName.valueOf(tableName));
        statement.execute("DROP INDEX " + indexName + " ON " + tableName);
        admin.majorCompact(TableName.valueOf(tableName));
        Thread.sleep(15000);
        admin.majorCompact(TableName.valueOf(tableName));
        Thread.sleep(15000);
        rs = statement.executeQuery("SELECT COUNT(*) FROM " + tableName);
        assertTrue(rs.next());
        assertEquals(2000, rs.getLong(1));
    }
}
 
Example 7
Source File: MappingTableDataTypeIT.java    From phoenix with Apache License 2.0
@Test
public void testMappingHbaseTableToPhoenixTable() throws Exception {
    String mtest = generateUniqueName();
    final TableName tableName = TableName.valueOf(mtest);
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    PhoenixConnection conn = DriverManager.getConnection(getUrl(), props).unwrap(PhoenixConnection.class);
    
    Admin admin = conn.getQueryServices().getAdmin();
    try {
        // Create an HBase table with two column families and get a handle
        // for inserting test data.
        TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
        builder.addColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf1")))
                .addColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("cf2")));
        admin.createTable(builder.build());
        Table t = conn.getQueryServices().getTable(Bytes.toBytes(mtest));
        insertData(tableName.getName(), admin, t);
        t.close();
        // create phoenix table that maps to existing HBase table
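        // Only cf1 is mapped by the Phoenix table, so cf2 values remain in
        // HBase but are invisible to SQL queries (verified below).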
        createPhoenixTable(mtest);
        
        String selectSql = "SELECT * FROM " + mtest;
        ResultSet rs = conn.createStatement().executeQuery(selectSql);
        ResultSetMetaData rsMetaData = rs.getMetaData();
        assertTrue("Expected single row", rs.next());
        // verify that values from cf2 are not returned
        assertEquals("Number of columns", 2, rsMetaData.getColumnCount());
        assertEquals("Column Value", "value1", rs.getString(2));
        assertFalse("Expected single row ", rs.next());
        
        // delete the row
        String deleteSql = "DELETE FROM " + mtest + " WHERE id = 'row'";
        conn.createStatement().executeUpdate(deleteSql);
        conn.commit();
        
        // verify that no rows are returned when querying through phoenix
        rs = conn.createStatement().executeQuery(selectSql);
        assertFalse("Expected no row` ", rs.next());
        
        // verify that the row with a value for cf2 still exists via the HBase API;
        // reopen the table handle, since it was closed after the initial insert
        Table rawTable = conn.getQueryServices().getTable(Bytes.toBytes(mtest));
        Scan scan = new Scan();
        ResultScanner results = rawTable.getScanner(scan);
        Result result = results.next();
        assertNotNull("Expected single row", result);
        List<Cell> kvs = result.getColumnCells(Bytes.toBytes("cf2"), Bytes.toBytes("q2"));
        assertEquals("Expected single value", 1, kvs.size());
        assertEquals("Column Value", "value2", Bytes.toString(kvs.get(0).getValueArray(),
                kvs.get(0).getValueOffset(), kvs.get(0).getValueLength()));
        assertNull("Expected no more rows", results.next());
        rawTable.close();
    } finally {
        admin.close();
    }
}
 
Example 8
Source File: UpgradeIT.java    From phoenix with Apache License 2.0
private void testMergeViewIndexSequencesHelper(boolean isNamespaceMappingEnabled) throws Exception {
    PhoenixConnection conn = getConnection(false, null, isNamespaceMappingEnabled).unwrap(PhoenixConnection.class);
    ConnectionQueryServices cqs = conn.getQueryServices();
    //First delete any sequences that may exist from previous tests
    conn.createStatement().execute("DELETE FROM " + PhoenixDatabaseMetaData.SYSTEM_SEQUENCE);
    conn.commit();
    cqs.clearCache();
    //Now make sure that running the merge logic doesn't cause a problem when there are no
    //sequences
    UpgradeUtil.mergeViewIndexIdSequences(cqs, conn);
    PName tenantOne = PNameFactory.newName("TENANT_ONE");
    PName tenantTwo = PNameFactory.newName("TENANT_TWO");
    String tableName =
        SchemaUtil.getPhysicalHBaseTableName("TEST",
            "T_" + generateUniqueName(), isNamespaceMappingEnabled).getString();
    PName viewIndexTable = PNameFactory.newName(MetaDataUtil.getViewIndexPhysicalName(tableName));
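    // Create three sequences (two tenant-scoped, one global) under the old
    // view-index naming convention so the merge has entries to consolidate.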
    SequenceKey sequenceOne =
        createViewIndexSequenceWithOldName(cqs, tenantOne, viewIndexTable, isNamespaceMappingEnabled);
    SequenceKey sequenceTwo =
        createViewIndexSequenceWithOldName(cqs, tenantTwo, viewIndexTable, isNamespaceMappingEnabled);
    SequenceKey sequenceGlobal =
        createViewIndexSequenceWithOldName(cqs, null, viewIndexTable, isNamespaceMappingEnabled);

    List<SequenceAllocation> allocations = Lists.newArrayList();
    long val1 = 10;
    long val2 = 100;
    long val3 = 1000;
    allocations.add(new SequenceAllocation(sequenceOne, val1));
    allocations.add(new SequenceAllocation(sequenceGlobal, val2));
    allocations.add(new SequenceAllocation(sequenceTwo, val3));


    long[] incrementedValues = new long[3];
    SQLException[] exceptions = new SQLException[3];
    //simulate incrementing the view indexes
    cqs.incrementSequences(allocations, EnvironmentEdgeManager.currentTimeMillis(), incrementedValues,
        exceptions);
    for (SQLException e : exceptions) {
        assertNull(e);
    }

    UpgradeUtil.mergeViewIndexIdSequences(cqs, conn);
    //now check that there exists a sequence using the new naming convention, whose value is the
    //max of all the previous sequences for this table.

    List<SequenceAllocation> afterUpgradeAllocations = Lists.newArrayList();
    SequenceKey sequenceUpgrade = MetaDataUtil.getViewIndexSequenceKey(null, viewIndexTable, 0, isNamespaceMappingEnabled);
    afterUpgradeAllocations.add(new SequenceAllocation(sequenceUpgrade, 1));
    long[] afterUpgradeValues = new long[1];
    SQLException[] afterUpgradeExceptions = new SQLException[1];
    cqs.incrementSequences(afterUpgradeAllocations, EnvironmentEdgeManager.currentTimeMillis(), afterUpgradeValues, afterUpgradeExceptions);

    assertNull(afterUpgradeExceptions[0]);
    int safetyIncrement = 100;
    if (isNamespaceMappingEnabled){
        //since one sequence (the global one) will be reused as the "new" sequence,
        // it's already in cache and will reflect the final increment immediately
        assertEquals(Long.MIN_VALUE + val3 + safetyIncrement + 1, afterUpgradeValues[0]);
    } else {
        assertEquals(Long.MIN_VALUE + val3 + safetyIncrement, afterUpgradeValues[0]);
    }
}