org.apache.phoenix.jdbc.PhoenixConnection Java Examples

The following examples show how to use org.apache.phoenix.jdbc.PhoenixConnection. Each example is taken from the Apache Phoenix project; the source file it comes from is listed above the code.
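Most of the examples below follow the same basic pattern: open a plain JDBC connection with DriverManager and unwrap it to a PhoenixConnection in order to reach Phoenix-specific APIs such as getQueryServices() or getTable(). The sketch below illustrates only that pattern; the JDBC URL and the printed message are placeholders for illustration, not values taken from any of the examples.

import java.sql.Connection;
import java.sql.DriverManager;

import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.query.ConnectionQueryServices;

public class PhoenixConnectionSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder URL; substitute your own ZooKeeper quorum (Example #10 shows how the tests build it).
        String url = "jdbc:phoenix:localhost:2181";
        try (Connection conn = DriverManager.getConnection(url)) {
            // Unwrap the standard JDBC connection to get at the Phoenix-specific API.
            PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
            // ConnectionQueryServices exposes lower-level services, e.g. the HBase Admin
            // used in Examples #3, #20, and #30.
            ConnectionQueryServices services = phxConn.getQueryServices();
            System.out.println("Connected to Phoenix; query services: " + services);
        }
    }
}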
Example #1
Source File: SystemCatalogUpgradeIT.java    From phoenix with Apache License 2.0
@Test
public void testUpgradeOnlyHappensOnce() throws Exception {
    ConnectionQueryServices services = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class).getQueryServices();
    assertTrue(services instanceof PhoenixUpgradeCountingServices);
    // Check if the timestamp version is changing between the current version and prior version
    boolean wasTimestampChanged = systemTableVersion != MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
    reinitialize = true;
    systemTableVersion = MetaDataProtocol.MIN_SYSTEM_TABLE_TIMESTAMP;
    DriverManager.getConnection(getUrl());
    // Confirm that if the timestamp changed, that an upgrade was performed (and that if it
    // didn't, that an upgrade wasn't attempted).
    assertEquals(wasTimestampChanged ? 1 : 0, countUpgradeAttempts);
    // Confirm that another connection does not increase the number of times upgrade was attempted
    DriverManager.getConnection(getUrl());
    assertEquals(wasTimestampChanged ? 1 : 0, countUpgradeAttempts);
}
 
Example #2
Source File: QueryCompiler.java    From phoenix with Apache License 2.0
protected QueryPlan compileSubquery(SelectStatement subquery, boolean pushDownMaxRows) throws SQLException {
    PhoenixConnection connection = this.statement.getConnection();
    subquery = SubselectRewriter.flatten(subquery, connection);
    ColumnResolver resolver = FromCompiler.getResolverForQuery(subquery, connection);
    subquery = StatementNormalizer.normalize(subquery, resolver);
    SelectStatement transformedSubquery = SubqueryRewriter.transform(subquery, resolver, connection);
    if (transformedSubquery != subquery) {
        resolver = FromCompiler.getResolverForQuery(transformedSubquery, connection);
        subquery = StatementNormalizer.normalize(transformedSubquery, resolver);
    }
    int maxRows = this.statement.getMaxRows();
    this.statement.setMaxRows(pushDownMaxRows ? maxRows : 0); // overwrite maxRows to avoid its impact on inner queries.
    QueryPlan plan = new QueryCompiler(this.statement, subquery, resolver, bindManager, false, optimizeSubquery, null).compile();
    if (optimizeSubquery) {
        plan = statement.getConnection().getQueryServices().getOptimizer().optimize(statement, plan);
    }
    this.statement.setMaxRows(maxRows); // restore maxRows.
    return plan;
}
 
Example #3
Source File: SetPropertyIT.java    From phoenix with Apache License 2.0
@Test
public void testSetPropertyAndAddColumnForDefaultColumnFamily() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(false);
    String ddl = "CREATE TABLE " + dataTableFullName +
            "  (a_string varchar not null, col1 integer" +
            "  CONSTRAINT pk PRIMARY KEY (a_string)) " + tableDDLOptions;
    try {
        conn.createStatement().execute(ddl);
        conn.createStatement().execute("ALTER TABLE " + dataTableFullName + " ADD col2 integer IN_MEMORY=true");
        try (Admin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
            ColumnFamilyDescriptor[] columnFamilies = admin.getDescriptor(TableName.valueOf(dataTableFullName))
                    .getColumnFamilies();
            assertEquals(1, columnFamilies.length);
            assertEquals("0", columnFamilies[0].getNameAsString());
            assertTrue(columnFamilies[0].isInMemory());
        }
    } finally {
        conn.close();
    }
}
 
Example #4
Source File: OrphanViewTool.java    From phoenix with Apache License 2.0
/**
 * Go through all the physical links in the system catalog table and update the base table info of the
 * view objects in orphanViewSet. If the base or view object does not exist for a given link, then add the link
 * to orphanLinkSet.
 * @param phoenixConnection
 * @throws Exception
 */
private void processPhysicalLinks(PhoenixConnection phoenixConnection)
        throws Exception {
    ResultSet physicalLinkRS = phoenixConnection.createStatement().executeQuery(physicalLinkQuery);
    while (physicalLinkRS.next()) {
        String tenantId = physicalLinkRS.getString(1);
        String schemaName = physicalLinkRS.getString(2);
        String tableName = physicalLinkRS.getString(3);
        Key viewKey = new Key(tenantId, schemaName, tableName);
        View view = orphanViewSet.get(viewKey);

        String baseTenantId = physicalLinkRS.getString(4);
        String baseFullTableName = physicalLinkRS.getString(5);
        Key baseKey = new Key(baseTenantId, baseFullTableName);
        Base base = baseSet.get(baseKey);

        if (view == null || base == null) {
            orphanLinkSet.add(new Link(viewKey, baseKey, PTable.LinkType.PHYSICAL_TABLE));
        }
        else {
            view.setBase(baseKey);
        }
    }
}
 
Example #5
Source File: FromCompiler.java    From phoenix with Apache License 2.0
private ColumnResolverWithUDF(PhoenixConnection connection, int tsAddition,
                              boolean updateCacheImmediately,
                              Map<String, UDFParseNode> udfParseNodes) throws SQLException {
    this.connection = connection;
    this.client = connection == null ? null : new MetaDataClient(connection);
    this.tsAddition = tsAddition;
    functionMap = new HashMap<String, PFunction>(1);
    if (udfParseNodes.isEmpty()) {
        functions = Collections.<PFunction> emptyList();
    } else {
        functions = createFunctionRef(new ArrayList<String>(udfParseNodes.keySet()),
                updateCacheImmediately);
        for (PFunction function : functions) {
            functionMap.put(function.getFunctionName(), function);
        }
    }
}
 
Example #6
Source File: WhereCompilerTest.java    From phoenix with Apache License 2.0
@Test
public void testToDateFilter() throws Exception {
    String tenantId = "000000000000001";
    String dateStr = "2012-01-01 12:00:00";
    String query = "select * from atable where organization_id='" + tenantId + "' and a_date >= to_date('" + dateStr + "')";
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    Filter filter = scan.getFilter();

    Date date = DateUtil.parseDate(dateStr);

    assertEquals(
        singleKVFilter(constantComparison(
            CompareOp.GREATER_OR_EQUAL,
            A_DATE,
            date)),
        filter);
}
 
Example #7
Source File: IndexRebuildIncrementDisableCountIT.java    From phoenix with Apache License 2.0
private static void checkIndexPendingDisableCount(final PhoenixConnection conn,
        final String indexTableName) throws Exception {
    Runnable runnable = new Runnable() {
        @Override
        public void run() {
            try {
                while (!TestUtil.checkIndexState(conn, indexTableName, PIndexState.ACTIVE,
                    0L)) {
                    long count = getPendingDisableCount(conn, indexTableName);
                    if (count > 0) {
                        indexState =
                                new String(
                                        pendingDisableCountResult.getValue(TABLE_FAMILY_BYTES,
                                            PhoenixDatabaseMetaData.INDEX_STATE_BYTES));
                        pendingDisableCount = count;
                    }
                    Thread.sleep(100);
                }
            } catch (Exception e) {
                LOGGER.error("Error in checkPendingDisableCount : " + e);
            }
        }
    };
    Thread t1 = new Thread(runnable);
    t1.start();
}
 
Example #8
Source File: CSVCommonsLoaderIT.java    From phoenix with Apache License 2.0
@Test
public void testCSVCommonsUpsert_NonExistentTable() throws Exception {
    PhoenixConnection conn = null;
    try {
        conn = DriverManager.getConnection(getUrl()).unwrap(
                PhoenixConnection.class);
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, "NONEXISTENTTABLE",
                null, true, ',', '"', '\\', "!");
        csvUtil.upsert(
                new StringReader("ID,VALARRAY\n"
                        + "1,2!3!4\n"));
        fail("Trying to load a non-existent table should fail");
    } catch (IllegalArgumentException e) {
        assertEquals("Table NONEXISTENTTABLE not found", e.getMessage());
    } finally {
        if (conn != null) {
            conn.close();
        }
    }

}
 
Example #9
Source File: WhereCompilerTest.java    From phoenix with Apache License 2.0
private void helpTestToNumberFilter(String toNumberClause, BigDecimal expectedDecimal) throws Exception {
        String tenantId = "000000000000001";
        String query = "select * from atable where organization_id='" + tenantId + "' and x_decimal >= " + toNumberClause;
        PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
        PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
        QueryPlan plan = pstmt.optimizeQuery();
        Scan scan = plan.getContext().getScan();
        Filter filter = scan.getFilter();

        assertEquals(
            singleKVFilter(constantComparison(
                CompareOp.GREATER_OR_EQUAL,
                X_DECIMAL,
                expectedDecimal)),
            filter);
}
 
Example #10
Source File: RebuildIndexConnectionPropsIT.java    From phoenix with Apache License 2.0
@BeforeClass
public static synchronized void doSetup() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    hbaseTestUtil = new HBaseTestingUtility(conf);
    Map<String, String> serverProps = new HashMap<>();
    serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB, QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    // need at least one retry otherwise test fails
    serverProps.put(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER, Long.toString(NUM_RPC_RETRIES));
    setUpConfigForMiniCluster(conf, new ReadOnlyProps(serverProps.entrySet().iterator()));
    hbaseTestUtil.startMiniCluster();
    // establish url and quorum. Need to use PhoenixDriver and not PhoenixTestDriver
    zkQuorum = "localhost:" + hbaseTestUtil.getZkCluster().getClientPort();
    url = PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR + zkQuorum;
    Properties driverProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    DriverManager.registerDriver(PhoenixDriver.INSTANCE);
    try (PhoenixConnection phxConn =
            DriverManager.getConnection(url, driverProps).unwrap(PhoenixConnection.class)) {
        // Open and immediately close a connection so the driver initializes against the mini cluster.
    }
}
 
Example #11
Source File: OrphanViewTool.java    From phoenix with Apache License 2.0
private void removeOrLogOrphanLinks(PhoenixConnection phoenixConnection) {
    for (Link link : orphanLinkSet) {
        try {
            byte linkType = getLinkType(link.type);
            if (outputPath != null) {
                writer[linkType].write(link.src.getSerializedValue() + "-->" + link.dst.getSerializedValue());
                writer[linkType].newLine();
            }
            else if (!clean){
                System.out.println(link.src.getSerializedValue() + "-(" + link.type + ")->" + link.dst.getSerializedValue());
            }
            if (clean) {
                removeLink(phoenixConnection, link.src, link.dst, link.type);
            }
        } catch (Exception e) {
            // ignore
        }
    }
}
 
Example #12
Source File: Task.java    From phoenix with Apache License 2.0
public static void deleteTask(PhoenixConnection conn, PTable.TaskType taskType, Timestamp ts, String tenantId,
        String schemaName, String tableName, boolean accessCheckEnabled) throws IOException {
    PreparedStatement stmt = null;
    try {
        stmt = conn.prepareStatement("DELETE FROM " +
                PhoenixDatabaseMetaData.SYSTEM_TASK_NAME +
                " WHERE " + PhoenixDatabaseMetaData.TASK_TYPE + " = ? AND " +
                PhoenixDatabaseMetaData.TASK_TS + " = ? AND " +
                PhoenixDatabaseMetaData.TENANT_ID + (tenantId == null ? " IS NULL " : " = '" + tenantId + "'") + " AND " +
                PhoenixDatabaseMetaData.TABLE_SCHEM + (schemaName == null ? " IS NULL " : " = '" + schemaName + "'") + " AND " +
                PhoenixDatabaseMetaData.TABLE_NAME + " = ?");
        stmt.setByte(1, taskType.getSerializedValue());
        stmt.setTimestamp(2, ts);
        stmt.setString(3, tableName);
    } catch (SQLException e) {
        throw new IOException(e);
    }
    mutateSystemTaskTable(conn, stmt, accessCheckEnabled);
}
 
Example #13
Source File: PhoenixRuntime.java    From phoenix with Apache License 2.0
@Deprecated
private static List<PColumn> getPkColumns(PTable ptable, Connection conn, boolean forDataTable) throws SQLException {
    PhoenixConnection pConn = conn.unwrap(PhoenixConnection.class);
    List<PColumn> pkColumns = ptable.getPKColumns();
    
    // Skip the salting column and the view index id column if present.
    // Skip the tenant id column too if the connection is tenant specific and the table used by the query plan is multi-tenant
    int offset = (ptable.getBucketNum() == null ? 0 : 1) + (ptable.isMultiTenant() && pConn.getTenantId() != null ? 1 : 0) + (ptable.getViewIndexId() == null ? 0 : 1);
    
    // get a sublist of pkColumns by skipping the offset columns.
    pkColumns = pkColumns.subList(offset, pkColumns.size());
    
    if (ptable.getType() == PTableType.INDEX && forDataTable) {
        // index tables have the same schema name as their parent/data tables.
        String fullDataTableName = ptable.getParentName().getString();
        
        // Get the corresponding columns of the data table.
        List<PColumn> dataColumns = IndexUtil.getDataColumns(fullDataTableName, pkColumns, pConn);
        pkColumns = dataColumns;
    }
    return pkColumns;
}
 
Example #14
Source File: MutationState.java    From phoenix with Apache License 2.0
MutationState(int maxSize, long maxSizeBytes, PhoenixConnection connection,
        Map<TableRef, MultiRowMutationState> mutations, boolean subTask, PhoenixTransactionContext txContext) {
    this.maxSize = maxSize;
    this.maxSizeBytes = maxSizeBytes;
    this.connection = connection;
    this.batchSize = connection.getMutateBatchSize();
    this.batchSizeBytes = connection.getMutateBatchSizeBytes();
    this.mutations = mutations;
    boolean isMetricsEnabled = connection.isRequestLevelMetricsEnabled();
    this.mutationMetricQueue = isMetricsEnabled ? new MutationMetricQueue()
            : NoOpMutationMetricsQueue.NO_OP_MUTATION_METRICS_QUEUE;
    if (subTask) {
        // this code path is only used while running child scans, we can't pass the txContext to child scans
        // as it is not thread safe, so we use the tx member variable
        phoenixTransactionContext = txContext.newTransactionContext(txContext, subTask);
    } else if (txContext != null) {
        isExternalTxContext = true;
        phoenixTransactionContext = txContext.newTransactionContext(txContext, subTask);
    }
}
 
Example #15
Source File: ColumnEncodedBytesPropIT.java    From phoenix with Apache License 2.0
@Test
public void testValidateProperty() throws SQLException {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String dataTableFullName1 = SchemaUtil.getTableName("", generateUniqueName());
    String dataTableFullName2 = SchemaUtil.getTableName("", generateUniqueName());
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        Statement stmt = conn.createStatement();
        String ddl = "CREATE TABLE  " + dataTableFullName1 +
                "  (id varchar not null, val varchar " +
                "  CONSTRAINT pk PRIMARY KEY (id)) COLUMN_ENCODED_BYTES=4";
        stmt.execute(ddl);

        ddl = "CREATE TABLE  " + dataTableFullName2 +
                "  (id varchar not null, val varchar " +
                "  CONSTRAINT pk PRIMARY KEY (id)) COLUMN_ENCODED_BYTES=NONE";
        stmt.execute(ddl);

        PhoenixConnection phxConn = conn.unwrap(PhoenixConnection.class);
        PTable dataTable1 = phxConn.getTable(new PTableKey(null, dataTableFullName1));
        assertEquals("Encoding scheme set incorrectly", QualifierEncodingScheme.FOUR_BYTE_QUALIFIERS, dataTable1.getEncodingScheme());

        PTable dataTable2 = phxConn.getTable(new PTableKey(null, dataTableFullName2));
        assertEquals("Encoding scheme set incorrectly", QualifierEncodingScheme.NON_ENCODED_QUALIFIERS, dataTable2.getEncodingScheme());
    }
}
 
Example #16
Source File: WhereCompilerTest.java    From phoenix with Apache License 2.0
@Test
public void testInListWithAnd1FilterScankey() throws SQLException {
    String tenantId1 = "000000000000001";
    String tenantId2 = "000000000000002";
    String tenantId3 = "000000000000003";
    String entityId = "00000000000000X";
    String query = String.format("select * from %s where organization_id IN ('%s','%s','%s') AND entity_id='%s'",
            ATABLE_NAME, tenantId1, tenantId3, tenantId2, entityId);
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    byte[] startRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId1), PVarchar.INSTANCE.toBytes(entityId));
    assertArrayEquals(startRow, scan.getStartRow());
    byte[] stopRow = ByteUtil.concat(PVarchar.INSTANCE.toBytes(tenantId3), PVarchar.INSTANCE.toBytes(entityId));
    assertArrayEquals(ByteUtil.concat(stopRow, QueryConstants.SEPARATOR_BYTE_ARRAY), scan.getStopRow());
    // TODO: validate scan ranges
}
 
Example #17
Source File: TaskRegionObserver.java    From phoenix with Apache License 2.0
public static void setEndTaskStatus(PhoenixConnection connForTask, Task.TaskRecord taskRecord, String taskStatus)
        throws IOException, SQLException {
    // update data with details.
    String data = taskRecord.getData();
    if (Strings.isNullOrEmpty(data)) {
        data = "{}";
    }
    JsonNode jsonNode = JacksonUtil.getObjectReader().readTree(data);
    ((ObjectNode) jsonNode).put(TASK_DETAILS, taskStatus);
    data = jsonNode.toString();

    Timestamp endTs = new Timestamp(EnvironmentEdgeManager.currentTimeMillis());
    Task.addTask(connForTask, taskRecord.getTaskType(), taskRecord.getTenantId(), taskRecord.getSchemaName(),
            taskRecord.getTableName(), taskStatus, data, taskRecord.getPriority(),
            taskRecord.getTimeStamp(), endTs, true);
}
 
Example #18
Source File: WhereCompilerTest.java    From phoenix with Apache License 2.0
@Test
public void testTypeMismatch() throws SQLException {
    String tenantId = "000000000000001";
    String query = "select * from atable where organization_id='" + tenantId + "' and a_integer > 'foo'";
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);

    try {
        pstmt.optimizeQuery();
        fail();
    } catch (SQLException e) {
        assertTrue(e.getMessage().contains("Type mismatch"));
    }
}
 
Example #19
Source File: CSVCommonsLoaderIT.java    From phoenix with Apache License 2.0
@Test
public void testCSVUpsertWithInvalidNumericalData_StrictMode() throws Exception {
    CSVParser parser = null;
    PhoenixConnection conn = null;
    try {
        String stockTableName = generateUniqueName();

        // Create table
        String statements = "CREATE TABLE IF NOT EXISTS " + stockTableName
                + "(SYMBOL VARCHAR NOT NULL PRIMARY KEY, COMPANY_ID BIGINT);";
        conn = DriverManager.getConnection(getUrl())
                .unwrap(PhoenixConnection.class);
        PhoenixRuntime.executeStatements(conn,
                new StringReader(statements), null);

        // Upsert CSV file in strict mode
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, stockTableName,
                Arrays.asList("SYMBOL", "COMPANY_ID"), true);
        try {
            csvUtil.upsert(new StringReader(STOCK_CSV_VALUES));
            fail("Running an upsert with data that can't be upserted in strict mode "
                    + "should throw an exception");
        } catch (IllegalDataException e) {
            // Expected
        }

    } finally {
        if (parser != null)
            parser.close();
        if (conn != null)
            conn.close();
    }
}
 
Example #20
Source File: RoundRobinResultIteratorIT.java    From phoenix with Apache License 2.0
@Test
public void testRoundRobinAfterTableSplit() throws Exception {
    String tableName = generateUniqueName();
    byte[] tableNameBytes = Bytes.toBytes(tableName);
    int numRows = setupTableForSplit(tableName);
    Connection conn = getConnection();
    ConnectionQueryServices services = conn.unwrap(PhoenixConnection.class).getQueryServices();
    int nRegions = services.getAllTableRegions(tableNameBytes).size();
    int nRegionsBeforeSplit = nRegions;
    Admin admin = services.getAdmin();
    try {
        // Split is an async operation. So hoping 10 seconds is long enough time.
        // If the test tends to flap, then you might want to increase the wait time
        admin.split(TableName.valueOf(tableName));
        CountDownLatch latch = new CountDownLatch(1);
        int nTries = 0;
        long waitTimeMillis = 2000;
        while (nRegions == nRegionsBeforeSplit && nTries < 10) {
            latch.await(waitTimeMillis, TimeUnit.MILLISECONDS);
            nRegions = services.getAllTableRegions(tableNameBytes).size();
            nTries++;
        }
        
        String query = "SELECT * FROM " + tableName;
        Statement stmt = conn.createStatement();
        stmt.setFetchSize(10); // this makes scanner caches to be replenished in parallel.
        ResultSet rs = stmt.executeQuery(query);
        int numRowsRead = 0;
        while (rs.next()) {
            numRowsRead++;
        }
        nRegions = services.getAllTableRegions(tableNameBytes).size();
        // Region cache has been updated, as there are more regions now
        assertNotEquals(nRegions, nRegionsBeforeSplit);
        assertEquals(numRows, numRowsRead);
    } finally {
        admin.close();
    }

}
 
Example #21
Source File: AlterMultiTenantTableWithViewsIT.java    From phoenix with Apache License 2.0
private int getIndexOfPkColumn(PhoenixConnection conn, String columnName, String tableName) throws SQLException {
    String normalizedTableName = SchemaUtil.normalizeIdentifier(tableName);
    PTable table = conn.getTable(new PTableKey(conn.getTenantId(), normalizedTableName));
    List<PColumn> pkCols = table.getPKColumns();
    String normalizedColumnName = SchemaUtil.normalizeIdentifier(columnName);
    int i = 0;
    for (PColumn pkCol : pkCols) {
        if (pkCol.getName().getString().equals(normalizedColumnName)) {
            return i;
        }
        i++;
    }
    return -1;
}
 
Example #22
Source File: ScannerLeaseRenewalIT.java    From phoenix with Apache License 2.0
@Test
public void testRenewLeasePreventsUpsertSelectFromFailing() throws Exception {
    String table1 = "testRenewLeasePreventsUpsertSelectFromFailing";
    String table2 = "testRenewLeasePreventsUpsertSelectFromFailing2";

    try (Connection conn = DriverManager.getConnection(url)) {
        conn.createStatement().execute(
            "CREATE TABLE " + table1 + " (PK1 INTEGER NOT NULL PRIMARY KEY, KV1 VARCHAR)");
        conn.createStatement().execute(
            "CREATE TABLE " + table2 + " (PK1 INTEGER NOT NULL PRIMARY KEY, KV1 VARCHAR)");
        int numRecords = 5;
        int i = 0;
        String upsert = "UPSERT INTO " + table1 + " VALUES (?, ?)";
        Random random = new Random();
        PreparedStatement stmt = conn.prepareStatement(upsert);
        while (i < numRecords) {
            stmt.setInt(1, random.nextInt());
            stmt.setString(2, "KV" + random.nextInt());
            stmt.executeUpdate();
            i++;
        }
        conn.commit();
    }

    try (PhoenixConnection phxConn =
            DriverManager.getConnection(url).unwrap(PhoenixConnection.class)) {
        String upsertSelect = "UPSERT INTO " + table2 + " SELECT PK1, KV1 FROM " + table1;
        // at every next call wait for this period. This will cause lease to expire.
        long delayAfterInit = 2 * LEASE_TIMEOUT_PERIOD_MILLIS;
        phxConn.setTableResultIteratorFactory(new DelayedTableResultIteratorFactory(
                delayAfterInit));
        Statement s = phxConn.createStatement();
        s.setFetchSize(2);
        s.executeUpdate(upsertSelect);
    }
}
 
Example #23
Source File: BaseConnectionlessQueryTest.java    From phoenix with Apache License 2.0
@BeforeClass
public static void doSetup() throws Exception {
    startServer(getUrl());
    ensureTableCreated(getUrl(), ATABLE_NAME);
    ensureTableCreated(getUrl(), ENTITY_HISTORY_TABLE_NAME);
    ensureTableCreated(getUrl(), FUNKY_NAME);
    ensureTableCreated(getUrl(), PTSDB_NAME);
    ensureTableCreated(getUrl(), PTSDB2_NAME);
    ensureTableCreated(getUrl(), PTSDB3_NAME);
    ensureTableCreated(getUrl(), MULTI_CF_NAME);
    ensureTableCreated(getUrl(), JOIN_ORDER_TABLE_FULL_NAME);
    ensureTableCreated(getUrl(), JOIN_CUSTOMER_TABLE_FULL_NAME);
    ensureTableCreated(getUrl(), JOIN_ITEM_TABLE_FULL_NAME);
    ensureTableCreated(getUrl(), JOIN_SUPPLIER_TABLE_FULL_NAME);
    ensureTableCreated(getUrl(), TABLE_WITH_ARRAY);
    Properties props = new Properties();
    props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(HConstants.LATEST_TIMESTAMP));
    PhoenixConnection conn = DriverManager.getConnection(PHOENIX_CONNECTIONLESS_JDBC_URL, props).unwrap(PhoenixConnection.class);
    try {
        PTable table = conn.getMetaDataCache().getTable(new PTableKey(null, ATABLE_NAME));
        ATABLE = table;
        ORGANIZATION_ID = new ColumnRef(new TableRef(table), table.getColumn("ORGANIZATION_ID").getPosition()).newColumnExpression();
        ENTITY_ID = new ColumnRef(new TableRef(table), table.getColumn("ENTITY_ID").getPosition()).newColumnExpression();
        A_INTEGER = new ColumnRef(new TableRef(table), table.getColumn("A_INTEGER").getPosition()).newColumnExpression();
        A_STRING = new ColumnRef(new TableRef(table), table.getColumn("A_STRING").getPosition()).newColumnExpression();
        B_STRING = new ColumnRef(new TableRef(table), table.getColumn("B_STRING").getPosition()).newColumnExpression();
        A_DATE = new ColumnRef(new TableRef(table), table.getColumn("A_DATE").getPosition()).newColumnExpression();
        A_TIME = new ColumnRef(new TableRef(table), table.getColumn("A_TIME").getPosition()).newColumnExpression();
        A_TIMESTAMP = new ColumnRef(new TableRef(table), table.getColumn("A_TIMESTAMP").getPosition()).newColumnExpression();
        X_DECIMAL = new ColumnRef(new TableRef(table), table.getColumn("X_DECIMAL").getPosition()).newColumnExpression();
    } finally {
        conn.close();
    }
}
 
Example #24
Source File: IndexMaintainer.java    From phoenix with Apache License 2.0
public static IndexMaintainer create(PTable dataTable, PTable index, PhoenixConnection connection) {
    if (dataTable.getType() == PTableType.INDEX || index.getType() != PTableType.INDEX || !dataTable.getIndexes().contains(index)) {
        throw new IllegalArgumentException();
    }
    IndexMaintainer maintainer = new IndexMaintainer(dataTable, index, connection);
    return maintainer;
}
 
Example #25
Source File: DeleteCompiler.java    From phoenix with Apache License 2.0
@Override
protected MutationState mutate(StatementContext parentContext, ResultIterator iterator, PhoenixConnection connection) throws SQLException {
    PhoenixStatement statement = new PhoenixStatement(connection);
    /*
     * We don't want to collect any read metrics within the child context. This is because any read metrics that
     * need to be captured are already getting collected in the parent statement context enclosed in the result
     * iterator being used for reading rows out.
     */
    StatementContext context = new StatementContext(statement, false);
    MutationState state = deleteRows(context, iterator, queryPlan, projectedTableRef, otherTableRefs);
    return state;
}
 
Example #26
Source File: WhereCompilerTest.java    From phoenix with Apache License 2.0
@Test
public void testScanCaching_Default() throws SQLException {
    String query = "select * from atable where a_integer=0";
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    Configuration config = HBaseConfiguration.create();
    int defaultScannerCacheSize = config.getInt(HConstants.HBASE_CLIENT_SCANNER_CACHING,
            HConstants.DEFAULT_HBASE_CLIENT_SCANNER_CACHING);
    assertEquals(defaultScannerCacheSize, pstmt.getFetchSize());
    assertEquals(defaultScannerCacheSize, scan.getCaching());
}
 
Example #27
Source File: WhereCompilerTest.java    From phoenix with Apache License 2.0
@Test
public void testPaddedStartStopKey() throws SQLException {
    String tenantId = "000000000000001";
    String keyPrefix = "fo";
    String query = "select * from atable where organization_id=? AND entity_id=?";
    List<Object> binds = Arrays.<Object>asList(tenantId,keyPrefix);
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    bindParams(pstmt, binds);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    byte[] expectedStartRow = ByteUtil.concat(Bytes.toBytes(tenantId), StringUtil.padChar(Bytes.toBytes(keyPrefix), 15));
    assertArrayEquals(expectedStartRow,scan.getStartRow());
    assertArrayEquals(ByteUtil.nextKey(expectedStartRow),scan.getStopRow());
}
 
Example #28
Source File: WhereCompilerTest.java    From phoenix with Apache License 2.0
@Test
public void testScanCaching_Default() throws SQLException {
    String query = "select * from atable where a_integer=0";
    PhoenixConnection pconn = DriverManager.getConnection(getUrl(), PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    assertEquals(QueryServicesOptions.DEFAULT_SCAN_CACHE_SIZE, pstmt.getFetchSize());
    assertEquals(QueryServicesOptions.DEFAULT_SCAN_CACHE_SIZE, scan.getCaching());
}
 
Example #29
Source File: WhereCompilerTest.java    From phoenix with Apache License 2.0
@Test
public void testTenantConstraintsAddedToScan() throws SQLException {
    String tenantTypeId = "5678";
    String tenantId = "000000000000123";
    String url = getUrl(tenantId);
    createTestTable(getUrl(), "create table base_table_for_tenant_filter_test (tenant_id char(15) not null, type_id char(4) not null, " +
    		"id char(5) not null, a_integer integer, a_string varchar(100) constraint pk primary key (tenant_id, type_id, id)) multi_tenant=true");
    createTestTable(url, "create view tenant_filter_test (tenant_col integer) AS SELECT * FROM BASE_TABLE_FOR_TENANT_FILTER_TEST WHERE type_id= '" + tenantTypeId + "'");

    String query = "select * from tenant_filter_test where a_integer=0 and a_string='foo'";
    PhoenixConnection pconn = DriverManager.getConnection(url, PropertiesUtil.deepCopy(TEST_PROPERTIES)).unwrap(PhoenixConnection.class);
    PhoenixPreparedStatement pstmt = newPreparedStatement(pconn, query);
    QueryPlan plan = pstmt.optimizeQuery();
    Scan scan = plan.getContext().getScan();
    Filter filter = scan.getFilter();
    PTable table = plan.getTableRef().getTable();
    Expression aInteger = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_INTEGER").getPosition()).newColumnExpression();
    Expression aString = new ColumnRef(new TableRef(table), table.getColumnForColumnName("A_STRING").getPosition()).newColumnExpression();
    assertEquals(
        multiEncodedKVFilter(and(
            constantComparison(
                CompareOp.EQUAL,
                aInteger,
                0),
            constantComparison(
                CompareOp.EQUAL,
                aString,
                "foo")), TWO_BYTE_QUALIFIERS),
        filter);

    byte[] startRow = PVarchar.INSTANCE.toBytes(tenantId + tenantTypeId);
    assertArrayEquals(startRow, scan.getStartRow());
    byte[] stopRow = startRow;
    assertArrayEquals(ByteUtil.nextKey(stopRow), scan.getStopRow());
}
 
Example #30
Source File: AlterTableIT.java    From phoenix with Apache License 2.0
@Test
public void testSettingPropertiesWhenTableHasDefaultColFamilySpecified() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    String ddl = "CREATE TABLE T11 (\n"
            +"ID1 VARCHAR(15) NOT NULL,\n"
            +"ID2 VARCHAR(15) NOT NULL,\n"
            +"CREATED_DATE DATE,\n"
            +"CREATION_TIME BIGINT,\n"
            +"CF.LAST_USED DATE,\n"
            +"CONSTRAINT PK PRIMARY KEY (ID1, ID2)) IMMUTABLE_ROWS=true, DEFAULT_COLUMN_FAMILY = 'XYZ'";
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.createStatement().execute(ddl);
    assertImmutableRows(conn, "T11", true);
    ddl = "ALTER TABLE T11 SET COMPACTION_ENABLED = FALSE, CF.REPLICATION_SCOPE=1, IMMUTABLE_ROWS = TRUE, TTL=1000";
    conn.createStatement().execute(ddl);
    assertImmutableRows(conn, "T11", true);
    try (HBaseAdmin admin = conn.unwrap(PhoenixConnection.class).getQueryServices().getAdmin()) {
        HTableDescriptor tableDesc = admin.getTableDescriptor(Bytes.toBytes("T11"));
        HColumnDescriptor[] columnFamilies = tableDesc.getColumnFamilies();
        assertEquals(2, columnFamilies.length);
        assertEquals("CF", columnFamilies[0].getNameAsString());
        assertEquals(1, columnFamilies[0].getScope());
        assertEquals(1000, columnFamilies[0].getTimeToLive());
        assertEquals("XYZ", columnFamilies[1].getNameAsString());
        assertEquals(DEFAULT_REPLICATION_SCOPE, columnFamilies[1].getScope());
        assertEquals(1000, columnFamilies[1].getTimeToLive());
        assertEquals(Boolean.toString(false), tableDesc.getValue(HTableDescriptor.COMPACTION_ENABLED));
    }
}