org.apache.phoenix.query.QueryServices Java Examples

The following examples show how to use org.apache.phoenix.query.QueryServices. Each example is taken from the Apache Phoenix project; the originating source file and license are noted above each snippet.
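Most of the snippets below follow the same pattern: a constant from QueryServices is used as a property key, either on the Properties passed to the JDBC connection or in the server/client configuration handed to the test driver. The following minimal sketch shows the connection-level form; the JDBC URL, table name, and the particular property value are illustrative placeholders, not taken from any of the examples.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;
import java.util.Properties;

import org.apache.phoenix.query.QueryServices;

public class QueryServicesUsageSketch {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // QueryServices constants are the keys Phoenix recognizes for configuration;
        // here the connection-level default for STORE_NULLS is switched on.
        props.setProperty(QueryServices.DEFAULT_STORE_NULLS_ATTRIB, "true");

        // The ZooKeeper quorum in the URL is a placeholder; point it at your cluster.
        try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost:2181", props);
                Statement stmt = conn.createStatement()) {
            stmt.execute("CREATE TABLE IF NOT EXISTS EXAMPLE_TABLE (ID INTEGER PRIMARY KEY, NAME VARCHAR)");
            try (ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM EXAMPLE_TABLE")) {
                while (rs.next()) {
                    System.out.println("row count: " + rs.getLong(1));
                }
            }
        }
    }
}

The same keys can also be set in the HBase configuration (for example hbase-site.xml); the integration tests below instead pass them programmatically to setUpTestDriver as ReadOnlyProps.
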
Example #1
Source File: StoreNullsPropIT.java    From phoenix with Apache License 2.0
@Test
public void testSetStoreNullsDefaultViaConfig() throws SQLException {
    Properties props = new Properties();
    props.setProperty(QueryServices.DEFAULT_STORE_NULLS_ATTRIB, "true");
    Connection storeNullsConn = DriverManager.getConnection(getUrl(), props);

    Statement stmt = storeNullsConn.createStatement();
    stmt.execute("CREATE TABLE with_nulls_default (" +
            "id smallint primary key," +
            "name varchar)");

    // STORE_NULLS was not specified in the DDL, so the table should pick up the
    // connection-level default and record it in SYSTEM.CATALOG.
    ResultSet rs = stmt.executeQuery("SELECT store_nulls FROM \"SYSTEM\".CATALOG " +
            "WHERE table_name = 'WITH_NULLS_DEFAULT' AND store_nulls is not null");
    assertTrue(rs.next());
    assertTrue(rs.getBoolean(1));
}
 
Example #2
Source File: MutationStateTest.java    From phoenix with Apache License 2.0
@Test
public void testPendingMutationsOnDDL() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    props.setProperty(QueryServices.PENDING_MUTATIONS_DDL_THROW_ATTRIB, "true");
    try (Connection conn = DriverManager.getConnection(getUrl(), props);
            PhoenixConnection pConnSpy = spy((PhoenixConnection) conn)) {
        MutationState mutationState = mock(MutationState.class);
        when(mutationState.getNumRows()).thenReturn(1);

        // Create a connection with mutation state and mock it
        doReturn(mutationState).when(pConnSpy).getMutationState();
        exceptionRule.expect(SQLException.class);
        exceptionRule.expectMessage(
            SQLExceptionCode.CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS.getMessage());

        pConnSpy.createStatement().execute("create table MUTATION_TEST1"
                + "( id1 UNSIGNED_INT not null primary key," + "appId1 VARCHAR)");
    }

}
 
Example #3
Source File: DropIndexDuringUpsertIT.java    From phoenix with Apache License 2.0
@Before
public void doSetup() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    conf.setInt("hbase.client.retries.number", 2);
    conf.setInt("hbase.client.pause", 5000);
    conf.setInt("hbase.balancer.period", Integer.MAX_VALUE);
    conf.setLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB, 0);
    util = new HBaseTestingUtility(conf);
    util.startMiniCluster(NUM_SLAVES);
    String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
    url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
            + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;

    Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
    // Must update config before starting server
    props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
    driver = initAndRegisterDriver(url, new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example #4
Source File: ServerCacheClient.java    From phoenix with Apache License 2.0
public ServerCache(byte[] id, Set<HRegionLocation> servers, ImmutableBytesWritable cachePtr,
        ConnectionQueryServices services, boolean storeCacheOnClient) throws IOException {
    maxServerCacheTTL = services.getProps().getInt(
            QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS);
    this.id = id;
    this.servers = new HashMap<>();
    long currentTime = EnvironmentEdgeManager.currentTimeMillis();
    for(HRegionLocation loc : servers) {
        this.servers.put(loc, currentTime);
    }
    this.size =  cachePtr.getLength();
    if (storeCacheOnClient) {
        try {
            this.chunk = services.getMemoryManager().allocate(cachePtr.getLength());
            this.cachePtr = cachePtr;
        } catch (InsufficientMemoryException e) {
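            // Not enough client-side memory to hold the cache, so spill it to a
            // temp file under the configured spool directory instead.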
            this.outputFile = File.createTempFile("HashJoinCacheSpooler", ".bin", new File(services.getProps()
                    .get(QueryServices.SPOOL_DIRECTORY, QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY)));
            try (OutputStream fio = Files.newOutputStream(outputFile.toPath())) {
                fio.write(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength());
            }
        }
    }
    
}
 
Example #5
Source File: PhoenixServerRpcIT.java    From phoenix with Apache License 2.0
@Test
public void testUpsertSelectServerDisabled() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    // disable server side upsert select
    props.setProperty(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "false");
    try (Connection conn = driver.connect(getUrl(), props)) {
        // create two tables with identical schemas
        createTable(conn, dataTableFullName);
        upsertRow(conn, dataTableFullName);
        String tableName2 = dataTableFullName + "_2";
        createTable(conn, tableName2);
        ensureTablesOnDifferentRegionServers(dataTableFullName, tableName2);
        // copy the row from the first table using upsert select
        upsertSelectRows(conn, dataTableFullName, tableName2);
        Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor(),
                Mockito.never()).dispatch(Mockito.any(CallRunner.class));

    }
}
 
Example #6
Source File: NonTxIndexBuilderTest.java    From phoenix with Apache License 2.0
private IndexMaintainer getTestIndexMaintainer() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
    // disable column encoding, makes debugging easier
    props.put(QueryServices.DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB, "0");
    Connection conn = DriverManager.getConnection(getUrl(), props);
    try {
        conn.setAutoCommit(true);
        conn.createStatement().execute(TEST_TABLE_DDL);
        conn.createStatement().execute(TEST_TABLE_INDEX_DDL);
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), TEST_TABLE_STRING));
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        table.getIndexMaintainers(ptr, pconn);
        List<IndexMaintainer> indexMaintainerList =
                IndexMaintainer.deserialize(ptr, GenericKeyValueBuilder.INSTANCE, true);
        assertEquals(1, indexMaintainerList.size());
        IndexMaintainer indexMaintainer = indexMaintainerList.get(0);
        return indexMaintainer;
    } finally {
        conn.close();
    }
}
 
Example #7
Source File: SpillableGroupByIT.java    From phoenix with Apache License 2.0
@BeforeClass
public static synchronized void doSetup() throws Exception {
    Map<String, String> props = Maps.newHashMapWithExpectedSize(11);
    // Set a very small cache size to force plenty of spilling
    props.put(QueryServices.GROUPBY_MAX_CACHE_SIZE_ATTRIB,
            Integer.toString(1));
    props.put(QueryServices.GROUPBY_SPILLABLE_ATTRIB, String.valueOf(true));
    props.put(QueryServices.GROUPBY_SPILL_FILES_ATTRIB,
            Integer.toString(1));
    // Large enough to not run out of memory, but small enough to spill
    props.put(QueryServices.MAX_MEMORY_SIZE_ATTRIB, Integer.toString(40000));
    
    // Set guidepost width, but disable stats
    props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    props.put(QueryServices.STATS_COLLECTION_ENABLED, Boolean.toString(false));
    props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
    props.put(QueryServices.EXPLAIN_ROW_COUNT_ATTRIB, Boolean.TRUE.toString());
    // Must update config before starting server
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example #8
Source File: StoreNullsIT.java    From phoenix with Apache License 2.0
@Test
public void testSetStoreNullsDefaultViaConfig() throws SQLException {
    Properties props = new Properties();
    props.setProperty(QueryServices.DEFAULT_STORE_NULLS_ATTRIB, "true");
    Connection storeNullsConn = DriverManager.getConnection(getUrl(), props);

    Statement stmt = storeNullsConn.createStatement();
    stmt.execute("CREATE TABLE with_nulls_default (" +
            "id smallint primary key," +
            "name varchar)");

    ResultSet rs = stmt.executeQuery("SELECT store_nulls FROM SYSTEM.CATALOG " +
            "WHERE table_name = 'WITH_NULLS_DEFAULT' AND store_nulls is not null");
    assertTrue(rs.next());
    assertTrue(rs.getBoolean(1));
}
 
Example #9
Source File: AggregatePlan.java    From phoenix with Apache License 2.0
private ParallelIteratorFactory wrapParallelIteratorFactory () {
    ParallelIteratorFactory innerFactory;
    QueryServices services = context.getConnection().getQueryServices();
    if (groupBy.isEmpty() || groupBy.isOrderPreserving()) {
        if (ScanUtil.isPacingScannersPossible(context)) {
            innerFactory = ParallelIteratorFactory.NOOP_FACTORY;
        } else {
            innerFactory = new SpoolingResultIterator.SpoolingResultIteratorFactory(services);
        }
    } else {
        innerFactory = new OrderingResultIteratorFactory(services, this.getOrderBy());
    }
    if (parallelIteratorFactory == null) {
        return innerFactory;
    }
    // wrap any existing parallelIteratorFactory
    return new WrappingResultIteratorFactory(innerFactory, parallelIteratorFactory);
}
 
Example #10
Source File: DeleteIT.java    From phoenix with Apache License 2.0
private void testDeleteFilter(boolean autoCommit) throws Exception {
    Properties props = new Properties();
    props.setProperty(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS,
        allowServerSideMutations);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    String tableName = initTableValues(conn);

    assertTableCount(conn, tableName, NUMBER_OF_ROWS);
    
    conn.setAutoCommit(autoCommit);
    String deleteStmt = "DELETE FROM " + tableName + " WHERE 20 = j";
    assertEquals(1,conn.createStatement().executeUpdate(deleteStmt));
    if (!autoCommit) {
        conn.commit();
    }

    assertTableCount(conn, tableName, NUMBER_OF_ROWS - 1);
}
 
Example #11
Source File: BasePermissionsIT.java    From phoenix with Apache License 2.0
private static void enablePhoenixHBaseAuthorization(Configuration config,
                                                    boolean useCustomAccessController) {
    config.set("hbase.superuser", SUPER_USER + "," + "superUser2");
    config.set("hbase.security.authorization", Boolean.TRUE.toString());
    config.set("hbase.security.exec.permission.checks", Boolean.TRUE.toString());
    if(useCustomAccessController) {
        config.set("hbase.coprocessor.master.classes",
                CustomAccessController.class.getName());
        config.set("hbase.coprocessor.region.classes",
                CustomAccessController.class.getName());
        config.set("hbase.coprocessor.regionserver.classes",
                CustomAccessController.class.getName());
    } else {
        config.set("hbase.coprocessor.master.classes",
                "org.apache.hadoop.hbase.security.access.AccessController");
        config.set("hbase.coprocessor.region.classes",
                "org.apache.hadoop.hbase.security.access.AccessController");
        config.set("hbase.coprocessor.regionserver.classes",
                "org.apache.hadoop.hbase.security.access.AccessController");
    }
    config.set(QueryServices.PHOENIX_ACLS_ENABLED,"true");

    config.set("hbase.regionserver.wal.codec", "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec");
}
 
Example #12
Source File: IndexUpgradeTool.java    From phoenix with Apache License 2.0
private static void setRpcRetriesAndTimeouts(Configuration conf) {
    long indexRebuildQueryTimeoutMs =
            conf.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB,
                    QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT);
    long indexRebuildRPCTimeoutMs =
            conf.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB,
                    QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT);
    long indexRebuildClientScannerTimeOutMs =
            conf.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB,
                    QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT);
    int indexRebuildRpcRetriesCounter =
            conf.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER,
                    QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER);

    // Set phoenix and hbase level timeouts and rpc retries
    conf.setLong(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, indexRebuildQueryTimeoutMs);
    conf.setLong(HConstants.HBASE_RPC_TIMEOUT_KEY, indexRebuildRPCTimeoutMs);
    conf.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
            indexRebuildClientScannerTimeOutMs);
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, indexRebuildRpcRetriesCounter);
}
 
Example #13
Source File: TenantSpecificViewIndexIT.java    From phoenix with Apache License 2.0
private void createTableAndValidate(String tableName, boolean isNamespaceEnabled) throws Exception {
    Properties props = new Properties();
    if (isNamespaceEnabled) {
        props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(true));
    }
    Connection conn = DriverManager.getConnection(getUrl(), props);
    if (isNamespaceEnabled) {
        conn.createStatement().execute("CREATE SCHEMA " + SchemaUtil.getSchemaNameFromFullName(tableName));
    }
    String ddl = "CREATE TABLE " + tableName + " (PK1 VARCHAR not null, PK2 VARCHAR not null, "
            + "MYCF1.COL1 varchar,MYCF2.COL2 varchar " + "CONSTRAINT pk PRIMARY KEY(PK1,PK2)) MULTI_TENANT=true";
    conn.createStatement().execute(ddl);

    conn.createStatement().execute("UPSERT INTO " + tableName + " values ('a','b','c','d')");
    conn.commit();

    ResultSet rs = conn.createStatement()
            .executeQuery("select * from " + tableName + " where (pk1,pk2) IN (('a','b'),('b','b'))");
    assertTrue(rs.next());
    assertEquals("a", rs.getString(1));
    assertEquals("b", rs.getString(2));
    assertFalse(rs.next());
    conn.close();
}
 
Example #14
Source File: UpsertSelectAutoCommitIT.java    From phoenix with Apache License 2.0
@Test
public void testMaxMutationSize() throws Exception {
    Properties connectionProperties = new Properties();
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3");
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "50000");
    connectionProperties.setProperty(QueryServices.ENABLE_SERVER_SIDE_UPSERT_MUTATIONS,
        allowServerSideMutations);
    PhoenixConnection connection =
            (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties);
    connection.setAutoCommit(true);
    String fullTableName = generateUniqueName();
    try (Statement stmt = connection.createStatement()) {
        stmt.execute(
                "CREATE TABLE " + fullTableName + " (pk INTEGER PRIMARY KEY, v1 INTEGER, v2 INTEGER)");
        stmt.execute(
                "CREATE SEQUENCE " + fullTableName + "_seq cache 1000");
        stmt.execute("UPSERT INTO " + fullTableName + " VALUES (NEXT VALUE FOR " + fullTableName + "_seq, rand(), rand())");
    }
    try (Statement stmt = connection.createStatement()) {
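        // Each UPSERT ... SELECT reads the full table back into itself, roughly
        // doubling the row count on every iteration.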
        for (int i=0; i<16; i++) {
            stmt.execute("UPSERT INTO " + fullTableName + " SELECT NEXT VALUE FOR " + fullTableName + "_seq, rand(), rand() FROM " + fullTableName);
        }
    }
    connection.close();
}
 
Example #15
Source File: IndexToolIT.java    From phoenix with Apache License 2.0
@BeforeClass
public static synchronized void setup() throws Exception {
    Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
    serverProps.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    serverProps.put(QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB, Long.toString(5));
    serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
        QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    serverProps.put(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS, Long.toString(8));
    Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
    clientProps.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(true));
    clientProps.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Long.toString(5));
    clientProps.put(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString());
    clientProps.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.TRUE.toString());
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
        new ReadOnlyProps(clientProps.entrySet().iterator()));
}
 
Example #16
Source File: StatementContext.java    From phoenix with Apache License 2.0
public StatementContext(PhoenixStatement statement, ColumnResolver resolver, Scan scan, SequenceManager seqManager) {
    this.statement = statement;
    this.resolver = resolver;
    this.scan = scan;
    this.sequences = seqManager;
    this.binds = new BindManager(statement.getParameters());
    this.aggregates = new AggregationManager();
    this.expressions = new ExpressionManager();
    PhoenixConnection connection = statement.getConnection();
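    // Date, time, timestamp, and number formatting defaults for this statement are
    // read from QueryServices properties on the connection.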
    this.dateFormat = connection.getQueryServices().getProps().get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
    this.dateFormatter = DateUtil.getDateFormatter(dateFormat);
    this.timeFormat = connection.getQueryServices().getProps().get(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT);
    this.timeFormatter = DateUtil.getTimeFormatter(timeFormat);
    this.timestampFormat = connection.getQueryServices().getProps().get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, DateUtil.DEFAULT_TIMESTAMP_FORMAT);
    this.timestampFormatter = DateUtil.getTimestampFormatter(timestampFormat);
    this.dateFormatTimeZone = TimeZone.getTimeZone(
            connection.getQueryServices().getProps().get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, DateUtil.DEFAULT_TIME_ZONE_ID));
    this.numberFormat = connection.getQueryServices().getProps().get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT);
    this.tempPtr = new ImmutableBytesWritable();
    this.currentTable = resolver != null && !resolver.getTables().isEmpty() ? resolver.getTables().get(0) : null;
    this.whereConditionColumns = new ArrayList<Pair<byte[],byte[]>>();
    this.dataColumns = this.currentTable == null ? Collections.<PColumn, Integer>emptyMap() : Maps.<PColumn, Integer>newLinkedHashMap();
    this.subqueryResults = Maps.<SelectStatement, Object>newHashMap();
}
 
Example #17
Source File: ContextClassloaderIT.java    From phoenix with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    hbaseTestUtil = new HBaseTestingUtility(conf);
    hbaseTestUtil.startMiniCluster();
    String clientPort = hbaseTestUtil.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
    url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
            + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
    driver = initAndRegisterDriver(url, ReadOnlyProps.EMPTY_PROPS);
    
    Connection conn = DriverManager.getConnection(url);
    Statement stmt = conn.createStatement();
    stmt.execute("CREATE TABLE test (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR)");
    stmt.execute("UPSERT INTO test VALUES (1, 'name1')");
    stmt.execute("UPSERT INTO test VALUES (2, 'name2')");
    stmt.close();
    conn.commit();
    conn.close();
    badContextClassloader = new URLClassLoader(new URL[] {
            File.createTempFile("invalid", ".jar").toURI().toURL() }, null);
}
 
Example #18
Source File: DisableLocalIndexIT.java    From phoenix with Apache License 2.0
@BeforeClass
@Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
public static void doSetup() throws Exception {
    Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
    // Must update config before starting server
    props.put(QueryServices.ALLOW_LOCAL_INDEX_ATTRIB, Boolean.FALSE.toString());
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example #19
Source File: QueryOptimizer.java    From phoenix with Apache License 2.0
public QueryOptimizer(QueryServices services) {
    this.services = services;
    this.useIndexes = this.services.getProps().getBoolean(QueryServices.USE_INDEXES_ATTRIB, QueryServicesOptions.DEFAULT_USE_INDEXES);
    this.costBased = this.services.getProps().getBoolean(QueryServices.COST_BASED_OPTIMIZER_ENABLED, QueryServicesOptions.DEFAULT_COST_BASED_OPTIMIZER_ENABLED);
    this.indexPendingDisabledThreshold = this.services.getProps().getLong(QueryServices.INDEX_PENDING_DISABLE_THRESHOLD,
        QueryServicesOptions.DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD);
}
 
Example #20
Source File: UpgradeUtil.java    From phoenix with Apache License 2.0
private static void mapTableToNamespace(Admin admin, Table metatable, String srcTableName,
        String destTableName, ReadOnlyProps props, Long ts, String phoenixTableName, PTableType pTableType,PName tenantId)
                throws SnapshotCreationException, IllegalArgumentException, IOException, InterruptedException,
                SQLException {
    if (!SchemaUtil.isNamespaceMappingEnabled(pTableType, props)) {
        throw new IllegalArgumentException(SchemaUtil.isSystemTable(srcTableName.getBytes())
                ? "For system table " + QueryServices.IS_SYSTEM_TABLE_MAPPED_TO_NAMESPACE
                        + " also needs to be enabled along with " + QueryServices.IS_NAMESPACE_MAPPING_ENABLED
                : QueryServices.IS_NAMESPACE_MAPPING_ENABLED + " is not enabled");
    }
    mapTableToNamespace(admin, srcTableName, destTableName, pTableType);

    byte[] tableKey = SchemaUtil.getTableKey(tenantId != null ? tenantId.getString() : null,
            SchemaUtil.getSchemaNameFromFullName(phoenixTableName),
            SchemaUtil.getTableNameFromFullName(phoenixTableName));
    List<Cell> columnCells = metatable.get(new Get(tableKey))
            .getColumnCells(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES, QueryConstants.EMPTY_COLUMN_BYTES);
    if (ts == null) {
        if (!columnCells.isEmpty()) {
            ts = columnCells.get(0).getTimestamp();
        } else if (PTableType.SYSTEM != pTableType) {
            throw new IllegalArgumentException(
                    "Timestamp passed is null and cannot derive timestamp for " + tableKey + " from meta table!!");
        }
    }
    if (ts != null) {
        // Update flag to represent table is mapped to namespace
        LOGGER.info(String.format("Updating meta information of phoenix table '%s' to map to namespace..",
                phoenixTableName));
        Put put = new Put(tableKey, ts);
        put.addColumn(QueryConstants.DEFAULT_COLUMN_FAMILY_BYTES, PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES,
                PBoolean.INSTANCE.toBytes(Boolean.TRUE));
        metatable.put(put);
    }
}
 
Example #21
Source File: SpoolingResultIterator.java    From phoenix with Apache License 2.0
private SpoolingResultIterator(SpoolingMetricsHolder spoolMetrics, MemoryMetricsHolder memoryMetrics, ResultIterator scanner, QueryServices services) throws SQLException {
    this (spoolMetrics, memoryMetrics, scanner, services.getMemoryManager(),
            services.getProps().getLong(QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB,
                QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES),
            services.getProps().getLong(QueryServices.MAX_SPOOL_TO_DISK_BYTES_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SPOOL_TO_DISK_BYTES),
            services.getProps().get(QueryServices.SPOOL_DIRECTORY, QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY));
}
 
Example #22
Source File: IndexToolForPartialBuildWithNamespaceEnabledIT.java    From phoenix with Apache License 2.0
@BeforeClass
@Shadower(classBeingShadowed = IndexToolForPartialBuildIT.class)
public static void doSetup() throws Exception {
    Map<String, String> serverProps = getServerProperties();
    serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
    Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(1);
    clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()), new ReadOnlyProps(clientProps.entrySet().iterator()));
}
 
Example #23
Source File: DeleteIT.java    From phoenix with Apache License 2.0
@Test
public void testDeleteShouldNotFailWhenTheRowsMoreThanMaxMutationSize() throws Exception {
    String tableName = generateUniqueName();
    String indexName1 = generateUniqueName();
    String ddl =
            "CREATE TABLE IF NOT EXISTS "
                    + tableName
                    + " (pk1 DECIMAL NOT NULL, v1 VARCHAR, v2 VARCHAR CONSTRAINT PK PRIMARY KEY (pk1))"
                    + " IMMUTABLE_ROWS=true";
    String idx1 = "CREATE INDEX " + indexName1 + " ON " + tableName + "(v1)";
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    props.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB,Integer.toString(10));
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        conn.createStatement().execute(ddl);
        conn.createStatement().execute(idx1);
        Statement stmt = conn.createStatement();
        for(int i = 0; i < 20; i++) {
            stmt.executeUpdate("UPSERT INTO " + tableName + " VALUES ("+i+",'value"+i+"', 'value2')");
            if (i % 10 == 0) {
                conn.commit();
            }
        }
        conn.commit();
        conn.setAutoCommit(true);
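        // The DELETE touches more rows than MAX_MUTATION_SIZE_ATTRIB (set to 10) allows,
        // but it is still expected to succeed.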
        try {
            conn.createStatement().execute("DELETE FROM " + tableName);
        } catch (Exception e) {
            fail("Should not throw any exception");
        }
    }
}
 
Example #24
Source File: SubqueryIT.java    From phoenix with Apache License 2.0
@BeforeClass
@Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
public static void doSetup() throws Exception {
    Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
    // Forces server cache to be used
    props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2));
    // Must update config before starting server
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example #25
Source File: PTableRefFactory.java    From phoenix with Apache License 2.0
public static PTableRefFactory getFactory(ReadOnlyProps props) {
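    // CLIENT_CACHE_ENCODING selects whether cached table metadata is held as
    // serialized protobufs or as plain object references.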
    String encodingEnumString =
            props.get(QueryServices.CLIENT_CACHE_ENCODING,
                QueryServicesOptions.DEFAULT_CLIENT_CACHE_ENCODING);
    Encoding encoding = Encoding.valueOf(encodingEnumString.toUpperCase());
    switch (encoding) {
    case PROTOBUF:
        return SerializedPTableRefFactory.getFactory();
    case OBJECT:
    default:
        return INSTANCE;
    }
}
 
Example #26
Source File: UseSchemaIT.java    From phoenix with Apache License 2.0
@Test
public void testSequences() throws Exception {
    Properties props = new Properties();
    String schema = generateUniqueName();
    props.setProperty(QueryServices.SCHEMA_ATTRIB, schema);
    props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(true));
    Connection conn = DriverManager.getConnection(getUrl(), props);
    conn.setAutoCommit(true);
    String ddl = "CREATE SCHEMA IF NOT EXISTS " + schema;
    conn.createStatement().execute(ddl);
    String sequenceName = generateUniqueName();
    ddl = "create SEQUENCE "+schema + "." + sequenceName + " START WITH 100 INCREMENT BY 2 CACHE 10";
    conn.createStatement().execute(ddl);
    String query = "SELECT NEXT VALUE FOR "+schema + "." + sequenceName;
    ResultSet rs = conn.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals("100", rs.getString(1));
    conn.createStatement().execute("DROP Sequence " + schema + "." + sequenceName);
    
    schema = generateUniqueName();
    sequenceName = generateUniqueName();
    ddl = "CREATE SCHEMA " + schema;
    conn.createStatement().execute(ddl);
    conn.createStatement().execute("use " + schema);
    ddl = "create SEQUENCE "+ sequenceName + " START WITH 100 INCREMENT BY 2 CACHE 10";
    conn.createStatement().execute(ddl);
    query = "SELECT NEXT VALUE FOR "+sequenceName;
    rs = conn.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals("100", rs.getString(1));
    query = "SELECT CURRENT VALUE FOR "+sequenceName;
    rs = conn.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals("100", rs.getString(1));
    conn.createStatement().execute("DROP Sequence " + sequenceName);
    conn.close();
}
 
Example #27
Source File: ParallelIteratorsIT.java    From phoenix with Apache License 2.0
@BeforeClass
public static void doSetup() throws Exception {
    Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
    // Must update config before starting server
    props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example #28
Source File: SkipScanBigFilterTest.java    From phoenix with Apache License 2.0
@BeforeClass
@Shadower(classBeingShadowed = BaseConnectionlessQueryTest.class)
public static void doSetup() throws Exception {
    Map<String,String> props = Maps.newHashMapWithExpectedSize(1);
    // enables manual splitting on salted tables
    props.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.toString(false));
    initDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example #29
Source File: StatsCollectorAbstractIT.java    From phoenix with Apache License 2.0
@BeforeClass
public static void doSetup() throws Exception {
    Map<String,String> props = Maps.newHashMapWithExpectedSize(3);
    // Must update config before starting server
    props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example #30
Source File: RegexpSplitParseNode.java    From phoenix with Apache License 2.0
@Override
public Expression create(List<Expression> children, StatementContext context)
        throws SQLException {
    QueryServices services = context.getConnection().getQueryServices();
    boolean useByteBasedRegex =
            services.getProps().getBoolean(QueryServices.USE_BYTE_BASED_REGEX_ATTRIB,
                QueryServicesOptions.DEFAULT_USE_BYTE_BASED_REGEX);
    if (useByteBasedRegex) {
        return new ByteBasedRegexpSplitFunction(children);
    } else {
        return new StringBasedRegexpSplitFunction(children);
    }
}