Java Code Examples for org.apache.phoenix.query.QueryServices

The following examples show how to use org.apache.phoenix.query.QueryServices. These examples are extracted from open-source projects. You can vote up the examples you find useful or vote down those you don't, and you can navigate to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source Project: phoenix   Source File: StatementContext.java    License: Apache License 2.0 6 votes vote down vote up
// Builds the per-statement execution context: binds, aggregation/expression
// managers, and client-side formatting settings read from connection properties.
public StatementContext(PhoenixStatement statement, ColumnResolver resolver, Scan scan, SequenceManager seqManager) {
    this.statement = statement;
    this.resolver = resolver;
    this.scan = scan;
    this.sequences = seqManager;
    this.binds = new BindManager(statement.getParameters());
    this.aggregates = new AggregationManager();
    this.expressions = new ExpressionManager();
    PhoenixConnection connection = statement.getConnection();
    // Date/time/timestamp formats come from connection-level properties,
    // defaulting to the DateUtil constants; each format string is resolved
    // before its matching formatter is built from it.
    this.dateFormat = connection.getQueryServices().getProps().get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
    this.dateFormatter = DateUtil.getDateFormatter(dateFormat);
    this.timeFormat = connection.getQueryServices().getProps().get(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT);
    this.timeFormatter = DateUtil.getTimeFormatter(timeFormat);
    this.timestampFormat = connection.getQueryServices().getProps().get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, DateUtil.DEFAULT_TIMESTAMP_FORMAT);
    this.timestampFormatter = DateUtil.getTimestampFormatter(timestampFormat);
    // Time zone used when formatting dates, configurable per connection.
    this.dateFormatTimeZone = TimeZone.getTimeZone(
            connection.getQueryServices().getProps().get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, DateUtil.DEFAULT_TIME_ZONE_ID));
    this.numberFormat = connection.getQueryServices().getProps().get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT);
    // Reusable scratch pointer for byte-slice manipulation during execution.
    this.tempPtr = new ImmutableBytesWritable();
    // First resolved table (if any) is treated as the statement's current table.
    this.currentTable = resolver != null && !resolver.getTables().isEmpty() ? resolver.getTables().get(0) : null;
    this.whereConditionColumns = new ArrayList<Pair<byte[],byte[]>>();
    // Linked map preserves column order; empty map when there is no table.
    this.dataColumns = this.currentTable == null ? Collections.<PColumn, Integer>emptyMap() : Maps.<PColumn, Integer>newLinkedHashMap();
    this.subqueryResults = Maps.<SelectStatement, Object>newHashMap();
}
 
Example 2
Source Project: phoenix   Source File: IndexToolIT.java    License: Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static synchronized void setup() throws Exception {
    // Server-side properties: small guideposts, short metadata cache TTL and a
    // tiny index-rebuild page size so the IndexTool paths are exercised quickly.
    // FIX: expected size was 2 although four entries are inserted into each map.
    Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(4);
    serverProps.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    serverProps.put(QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB, Long.toString(5));
    serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
        QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    serverProps.put(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS, Long.toString(8));
    // Client-side properties: stats-driven parallelization, transactions and
    // forced row-key ordering.
    Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(4);
    clientProps.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(true));
    clientProps.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Long.toString(5));
    clientProps.put(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString());
    clientProps.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.TRUE.toString());
    // Config must be in place before the mini-cluster driver is started.
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
        new ReadOnlyProps(clientProps.entrySet().iterator()));
}
 
Example 3
Source Project: phoenix   Source File: UpsertSelectAutoCommitIT.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testMaxMutationSize() throws Exception {
    // Configure a tiny mutation batch limit (3 rows / 50000 bytes) so the
    // UPSERT SELECT loop below must repeatedly flush under auto-commit.
    Properties connectionProperties = new Properties();
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_ATTRIB, "3");
    connectionProperties.setProperty(QueryServices.MAX_MUTATION_SIZE_BYTES_ATTRIB, "50000");
    connectionProperties.setProperty(QueryServices.ENABLE_SERVER_SIDE_UPSERT_MUTATIONS,
        allowServerSideMutations);
    // FIX: try-with-resources — the original leaked the connection whenever a
    // statement threw before the trailing close() call.
    try (PhoenixConnection connection =
            (PhoenixConnection) DriverManager.getConnection(getUrl(), connectionProperties)) {
        connection.setAutoCommit(true);
        String fullTableName = generateUniqueName();
        try (Statement stmt = connection.createStatement()) {
            stmt.execute(
                    "CREATE TABLE " + fullTableName + " (pk INTEGER PRIMARY KEY, v1 INTEGER, v2 INTEGER)");
            stmt.execute(
                    "CREATE SEQUENCE " + fullTableName + "_seq cache 1000");
            stmt.execute("UPSERT INTO " + fullTableName + " VALUES (NEXT VALUE FOR " + fullTableName + "_seq, rand(), rand())");
        }
        try (Statement stmt = connection.createStatement()) {
            // Each UPSERT SELECT doubles the row count, quickly exceeding the
            // configured per-commit mutation limits.
            for (int i = 0; i < 16; i++) {
                stmt.execute("UPSERT INTO " + fullTableName + " SELECT NEXT VALUE FOR " + fullTableName + "_seq, rand(), rand() FROM " + fullTableName);
            }
        }
    }
}
 
Example 4
Source Project: phoenix   Source File: IndexUpgradeTool.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Copies the index-rebuild timeout/retry settings onto the effective
 * Phoenix and HBase client keys of the given configuration.
 */
private static void setRpcRetriesAndTimeouts(Configuration conf) {
    // Read the rebuild-specific settings, falling back to Phoenix defaults.
    long queryTimeoutMs = conf.getLong(QueryServices.INDEX_REBUILD_QUERY_TIMEOUT_ATTRIB,
            QueryServicesOptions.DEFAULT_INDEX_REBUILD_QUERY_TIMEOUT);
    long rpcTimeoutMs = conf.getLong(QueryServices.INDEX_REBUILD_RPC_TIMEOUT_ATTRIB,
            QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_TIMEOUT);
    long scannerTimeoutMs = conf.getLong(QueryServices.INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT_ATTRIB,
            QueryServicesOptions.DEFAULT_INDEX_REBUILD_CLIENT_SCANNER_TIMEOUT);
    int rpcRetries = conf.getInt(QueryServices.INDEX_REBUILD_RPC_RETRIES_COUNTER,
            QueryServicesOptions.DEFAULT_INDEX_REBUILD_RPC_RETRIES_COUNTER);

    // Apply them as the effective Phoenix and HBase timeouts / retry count.
    conf.setLong(QueryServices.THREAD_TIMEOUT_MS_ATTRIB, queryTimeoutMs);
    conf.setLong(HConstants.HBASE_RPC_TIMEOUT_KEY, rpcTimeoutMs);
    conf.setLong(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, scannerTimeoutMs);
    conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, rpcRetries);
}
 
Example 5
Source Project: phoenix   Source File: BasePermissionsIT.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Turns on HBase authorization for the test cluster, installing either the
 * custom test access controller or the stock HBase AccessController on the
 * master, region and regionserver coprocessor hooks.
 */
private static void enablePhoenixHBaseAuthorization(Configuration config,
                                                    boolean useCustomAccessController) {
    config.set("hbase.superuser", SUPER_USER + "," + "superUser2");
    config.set("hbase.security.authorization", Boolean.TRUE.toString());
    config.set("hbase.security.exec.permission.checks", Boolean.TRUE.toString());
    // Same controller class is registered on all three coprocessor hooks.
    String accessControllerClass = useCustomAccessController
            ? CustomAccessController.class.getName()
            : "org.apache.hadoop.hbase.security.access.AccessController";
    config.set("hbase.coprocessor.master.classes", accessControllerClass);
    config.set("hbase.coprocessor.region.classes", accessControllerClass);
    config.set("hbase.coprocessor.regionserver.classes", accessControllerClass);
    config.set(QueryServices.PHOENIX_ACLS_ENABLED,"true");

    config.set("hbase.regionserver.wal.codec", "org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec");
}
 
Example 6
Source Project: phoenix   Source File: DeleteIT.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Deletes a single row through a WHERE filter and verifies the row count
 * drops by exactly one, in both auto-commit and explicit-commit modes.
 */
private void testDeleteFilter(boolean autoCommit) throws Exception {
    Properties props = new Properties();
    props.setProperty(QueryServices.ENABLE_SERVER_SIDE_DELETE_MUTATIONS,
        allowServerSideMutations);
    Connection conn = DriverManager.getConnection(getUrl(), props);
    String tableName = initTableValues(conn);

    assertTableCount(conn, tableName, NUMBER_OF_ROWS);

    conn.setAutoCommit(autoCommit);
    // Filter written with the constant on the left, matching one row.
    String deleteSql = "DELETE FROM " + tableName + " WHERE 20 = j";
    int deletedRows = conn.createStatement().executeUpdate(deleteSql);
    assertEquals(1, deletedRows);
    if (!autoCommit) {
        conn.commit();
    }

    assertTableCount(conn, tableName, NUMBER_OF_ROWS - 1);
}
 
Example 7
Source Project: phoenix   Source File: NonTxIndexBuilderTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates the test table and its index, then deserializes and returns the
 * single IndexMaintainer for that index.
 */
private IndexMaintainer getTestIndexMaintainer() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TestUtil.TEST_PROPERTIES);
    // disable column encoding, makes debugging easier
    props.put(QueryServices.DEFAULT_COLUMN_ENCODED_BYTES_ATRRIB, "0");
    // FIX: try-with-resources replaces the manual try/finally close.
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        conn.setAutoCommit(true);
        conn.createStatement().execute(TEST_TABLE_DDL);
        conn.createStatement().execute(TEST_TABLE_INDEX_DDL);
        PhoenixConnection pconn = conn.unwrap(PhoenixConnection.class);
        PTable table = pconn.getTable(new PTableKey(pconn.getTenantId(), TEST_TABLE_STRING));
        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
        table.getIndexMaintainers(ptr, pconn);
        List<IndexMaintainer> indexMaintainerList =
                IndexMaintainer.deserialize(ptr, GenericKeyValueBuilder.INSTANCE, true);
        // Exactly one index was created above, so one maintainer is expected.
        assertEquals(1, indexMaintainerList.size());
        return indexMaintainerList.get(0);
    }
}
 
Example 8
Source Project: phoenix   Source File: ServerCacheClient.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Tracks a cache entry pushed to a set of region servers. When
 * {@code storeCacheOnClient} is set, the payload is also retained on the
 * client — in memory when the memory manager grants the allocation, else
 * spilled to a temp file in the configured spool directory.
 */
public ServerCache(byte[] id, Set<HRegionLocation> servers, ImmutableBytesWritable cachePtr,
        ConnectionQueryServices services, boolean storeCacheOnClient) throws IOException {
    maxServerCacheTTL = services.getProps().getInt(
            QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB,
            QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_TIME_TO_LIVE_MS);
    this.id = id;
    // FIX: parameterize the raw HashMap (region location -> time it was cached).
    this.servers = new HashMap<>();
    long currentTime = EnvironmentEdgeManager.currentTimeMillis();
    for (HRegionLocation loc : servers) {
        this.servers.put(loc, currentTime);
    }
    this.size = cachePtr.getLength();
    if (storeCacheOnClient) {
        try {
            this.chunk = services.getMemoryManager().allocate(cachePtr.getLength());
            this.cachePtr = cachePtr;
        } catch (InsufficientMemoryException e) {
            // Not enough tracked client memory: spill the payload to disk.
            this.outputFile = File.createTempFile("HashJoinCacheSpooler", ".bin", new File(services.getProps()
                    .get(QueryServices.SPOOL_DIRECTORY, QueryServicesOptions.DEFAULT_SPOOL_DIRECTORY)));
            try (OutputStream fio = Files.newOutputStream(outputFile.toPath())) {
                fio.write(cachePtr.get(), cachePtr.getOffset(), cachePtr.getLength());
            }
        }
    }
}
 
Example 9
Source Project: phoenix   Source File: StoreNullsPropIT.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testSetStoreNullsDefaultViaConfig() throws SQLException {
    // Setting the connection-level default should mark new tables STORE_NULLS.
    Properties props = new Properties();
    props.setProperty(QueryServices.DEFAULT_STORE_NULLS_ATTRIB, "true");
    // FIX: try-with-resources — connection, statement and result set were
    // previously never closed.
    try (Connection storeNullsConn = DriverManager.getConnection(getUrl(), props);
            Statement stmt = storeNullsConn.createStatement()) {
        stmt.execute("CREATE TABLE with_nulls_default (" +
                "id smallint primary key," +
                "name varchar)");

        // Verify the flag was persisted in the catalog for the new table.
        try (ResultSet rs = stmt.executeQuery("SELECT store_nulls FROM \"SYSTEM\".CATALOG " +
                "WHERE table_name = 'WITH_NULLS_DEFAULT' AND store_nulls is not null")) {
            assertTrue(rs.next());
            assertTrue(rs.getBoolean(1));
        }
    }
}
 
Example 10
Source Project: phoenix   Source File: SpillableGroupByIT.java    License: Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static synchronized void doSetup() throws Exception {
    Map<String, String> props = Maps.newHashMapWithExpectedSize(11);
    // Shrink the group-by cache to one entry and allow only a single spill
    // file, forcing the spillable group-by implementation to spill heavily.
    props.put(QueryServices.GROUPBY_MAX_CACHE_SIZE_ATTRIB, Integer.toString(1));
    props.put(QueryServices.GROUPBY_SPILLABLE_ATTRIB, String.valueOf(true));
    props.put(QueryServices.GROUPBY_SPILL_FILES_ATTRIB, Integer.toString(1));
    // Large enough to not run out of memory, but small enough to spill.
    props.put(QueryServices.MAX_MEMORY_SIZE_ATTRIB, Integer.toString(40000));
    // Configure a guidepost width while keeping stats collection disabled.
    props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    props.put(QueryServices.STATS_COLLECTION_ENABLED, Boolean.toString(false));
    // Surface chunk and row counts in EXPLAIN output.
    props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
    props.put(QueryServices.EXPLAIN_ROW_COUNT_ATTRIB, Boolean.TRUE.toString());
    // Config must be applied before the test server starts.
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example 11
Source Project: phoenix   Source File: PhoenixServerRpcIT.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testUpsertSelectServerDisabled() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    // Force the upsert-select to execute entirely on the client side.
    props.setProperty(QueryServices.ENABLE_SERVER_UPSERT_SELECT, "false");
    try (Connection conn = driver.connect(getUrl(), props)) {
        // Two identically-shaped tables, placed on different region servers.
        createTable(conn, dataTableFullName);
        upsertRow(conn, dataTableFullName);
        String targetTableName = dataTableFullName + "_2";
        createTable(conn, targetTableName);
        ensureTablesOnDifferentRegionServers(dataTableFullName, targetTableName);
        // Copy the row via UPSERT SELECT; with the server-side path disabled,
        // the index RPC executor must never be invoked.
        upsertSelectRows(conn, dataTableFullName, targetTableName);
        Mockito.verify(TestPhoenixIndexRpcSchedulerFactory.getIndexRpcExecutor(),
                Mockito.never()).dispatch(Mockito.any(CallRunner.class));
    }
}
 
Example 12
Source Project: phoenix   Source File: MutationStateTest.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testPendingMutationsOnDDL() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    // Make DDL statements throw when uncommitted mutations are pending.
    props.setProperty(QueryServices.PENDING_MUTATIONS_DDL_THROW_ATTRIB, "true");
    try (Connection conn = DriverManager.getConnection(getUrl(), props);
            PhoenixConnection pConnSpy = spy((PhoenixConnection) conn)) {
        // Mock mutation state reporting one pending (uncommitted) row.
        MutationState mutationState = mock(MutationState.class);
        when(mutationState.getNumRows()).thenReturn(1);

        // Create a connection with mutation state and mock it
        doReturn(mutationState).when(pConnSpy).getMutationState();
        // The DDL below must fail with CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS.
        exceptionRule.expect(SQLException.class);
        exceptionRule.expectMessage(
            SQLExceptionCode.CANNOT_PERFORM_DDL_WITH_PENDING_MUTATIONS.getMessage());

        pConnSpy.createStatement().execute("create table MUTATION_TEST1"
                + "( id1 UNSIGNED_INT not null primary key," + "appId1 VARCHAR)");
    }

}
 
Example 13
Source Project: phoenix   Source File: DropIndexDuringUpsertIT.java    License: Apache License 2.0 6 votes vote down vote up
@Before
public void doSetup() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    // Keep client retries/pauses small and effectively disable the balancer
    // so background region moves do not interfere with the test.
    conf.setInt("hbase.client.retries.number", 2);
    conf.setInt("hbase.client.pause", 5000);
    conf.setInt("hbase.balancer.period", Integer.MAX_VALUE);
    conf.setLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB, 0);
    util = new HBaseTestingUtility(conf);
    util.startMiniCluster(NUM_SLAVES);
    // Build the JDBC url from the ZK client port the mini-cluster picked.
    String zkClientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
    url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + zkClientPort
            + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;

    // Must update config before starting server.
    Map<String, String> driverProps = Maps.newHashMapWithExpectedSize(1);
    driverProps.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
    driver = initAndRegisterDriver(url, new ReadOnlyProps(driverProps.entrySet().iterator()));
}
 
Example 14
Source Project: phoenix   Source File: StoreNullsIT.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testSetStoreNullsDefaultViaConfig() throws SQLException {
    // Setting the connection-level default should mark new tables STORE_NULLS.
    Properties props = new Properties();
    props.setProperty(QueryServices.DEFAULT_STORE_NULLS_ATTRIB, "true");
    // FIX: try-with-resources — connection, statement and result set were
    // previously never closed.
    try (Connection storeNullsConn = DriverManager.getConnection(getUrl(), props);
            Statement stmt = storeNullsConn.createStatement()) {
        stmt.execute("CREATE TABLE with_nulls_default (" +
                "id smallint primary key," +
                "name varchar)");

        // Verify the flag was persisted in the catalog for the new table.
        try (ResultSet rs = stmt.executeQuery("SELECT store_nulls FROM SYSTEM.CATALOG " +
                "WHERE table_name = 'WITH_NULLS_DEFAULT' AND store_nulls is not null")) {
            assertTrue(rs.next());
            assertTrue(rs.getBoolean(1));
        }
    }
}
 
Example 15
Source Project: phoenix   Source File: AggregatePlan.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Chooses the inner iterator factory for this aggregate plan and, when a
 * factory was already configured, wraps it together with the inner one.
 */
private ParallelIteratorFactory wrapParallelIteratorFactory () {
    QueryServices services = context.getConnection().getQueryServices();
    ParallelIteratorFactory innerFactory;
    if (!groupBy.isEmpty() && !groupBy.isOrderPreserving()) {
        // Grouped, non-order-preserving results go through the ordering factory.
        innerFactory = new OrderingResultIteratorFactory(services,this.getOrderBy());
    } else if (ScanUtil.isPacingScannersPossible(context)) {
        innerFactory = ParallelIteratorFactory.NOOP_FACTORY;
    } else {
        innerFactory = new SpoolingResultIterator.SpoolingResultIteratorFactory(services);
    }
    // Nothing to wrap: return the inner factory directly.
    if (parallelIteratorFactory == null) {
        return innerFactory;
    }
    // wrap any existing parallelIteratorFactory
    return new WrappingResultIteratorFactory(innerFactory, parallelIteratorFactory);
}
 
Example 16
Source Project: phoenix   Source File: TenantSpecificViewIndexIT.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a multi-tenant table (optionally under a namespace-mapped schema),
 * upserts one row and validates a row-value-constructor IN query against it.
 */
private void createTableAndValidate(String tableName, boolean isNamespaceEnabled) throws Exception {
    Properties props = new Properties();
    if (isNamespaceEnabled) {
        props.setProperty(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.toString(true));
    }
    // FIX: try-with-resources — the original leaked the connection whenever a
    // statement or assertion failed before the trailing close().
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        if (isNamespaceEnabled) {
            conn.createStatement().execute("CREATE SCHEMA " + SchemaUtil.getSchemaNameFromFullName(tableName));
        }
        String ddl = "CREATE TABLE " + tableName + " (PK1 VARCHAR not null, PK2 VARCHAR not null, "
                + "MYCF1.COL1 varchar,MYCF2.COL2 varchar " + "CONSTRAINT pk PRIMARY KEY(PK1,PK2)) MULTI_TENANT=true";
        conn.createStatement().execute(ddl);

        conn.createStatement().execute("UPSERT INTO " + tableName + " values ('a','b','c','d')");
        conn.commit();

        // The RVC IN clause should match only the single row upserted above.
        ResultSet rs = conn.createStatement()
                .executeQuery("select * from " + tableName + " where (pk1,pk2) IN (('a','b'),('b','b'))");
        assertTrue(rs.next());
        assertEquals("a", rs.getString(1));
        assertEquals("b", rs.getString(2));
        assertFalse(rs.next());
    }
}
 
Example 17
Source Project: phoenix   Source File: ContextClassloaderIT.java    License: Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    hbaseTestUtil = new HBaseTestingUtility(conf);
    hbaseTestUtil.startMiniCluster();
    // Build the JDBC url from the ZK client port the mini-cluster picked.
    String clientPort = hbaseTestUtil.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
    url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
            + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
    driver = initAndRegisterDriver(url, ReadOnlyProps.EMPTY_PROPS);

    // FIX: try-with-resources — conn/stmt were leaked if any statement threw.
    try (Connection conn = DriverManager.getConnection(url);
            Statement stmt = conn.createStatement()) {
        stmt.execute("CREATE TABLE test (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR)");
        stmt.execute("UPSERT INTO test VALUES (1, 'name1')");
        stmt.execute("UPSERT INTO test VALUES (2, 'name2')");
        conn.commit();
    }
    // Classloader with no parent whose only URL is an empty temp jar; the tests
    // use it as a deliberately broken context classloader.
    badContextClassloader = new URLClassLoader(new URL[] {
            File.createTempFile("invalid", ".jar").toURI().toURL() }, null);
}
 
Example 18
/**
 * Returns the baseline property map applied before the test server starts.
 */
public static Map<String, String> getDefaultProps() {
  Map<String, String> defaultProps = new HashMap<String, String>();
  // Use fixed (non-current) timestamps for stats so test runs are deterministic.
  defaultProps.put(QueryServices.STATS_USE_CURRENT_TIME_ATTRIB, Boolean.FALSE.toString());
  // Blank Kerberos realm/KDC so no real security setup is required.
  defaultProps.put("java.security.krb5.realm", "");
  defaultProps.put("java.security.krb5.kdc", "");
  // Pick a free region-server port to avoid clashes between concurrent tests.
  defaultProps.put(HConstants.REGIONSERVER_PORT, String.valueOf(HBaseTestingUtility.randomFreePort()));
  return defaultProps;
}
 
Example 19
Source Project: phoenix   Source File: KeyOnlyIT.java    License: Apache License 2.0 5 votes vote down vote up
@BeforeClass
public static void doSetup() throws Exception {
    // Must update config before starting server.
    Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
    // Small guidepost width plus a bounded queue size for the key-only scans.
    props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(50));
    props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(100));
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example 20
Source Project: phoenix   Source File: MetaDataRegionObserver.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public void start(CoprocessorEnvironment env) throws IOException {
    // Sleep briefly to compensate for clock skew when SYSTEM.CATALOG moves
    // among region servers, because we rely on the server time of the RS
    // currently hosting SYSTEM.CATALOG.
    Configuration config = env.getConfiguration();
    long sleepTime = config.getLong(QueryServices.CLOCK_SKEW_INTERVAL_ATTRIB,
        QueryServicesOptions.DEFAULT_CLOCK_SKEW_INTERVAL);
    try {
        if(sleepTime > 0) {
            Thread.sleep(sleepTime);
        }
    } catch (InterruptedException ie) {
        // Restore the interrupt flag and continue startup.
        Thread.currentThread().interrupt();
    }
    // Whether the background rebuild of failed/disabled indexes is enabled.
    enableRebuildIndex =
            config.getBoolean(
                QueryServices.INDEX_FAILURE_HANDLING_REBUILD_ATTRIB,
                QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD);
    // Interval between rebuild task runs, and the delay before its first run.
    rebuildIndexTimeInterval =
            config.getLong(
                QueryServices.INDEX_FAILURE_HANDLING_REBUILD_INTERVAL_ATTRIB,
                QueryServicesOptions.DEFAULT_INDEX_FAILURE_HANDLING_REBUILD_INTERVAL);
    initialRebuildTaskDelay =
            config.getLong(
                QueryServices.INDEX_REBUILD_TASK_INITIAL_DELAY,
                QueryServicesOptions.DEFAULT_INDEX_REBUILD_TASK_INITIAL_DELAY);
}
 
Example 21
Source Project: phoenix   Source File: BaseResultIterators.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Appends the CLIENT-side portion of the EXPLAIN plan (chunk/row estimates,
 * parallelism, sampling, round-robin and RVC-offset details) to planSteps.
 */
@Override
public void explain(List<String> planSteps) {
    StringBuilder buf = new StringBuilder("CLIENT ");
    boolean showChunkCount = context.getConnection().getQueryServices().getProps().getBoolean(
            QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB,
            QueryServicesOptions.DEFAULT_EXPLAIN_CHUNK_COUNT);
    if (showChunkCount) {
        buf.append(this.splits.size()).append("-CHUNK ");
        boolean showRowCount = context.getConnection().getQueryServices().getProps().getBoolean(
                QueryServices.EXPLAIN_ROW_COUNT_ATTRIB,
                QueryServicesOptions.DEFAULT_EXPLAIN_ROW_COUNT);
        // Row/byte estimates are only shown when they are actually available.
        if (showRowCount && estimatedRows != null) {
            buf.append(estimatedRows).append(" ROWS ");
            buf.append(estimatedSize).append(" BYTES ");
        }
    }
    buf.append(getName()).append(" ").append(size()).append("-WAY ");

    if (this.plan.getStatement().getTableSamplingRate() != null) {
        buf.append(plan.getStatement().getTableSamplingRate()/100D).append("-").append("SAMPLED ");
    }
    try {
        if (plan.useRoundRobinIterator()) {
            buf.append("ROUND ROBIN ");
        }
    } catch (SQLException e) {
        throw new RuntimeException(e);
    }

    if (this.plan instanceof ScanPlan) {
        ScanPlan scanPlan = (ScanPlan) this.plan;
        if (scanPlan.getRowOffset().isPresent()) {
            buf.append("With RVC Offset " + "0x" + Hex.encodeHexString(scanPlan.getRowOffset().get()) + " ");
        }
    }

    explain(buf.toString(),planSteps);
}
 
Example 22
Source Project: phoenix   Source File: PhoenixStatement.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Returns the explicitly-set fetch size, or the connection's configured
 * scan cache size when no positive fetch size has been set.
 */
@Override
public int getFetchSize() throws SQLException {
    if (fetchSize > 0) {
        return fetchSize;
    }
    return connection.getQueryServices().getProps().getInt(
            QueryServices.SCAN_CACHE_SIZE_ATTRIB,
            QueryServicesOptions.DEFAULT_SCAN_CACHE_SIZE);
}
 
Example 23
Source Project: phoenix   Source File: UpdateCacheConnectionLevelPropIT.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Helper method that sets up the connections and creates the table to be tested.
 * <p>
 * Side effects: assigns the static fields {@code conn1}, {@code conn2} and
 * {@code spyForConn2}, creates {@code fullTableName} over {@code conn1} and
 * upserts a single row into it.
 * @param fullTableName The table's full name
 * @param tableUpdateCacheFrequency If not null, the table-level value to be set for 'Update
 *                                  Cache Frequency'
 * @param connUpdateCacheFrequency If not null, the connection-level value to be set for 'Update
 *                                 Cache Frequency'
 */
private static void setUpTableAndConnections(String fullTableName,
        String tableUpdateCacheFrequency, String connUpdateCacheFrequency) throws SQLException {
    // Create two connections - a connection that we'll use to create the table and the second
    // one that we will spy on and use to query the table.
    Properties props = new Properties();
    conn1 = DriverManager.getConnection(getUrl(), props);
    conn1.setAutoCommit(true);

    // NOTE: props is modified AFTER conn1 was opened, so the update-cache
    // frequency below applies only to conn2.
    if (connUpdateCacheFrequency != null) {
        props.put(QueryServices.DEFAULT_UPDATE_CACHE_FREQUENCY_ATRRIB,
                connUpdateCacheFrequency);
    }

    // use a spied ConnectionQueryServices so we can verify calls to getTable()
    spyForConn2 = Mockito.spy(driver.getConnectionQueryServices(getUrl(), props));
    conn2 = spyForConn2.connect(getUrl(), props);
    conn2.setAutoCommit(true);

    String createTableQuery =
            "CREATE TABLE " + fullTableName + " (k UNSIGNED_DOUBLE NOT NULL PRIMARY KEY, "
                    + "v1 UNSIGNED_DOUBLE, v2 UNSIGNED_DOUBLE, v3 UNSIGNED_DOUBLE)";

    // Optionally pin the table-level update cache frequency in the DDL.
    if (tableUpdateCacheFrequency != null) {
        createTableQuery += " UPDATE_CACHE_FREQUENCY = " + tableUpdateCacheFrequency;
    }

    // Create the table over first connection
    try (Statement stmt = conn1.createStatement()) {
        stmt.execute(createTableQuery);
        stmt.execute("UPSERT INTO " + fullTableName + " VALUES (1, 2, 3, 4)");
    }
    conn1.commit();
}
 
Example 24
Source Project: phoenix   Source File: ToDateFunctionIT.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testUnsignedLongToTimestampCast() throws SQLException {
    Properties props = new Properties();
    props.setProperty(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, "GMT+1");
    // FIX: try-with-resources replaces the manual try/finally close.
    try (Connection conn = DriverManager.getConnection(getUrl(), props)) {
        conn.setAutoCommit(false);
        conn.prepareStatement(
            "create table TT("
                    + "a unsigned_int not null, "
                    + "b unsigned_int not null, "
                    + "ts unsigned_long not null "
                    + "constraint PK primary key (a, b, ts))").execute();
        conn.commit();

        conn.prepareStatement("upsert into TT values (0, 22120, 1426188807198)").execute();
        conn.commit();

        // Casting the UNSIGNED_LONG column to TIMESTAMP must succeed...
        ResultSet rs = conn.prepareStatement("select a, b, ts, CAST(ts AS TIMESTAMP) from TT").executeQuery();
        assertTrue(rs.next());
        assertEquals(new Date(1426188807198L), rs.getObject(4));
        rs.close();

        // ...whereas casting the UNSIGNED_INT column must be rejected.
        try {
            rs = conn.prepareStatement("select a, b, ts, CAST(b AS TIMESTAMP) from TT").executeQuery();
            fail();
        } catch (TypeMismatchException ignored) {
            // expected: this cast is not allowed
        }
    }
}
 
Example 25
Source Project: phoenix   Source File: IndexToolForDeleteBeforeRebuildIT.java    License: Apache License 2.0 5 votes vote down vote up
@BeforeClass
public static synchronized void setup() throws Exception {
    // Server-side: small guideposts and a short metadata-cache TTL.
    Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(3);
    serverProps.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    serverProps.put(QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB, Long.toString(5));
    serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
        QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    // Client-side: stats-driven parallelization, transactions, row-key order.
    Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(4);
    clientProps.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(true));
    clientProps.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Long.toString(5));
    clientProps.put(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString());
    clientProps.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.TRUE.toString());
    // Config must be in place before the test driver/server starts.
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
        new ReadOnlyProps(clientProps.entrySet().iterator()));
}
 
Example 26
Source Project: phoenix   Source File: CountDistinctCompressionIT.java    License: Apache License 2.0 5 votes vote down vote up
@BeforeClass
public static synchronized void doSetup() throws Exception {
    // Must update config before starting server.
    Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
    // Use a very low threshold (1) so distinct-value compression kicks in.
    props.put(QueryServices.DISTINCT_VALUE_COMPRESS_THRESHOLD_ATTRIB, Long.toString(1));
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
 
Example 27
/**
 * Starts the mini-cluster with the server-side namespace mapping property set.
 * @param isNamespaceMappingEnabled "true"/"false" value for the mapping property
 * @throws Exception if the mini-cluster fails to start
 */
private void startMiniClusterWithToggleNamespaceMapping(String isNamespaceMappingEnabled) throws Exception {
    testUtil = new HBaseTestingUtility();
    Configuration clusterConf = testUtil.getConfiguration();
    clusterConf.set(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, isNamespaceMappingEnabled);
    // Avoid multiple clusters trying to bind to the master's info port (16010).
    clusterConf.setInt(HConstants.MASTER_INFO_PORT, -1);
    testUtil.startMiniCluster(1);
}
 
Example 28
Source Project: phoenix   Source File: PhoenixStatement.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Fails fast when user-defined functions are disabled for this connection:
 * throws UNALLOWED_USER_DEFINED_FUNCTIONS when none were parsed, otherwise
 * reports the parsed UDF names as not found.
 */
private void throwIfUnallowedUserDefinedFunctions(Map<String, UDFParseNode> udfParseNodes) throws SQLException {
    boolean udfsAllowed = connection
            .getQueryServices()
            .getProps()
            .getBoolean(QueryServices.ALLOW_USER_DEFINED_FUNCTIONS_ATTRIB,
                QueryServicesOptions.DEFAULT_ALLOW_USER_DEFINED_FUNCTIONS);
    if (udfsAllowed) {
        return;
    }
    if (udfParseNodes.isEmpty()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.UNALLOWED_USER_DEFINED_FUNCTIONS)
                .build().buildException();
    }
    throw new FunctionNotFoundException(udfParseNodes.keySet().toString());
}
 
Example 29
Source Project: phoenix   Source File: AggregatePlan.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Wraps the scanner in an OrderedResultIterator that sorts by row key, using
 * the configured spool threshold.
 */
@Override
public PeekingResultIterator newIterator(StatementContext context, ResultIterator scanner, Scan scan) throws SQLException {
    // Single ordering expression over the row key (flags as in the original).
    OrderByExpression rowKeyOrder = new OrderByExpression(RowKeyExpression.INSTANCE, false, true);
    int spoolThreshold = services.getProps().getInt(
            QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB,
            QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES);
    return new OrderedResultIterator(scanner,
            Collections.<OrderByExpression>singletonList(rowKeyOrder), spoolThreshold);
}
 
Example 30
Source Project: phoenix   Source File: QueryUtil.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a Phoenix JDBC connection URL from the HBase configuration's ZK
 * quorum string, enforcing a single ZK client port across all servers.
 * FIXES: removed the unused local {@code first}; the swallowed parse
 * exception is now named {@code ignored} with an explanatory comment.
 */
public static String getConnectionUrl(Properties props, Configuration conf)
        throws ClassNotFoundException, SQLException {
    // make sure we load the phoenix driver
    Class.forName(PhoenixDriver.class.getName());

    // read the hbase properties from the configuration
    String server = ZKConfig.getZKQuorumServersString(conf);
    // could be a comma-separated list
    String[] rawServers = server.split(",");
    List<String> servers = new ArrayList<String>(rawServers.length);
    int port = -1;
    for (String serverPort : rawServers) {
        try {
            server = Addressing.parseHostname(serverPort);
            int specifiedPort = Addressing.parsePort(serverPort);
            // there was a previously specified port and it doesn't match this server
            if (port > 0 && specifiedPort != port) {
                throw new IllegalStateException("Phoenix/HBase only supports connecting to a " +
                        "single zookeeper client port. Specify servers only as host names in " +
                        "HBase configuration");
            }
            // set the port to the specified port
            port = specifiedPort;
            servers.add(server);
        } catch (IllegalArgumentException ignored) {
            // Entry could not be parsed as host:port; skip it and keep the
            // port discovered so far (original best-effort behavior).
        }
    }
    // port wasn't set, shouldn't ever happen from HBase, but just in case
    if (port == -1) {
        port = conf.getInt(QueryServices.ZOOKEEPER_PORT_ATTRIB, -1);
        if (port == -1) {
            throw new RuntimeException("Client zk port was not set!");
        }
    }
    server = Joiner.on(',').join(servers);

    return getUrl(server, port);
}