org.apache.phoenix.util.ReadOnlyProps Java Examples
The following examples show how to use org.apache.phoenix.util.ReadOnlyProps.
Each example is taken from the Apache Phoenix project; its source file and license are noted above the code.
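Before the examples, here is a minimal, self-contained sketch (not taken from the Phoenix code base) of the construction patterns they all rely on: the shared ReadOnlyProps.EMPTY_PROPS constant, the constructor that takes an Iterator of Map.Entry (typically a Map's entrySet().iterator() or a Hadoop Configuration's iterator()), and value lookup via get(key) or get(key, defaultValue). The class name and the property key below are made up for illustration only.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.phoenix.util.ReadOnlyProps;

// Illustrative sketch only; the class name and "phoenix.example.setting" key are hypothetical.
public class ReadOnlyPropsSketch {
    public static void main(String[] args) {
        // The shared immutable empty instance, often handed to test drivers.
        ReadOnlyProps empty = ReadOnlyProps.EMPTY_PROPS;

        // Built from a Map's entry iterator -- the most common pattern in the examples below.
        Map<String, String> overrides = new HashMap<>();
        overrides.put("phoenix.example.setting", "42"); // hypothetical key
        ReadOnlyProps fromMap = new ReadOnlyProps(overrides.entrySet().iterator());

        // Built directly from a Hadoop Configuration's key/value iterator.
        Configuration conf = new Configuration();
        ReadOnlyProps fromConf = new ReadOnlyProps(conf.iterator());

        // Values are read back with get(key) or get(key, defaultValue).
        System.out.println(fromMap.get("phoenix.example.setting"));          // 42
        System.out.println(empty.get("phoenix.example.setting", "not-set")); // not-set
        System.out.println(fromConf.get("fs.defaultFS", "file:///"));
    }
}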
Example #1
Source File: ContextClassloaderIT.java From phoenix with Apache License 2.0

@BeforeClass
public static synchronized void setUpBeforeClass() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    hbaseTestUtil = new HBaseTestingUtility(conf);
    hbaseTestUtil.startMiniCluster();
    String clientPort = hbaseTestUtil.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
    String url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
            + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
    driver = initAndRegisterTestDriver(url, ReadOnlyProps.EMPTY_PROPS);
    Connection conn = DriverManager.getConnection(url);
    Statement stmt = conn.createStatement();
    stmt.execute("CREATE TABLE test (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR)");
    stmt.execute("UPSERT INTO test VALUES (1, 'name1')");
    stmt.execute("UPSERT INTO test VALUES (2, 'name2')");
    stmt.close();
    conn.commit();
    conn.close();
    badContextClassloader = new URLClassLoader(
            new URL[] { File.createTempFile("invalid", ".jar").toURI().toURL() }, null);
}
Example #2
Source File: MutableIndexFailureWithNamespaceIT.java From phoenix with Apache License 2.0

@BeforeClass
public static synchronized void doSetup() throws Exception {
    Map<String, String> serverProps = getServerProps();
    serverProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.TRUE.toString());
    Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(3);
    clientProps.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, Boolean.TRUE.toString());
    clientProps.put(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "2");
    clientProps.put(QueryServices.INDEX_REGION_OBSERVER_ENABLED_ATTRIB, Boolean.FALSE.toString());
    NUM_SLAVES_BASE = 4;
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
            new ReadOnlyProps(clientProps.entrySet().iterator()));
    TableName systemTable = SchemaUtil.getPhysicalTableName(
            PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, true);
    indexRebuildTaskRegionEnvironment = getUtility()
            .getRSForFirstRegionInTable(systemTable).getRegions(systemTable).get(0)
            .getCoprocessorHost()
            .findCoprocessorEnvironment(MetaDataRegionObserver.class.getName());
    MetaDataRegionObserver.initRebuildIndexConnectionProps(
            indexRebuildTaskRegionEnvironment.getConfiguration());
}
Example #3
Source File: ContextClassloaderIT.java From phoenix with Apache License 2.0

@BeforeClass
public static void setUpBeforeClass() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    hbaseTestUtil = new HBaseTestingUtility(conf);
    hbaseTestUtil.startMiniCluster();
    String clientPort = hbaseTestUtil.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
    url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
            + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
    driver = initAndRegisterDriver(url, ReadOnlyProps.EMPTY_PROPS);
    Connection conn = DriverManager.getConnection(url);
    Statement stmt = conn.createStatement();
    stmt.execute("CREATE TABLE test (ID INTEGER NOT NULL PRIMARY KEY, NAME VARCHAR)");
    stmt.execute("UPSERT INTO test VALUES (1, 'name1')");
    stmt.execute("UPSERT INTO test VALUES (2, 'name2')");
    stmt.close();
    conn.commit();
    conn.close();
    badContextClassloader = new URLClassLoader(
            new URL[] { File.createTempFile("invalid", ".jar").toURI().toURL() }, null);
}
Example #4
Source File: IndexVerificationOldDesignIT.java From phoenix with Apache License 2.0

@BeforeClass
public static synchronized void setup() throws Exception {
    Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
    serverProps.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    serverProps.put(QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB, Long.toString(5));
    serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
            QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    serverProps.put(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS, Long.toString(8));
    Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
    clientProps.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(true));
    clientProps.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Long.toString(5));
    clientProps.put(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString());
    clientProps.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.TRUE.toString());
    clientProps.put(QueryServices.INDEX_REGION_OBSERVER_ENABLED_ATTRIB, Boolean.toString(false));
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
            new ReadOnlyProps(clientProps.entrySet().iterator()));
}
Example #5
Source File: IndexToolIT.java From phoenix with Apache License 2.0

@BeforeClass
public static synchronized void setup() throws Exception {
    Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
    serverProps.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    serverProps.put(QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB, Long.toString(5));
    serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
            QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    serverProps.put(QueryServices.INDEX_REBUILD_PAGE_SIZE_IN_ROWS, Long.toString(8));
    Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
    clientProps.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(true));
    clientProps.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Long.toString(5));
    clientProps.put(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString());
    clientProps.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.TRUE.toString());
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
            new ReadOnlyProps(clientProps.entrySet().iterator()));
}
Example #6
Source File: QueryTimeoutIT.java From phoenix with Apache License 2.0

@Test
public void testSetRPCTimeOnConnection() throws Exception {
    Properties overriddenProps = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    overriddenProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
            QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    overriddenProps.setProperty("hbase.rpc.timeout", Long.toString(100));
    String url = QueryUtil.getConnectionUrl(overriddenProps, config, "longRunning");
    Connection conn1 = DriverManager.getConnection(url, overriddenProps);
    ConnectionQueryServices s1 = conn1.unwrap(PhoenixConnection.class).getQueryServices();
    ReadOnlyProps configProps = s1.getProps();
    assertEquals("100", configProps.get("hbase.rpc.timeout"));

    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    props.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
            QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    Connection conn2 = DriverManager.getConnection(getUrl(), props);
    ConnectionQueryServices s2 = conn2.unwrap(PhoenixConnection.class).getQueryServices();
    assertFalse(s1 == s2);

    Connection conn3 = DriverManager.getConnection(getUrl(), props);
    ConnectionQueryServices s3 = conn3.unwrap(PhoenixConnection.class).getQueryServices();
    assertTrue(s2 == s3);

    Connection conn4 = DriverManager.getConnection(url, overriddenProps);
    ConnectionQueryServices s4 = conn4.unwrap(PhoenixConnection.class).getQueryServices();
    assertTrue(s1 == s4);
}
Example #7
Source File: PMetaDataImplTest.java From phoenix with Apache License 2.0

@Test
public void shouldAlwaysKeepOneEntryIfMaxSizeIsZero() throws Exception {
    TestTimeKeeper timeKeeper = new TestTimeKeeper();
    Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
    props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "0");
    props.put(QueryServices.CLIENT_CACHE_ENCODING, "object");
    PMetaData metaData = new PMetaDataImpl(5, timeKeeper, new ReadOnlyProps(props));
    addToTable(metaData, "a", 1, timeKeeper);
    assertEquals(1, metaData.size());
    addToTable(metaData, "b", 1, timeKeeper);
    assertEquals(1, metaData.size());
    addToTable(metaData, "c", 5, timeKeeper);
    assertEquals(1, metaData.size());
    addToTable(metaData, "d", 20, timeKeeper);
    assertEquals(1, metaData.size());
    assertNames(metaData, "d");
    addToTable(metaData, "e", 1, timeKeeper);
    assertEquals(1, metaData.size());
    addToTable(metaData, "f", 2, timeKeeper);
    assertEquals(1, metaData.size());
    assertNames(metaData, "f");
}
Example #8
Source File: DropIndexDuringUpsertIT.java From phoenix with Apache License 2.0

@Before
public void doSetup() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    conf.setInt("hbase.client.retries.number", 2);
    conf.setInt("hbase.client.pause", 5000);
    conf.setInt("hbase.balancer.period", Integer.MAX_VALUE);
    conf.setLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB, 0);
    util = new HBaseTestingUtility(conf);
    util.startMiniCluster(NUM_SLAVES);
    String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
    url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
            + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
    Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
    // Must update config before starting server
    props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
    driver = initAndRegisterTestDriver(url, new ReadOnlyProps(props.entrySet().iterator()));
}
Example #9
Source File: PMetaDataImplTest.java From phoenix with Apache License 2.0

@Test
public void testAge() throws Exception {
    TestTimeKeeper timeKeeper = new TestTimeKeeper();
    Map<String, String> props = Maps.newHashMapWithExpectedSize(2);
    props.put(QueryServices.MAX_CLIENT_METADATA_CACHE_SIZE_ATTRIB, "10");
    props.put(QueryServices.CLIENT_CACHE_ENCODING, "object");
    PMetaData metaData = new PMetaDataImpl(5, timeKeeper, new ReadOnlyProps(props));
    String tableName = "a";
    addToTable(metaData, tableName, 1, timeKeeper);
    PTableRef aTableRef = metaData.getTableRef(new PTableKey(null, tableName));
    assertNotNull(aTableRef);
    assertEquals(1, metaData.getAge(aTableRef));
    tableName = "b";
    addToTable(metaData, tableName, 1, timeKeeper);
    PTableRef bTableRef = metaData.getTableRef(new PTableKey(null, tableName));
    assertNotNull(bTableRef);
    assertEquals(1, metaData.getAge(bTableRef));
    assertEquals(2, metaData.getAge(aTableRef));
}
Example #10
Source File: MutableIndexFailureIT.java From phoenix with Apache License 2.0

@Before
public void doSetup() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    setUpConfigForMiniCluster(conf);
    conf.setInt("hbase.client.retries.number", 2);
    conf.setInt("hbase.client.pause", 5000);
    conf.setInt("hbase.balancer.period", Integer.MAX_VALUE);
    conf.setLong(QueryServices.INDEX_FAILURE_HANDLING_REBUILD_OVERLAP_TIME_ATTRIB, 0);
    conf.set(CoprocessorHost.MASTER_COPROCESSOR_CONF_KEY, IndexMasterObserver.class.getName());
    conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS, IndexLoadBalancer.class, LoadBalancer.class);
    util = new HBaseTestingUtility(conf);
    util.startMiniCluster(NUM_SLAVES);
    String clientPort = util.getConfiguration().get(QueryServices.ZOOKEEPER_PORT_ATTRIB);
    url = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + LOCALHOST + JDBC_PROTOCOL_SEPARATOR + clientPort
            + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
    driver = initAndRegisterDriver(url, ReadOnlyProps.EMPTY_PROPS);
}
Example #11
Source File: SecureUserConnectionsIT.java From phoenix with Apache License 2.0

@Test
public void testMultipleConnectionsAsSameUser() throws Exception {
    final HashSet<ConnectionInfo> connections = new HashSet<>();
    final String princ1 = getUserPrincipal(1);
    final File keytab1 = getUserKeytabFile(1);
    final String url = joinUserAuthentication(BASE_URL, princ1, keytab1);
    UserGroupInformation.loginUserFromKeytab(princ1, keytab1.getPath());

    // Using the same UGI should result in two equivalent ConnectionInfo objects
    connections.add(ConnectionInfo.create(url).normalize(ReadOnlyProps.EMPTY_PROPS, EMPTY_PROPERTIES));
    assertEquals(1, connections.size());
    // Sanity check
    verifyAllConnectionsAreKerberosBased(connections);

    // Because the UGI instances are unique, so are the connections
    connections.add(ConnectionInfo.create(url).normalize(ReadOnlyProps.EMPTY_PROPS, EMPTY_PROPERTIES));
    assertEquals(1, connections.size());
}
Example #12
Source File: SystemCatalogCreationOnConnectionIT.java From phoenix with Apache License 2.0

private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver
        firstConnectionNSMappingServerDisabledClientEnabled() throws Exception {
    startMiniClusterWithToggleNamespaceMapping(Boolean.FALSE.toString());
    Properties clientProps = getClientProperties(true, true);
    SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
            new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
    try {
        driver.getConnectionQueryServices(getJdbcUrl(), clientProps);
        fail("Client should not be able to connect to cluster with inconsistent client-server namespace mapping properties");
    } catch (SQLException sqlE) {
        assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(),
                sqlE.getErrorCode());
    }
    hbaseTables = getHBaseTables();
    assertEquals(0, hbaseTables.size());
    assertEquals(0, countUpgradeAttempts);
    assertFalse(isSystemNamespaceCreated());
    return driver;
}
Example #13
Source File: ViewIT.java From phoenix with Apache License 2.0

@BeforeClass
public static synchronized void doSetup() throws Exception {
    NUM_SLAVES_BASE = 6;
    Map<String, String> props = Collections.emptyMap();
    boolean splitSystemCatalog = (driver == null);
    Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(1);
    serverProps.put(QueryServices.PHOENIX_ACLS_ENABLED, "true");
    serverProps.put(PhoenixMetaDataCoprocessorHost.PHOENIX_META_DATA_COPROCESSOR_CONF_KEY,
            TestMetaDataRegionObserver.class.getName());
    serverProps.put("hbase.coprocessor.abortonerror", "false");
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
            new ReadOnlyProps(props.entrySet().iterator()));
    // Split SYSTEM.CATALOG once after the mini-cluster is started
    if (splitSystemCatalog) {
        splitSystemCatalog();
    }
}
Example #14
Source File: ParallelStatsEnabledIT.java From phoenix with Apache License 2.0

@BeforeClass
public static final void doSetup() throws Exception {
    Map<String, String> props = Maps.newHashMapWithExpectedSize(1);
    props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    props.put(QueryServices.STATS_UPDATE_FREQ_MS_ATTRIB, Long.toString(5));
    props.put(QueryServices.MAX_SERVER_METADATA_CACHE_TIME_TO_LIVE_MS_ATTRIB, Long.toString(5));
    props.put(QueryServices.USE_STATS_FOR_PARALLELIZATION, Boolean.toString(true));
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
    TaskRegionEnvironment = getUtility()
            .getRSForFirstRegionInTable(PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
            .getRegions(PhoenixDatabaseMetaData.SYSTEM_TASK_HBASE_TABLE_NAME)
            .get(0).getCoprocessorHost()
            .findCoprocessorEnvironment(TaskRegionObserver.class.getName());
}
Example #15
Source File: SystemCatalogCreationOnConnectionIT.java From phoenix with Apache License 2.0

private SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver
        firstConnectionNSMappingServerEnabledClientDisabled() throws Exception {
    startMiniClusterWithToggleNamespaceMapping(Boolean.TRUE.toString());
    Properties clientProps = getClientProperties(false, false);
    SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver driver =
            new SystemCatalogCreationOnConnectionIT.PhoenixSysCatCreationTestingDriver(ReadOnlyProps.EMPTY_PROPS);
    try {
        driver.getConnectionQueryServices(getJdbcUrl(), clientProps);
        fail("Client should not be able to connect to cluster with inconsistent client-server namespace mapping properties");
    } catch (SQLException sqlE) {
        assertEquals(SQLExceptionCode.INCONSISTENT_NAMESPACE_MAPPING_PROPERTIES.getErrorCode(),
                sqlE.getErrorCode());
    }
    hbaseTables = getHBaseTables();
    assertEquals(0, hbaseTables.size());
    assertEquals(0, countUpgradeAttempts);
    return driver;
}
Example #16
Source File: MetaDataRegionObserver.java From phoenix with Apache License 2.0

public BuildIndexScheduleTask(RegionCoprocessorEnvironment env, List<String> onlyTheseTables) {
    this.onlyTheseTables = onlyTheseTables == null ? null : ImmutableList.copyOf(onlyTheseTables);
    this.env = env;
    Configuration configuration = env.getConfiguration();
    this.rebuildIndexBatchSize = configuration.getLong(
            QueryServices.INDEX_FAILURE_HANDLING_REBUILD_PERIOD, HConstants.LATEST_TIMESTAMP);
    this.configuredBatches = configuration.getLong(
            QueryServices.INDEX_FAILURE_HANDLING_REBUILD_NUMBER_OF_BATCHES_PER_TABLE, 10);
    this.indexDisableTimestampThreshold = configuration.getLong(
            QueryServices.INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD,
            QueryServicesOptions.DEFAULT_INDEX_REBUILD_DISABLE_TIMESTAMP_THRESHOLD);
    this.pendingDisableThreshold = configuration.getLong(
            QueryServices.INDEX_PENDING_DISABLE_THRESHOLD,
            QueryServicesOptions.DEFAULT_INDEX_PENDING_DISABLE_THRESHOLD);
    this.props = new ReadOnlyProps(env.getConfiguration().iterator());
}
Example #17
Source File: MutableIndexReplicationIT.java From phoenix with Apache License 2.0

private static void setupDriver() throws Exception {
    LOGGER.info("Setting up phoenix driver");
    Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
    // Forces server cache to be used
    props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2));
    props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
    // Must update config before starting server
    URL = getLocalClusterUrl(utility1);
    LOGGER.info("Connecting driver to " + URL);
    driver = initAndRegisterTestDriver(URL, new ReadOnlyProps(props.entrySet().iterator()));
}
Example #18
Source File: BaseTest.java From phoenix with Apache License 2.0

/**
 * Create a {@link PhoenixTestDriver} and register it.
 * @return an initialized and registered {@link PhoenixTestDriver}
 */
protected static PhoenixTestDriver initAndRegisterDriver(String url, ReadOnlyProps props) throws Exception {
    PhoenixTestDriver newDriver = new PhoenixTestDriver(props);
    DriverManager.registerDriver(newDriver);
    Driver oldDriver = DriverManager.getDriver(url);
    if (oldDriver != newDriver) {
        destroyDriver(oldDriver);
    }
    Connection conn = newDriver.connect(url, PropertiesUtil.deepCopy(TEST_PROPERTIES));
    conn.close();
    return newDriver;
}
Example #19
Source File: BaseTest.java From phoenix with Apache License 2.0

public static Configuration setUpConfigForMiniCluster(Configuration conf, ReadOnlyProps overrideProps) {
    assertNotNull(conf);
    setDefaultTestConfig(conf, overrideProps);
    /*
     * The default configuration of mini cluster ends up spawning a lot of threads
     * that are not really needed by phoenix for test purposes. Limiting these threads
     * helps us in running several mini clusters at the same time without hitting
     * the threads limit imposed by the OS.
     */
    conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 5);
    conf.setInt("hbase.regionserver.metahandler.count", 2);
    conf.setInt(HConstants.MASTER_HANDLER_COUNT, 2);
    conf.setClass("hbase.coprocessor.regionserver.classes", LocalIndexMerger.class,
            RegionServerObserver.class);
    conf.setInt("dfs.namenode.handler.count", 2);
    conf.setInt("dfs.namenode.service.handler.count", 2);
    conf.setInt("dfs.datanode.handler.count", 2);
    conf.setInt("ipc.server.read.threadpool.size", 2);
    conf.setInt("ipc.server.handler.threadpool.size", 2);
    conf.setInt("hbase.hconnection.threads.max", 2);
    conf.setInt("hbase.hconnection.threads.core", 2);
    conf.setInt("hbase.htable.threads.max", 2);
    conf.setInt("hbase.regionserver.hlog.syncer.count", 2);
    conf.setInt("hbase.hlog.asyncer.number", 2);
    conf.setInt("hbase.assignment.zkevent.workers", 5);
    conf.setInt("hbase.assignment.threads.max", 5);
    conf.setInt("hbase.catalogjanitor.interval", 5000);
    return conf;
}
Example #20
Source File: SplitSystemCatalogIT.java From phoenix with Apache License 2.0

@BeforeClass
public static synchronized void doSetup() throws Exception {
    NUM_SLAVES_BASE = 6;
    Map<String, String> props = Collections.emptyMap();
    boolean splitSystemCatalog = (driver == null);
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
    // Split SYSTEM.CATALOG once after the mini-cluster is started
    if (splitSystemCatalog) {
        splitSystemCatalog();
    }
}
Example #21
Source File: QueryTimeoutIT.java From phoenix with Apache License 2.0

@BeforeClass
public static void doSetup() throws Exception {
    Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
    // Must update config before starting server
    props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(700));
    props.put(QueryServices.QUEUE_SIZE_ATTRIB, Integer.toString(10000));
    props.put(QueryServices.EXPLAIN_CHUNK_COUNT_ATTRIB, Boolean.TRUE.toString());
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
Example #22
Source File: BaseTenantSpecificTablesIT.java From phoenix with Apache License 2.0

@BeforeClass
public static void doSetup() throws Exception {
    Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
    // Must update config before starting server
    props.put(QueryServices.STATS_GUIDEPOST_WIDTH_BYTES_ATTRIB, Long.toString(20));
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
    PHOENIX_JDBC_TENANT_SPECIFIC_URL = getUrl() + ';' + TENANT_ID_ATTRIB + '=' + TENANT_ID;
    PHOENIX_JDBC_TENANT_SPECIFIC_URL2 = getUrl() + ';' + TENANT_ID_ATTRIB + '=' + TENANT_ID2;
}
Example #23
Source File: StatementContext.java From phoenix with Apache License 2.0

public StatementContext(PhoenixStatement statement, ColumnResolver resolver, BindManager binds, Scan scan,
        SequenceManager seqManager, boolean isRequestMetricsEnabled) {
    this.statement = statement;
    this.resolver = resolver;
    this.scan = scan;
    this.sequences = seqManager;
    this.binds = binds;
    this.aggregates = new AggregationManager();
    this.expressions = new ExpressionManager();
    PhoenixConnection connection = statement.getConnection();
    ReadOnlyProps props = connection.getQueryServices().getProps();
    String timeZoneID = props.get(QueryServices.DATE_FORMAT_TIMEZONE_ATTRIB, DateUtil.DEFAULT_TIME_ZONE_ID);
    this.dateFormat = props.get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
    this.dateFormatter = DateUtil.getDateFormatter(dateFormat, timeZoneID);
    this.timeFormat = props.get(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT);
    this.timeFormatter = DateUtil.getTimeFormatter(timeFormat, timeZoneID);
    this.timestampFormat = props.get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, DateUtil.DEFAULT_TIMESTAMP_FORMAT);
    this.timestampFormatter = DateUtil.getTimestampFormatter(timestampFormat, timeZoneID);
    this.dateFormatTimeZone = DateUtil.getTimeZone(timeZoneID);
    this.numberFormat = props.get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT);
    this.tempPtr = new ImmutableBytesWritable();
    this.currentTable = resolver != null && !resolver.getTables().isEmpty() ? resolver.getTables().get(0) : null;
    this.whereConditionColumns = new ArrayList<Pair<byte[], byte[]>>();
    this.dataColumns = this.currentTable == null
            ? Collections.<PColumn, Integer> emptyMap()
            : Maps.<PColumn, Integer> newLinkedHashMap();
    this.subqueryResults = Maps.<SelectStatement, Object> newHashMap();
    this.readMetricsQueue = new ReadMetricQueue(isRequestMetricsEnabled, connection.getLogLevel());
    this.overAllQueryMetrics = new OverAllQueryMetrics(isRequestMetricsEnabled, connection.getLogLevel());
    this.retryingPersistentCache = Maps.<Long, Boolean> newHashMap();
}
Example #24
Source File: IndexExtendedIT.java From phoenix with Apache License 2.0

@BeforeClass
public static synchronized void doSetup() throws Exception {
    Map<String, String> serverProps = Maps.newHashMapWithExpectedSize(2);
    serverProps.put(QueryServices.EXTRA_JDBC_ARGUMENTS_ATTRIB,
            QueryServicesOptions.DEFAULT_EXTRA_JDBC_ARGUMENTS);
    Map<String, String> clientProps = Maps.newHashMapWithExpectedSize(2);
    clientProps.put(QueryServices.TRANSACTIONS_ENABLED, Boolean.TRUE.toString());
    clientProps.put(QueryServices.FORCE_ROW_KEY_ORDER_ATTRIB, Boolean.TRUE.toString());
    setUpTestDriver(new ReadOnlyProps(serverProps.entrySet().iterator()),
            new ReadOnlyProps(clientProps.entrySet().iterator()));
}
Example #25
Source File: PTableRefFactory.java From phoenix with Apache License 2.0

public static PTableRefFactory getFactory(ReadOnlyProps props) {
    String encodingEnumString = props.get(QueryServices.CLIENT_CACHE_ENCODING,
            QueryServicesOptions.DEFAULT_CLIENT_CACHE_ENCODING);
    Encoding encoding = Encoding.valueOf(encodingEnumString.toUpperCase());
    switch (encoding) {
    case PROTOBUF:
        return SerializedPTableRefFactory.getFactory();
    case OBJECT:
    default:
        return INSTANCE;
    }
}
Example #26
Source File: ConnectionQueryServicesImplTest.java From phoenix with Apache License 2.0

@SuppressWarnings("unchecked")
@Test
public void testExceptionHandlingOnSystemNamespaceCreation() throws Exception {
    ConnectionQueryServicesImpl cqs = mock(ConnectionQueryServicesImpl.class);
    // Invoke the real methods for these two calls
    when(cqs.createSchema(any(List.class), anyString())).thenCallRealMethod();
    doCallRealMethod().when(cqs).ensureSystemTablesMigratedToSystemNamespace();
    // Do nothing for this method, just check that it was invoked later
    doNothing().when(cqs).createSysMutexTableIfNotExists(any(Admin.class));
    // Spoof out this call so that ensureSystemTablesUpgrade() will return-fast.
    when(cqs.getSystemTableNamesInDefaultNamespace(any(Admin.class)))
            .thenReturn(Collections.<TableName> emptyList());
    // Throw a special exception to check on later
    doThrow(PHOENIX_IO_EXCEPTION).when(cqs).ensureNamespaceCreated(anyString());
    // Make sure that ensureSystemTablesMigratedToSystemNamespace will try to migrate the system tables.
    Map<String, String> props = new HashMap<>();
    props.put(QueryServices.IS_NAMESPACE_MAPPING_ENABLED, "true");
    when(cqs.getProps()).thenReturn(new ReadOnlyProps(props));
    cqs.ensureSystemTablesMigratedToSystemNamespace();
    // Should be called after upgradeSystemTables()
    // Proves that execution proceeded
    verify(cqs).getSystemTableNamesInDefaultNamespace(any(Admin.class));
    try {
        // Verifies that the exception is propagated back to the caller
        cqs.createSchema(Collections.<Mutation> emptyList(), "");
    } catch (PhoenixIOException e) {
        assertEquals(PHOENIX_IO_EXCEPTION, e);
    }
}
Example #27
Source File: SecureUserConnectionsIT.java From phoenix with Apache License 2.0

@Test
public void testHostSubstitutionInUrl() throws Exception {
    final HashSet<ConnectionInfo> connections = new HashSet<>();
    final String princ1 = getServicePrincipal(1);
    final File keytab1 = getServiceKeytabFile(1);
    final String princ2 = getServicePrincipal(2);
    final File keytab2 = getServiceKeytabFile(2);
    final String url1 = joinUserAuthentication(BASE_URL, princ1, keytab1);
    final String url2 = joinUserAuthentication(BASE_URL, princ2, keytab2);

    // Using the same UGI should result in two equivalent ConnectionInfo objects
    connections.add(ConnectionInfo.create(url1).normalize(ReadOnlyProps.EMPTY_PROPS, EMPTY_PROPERTIES));
    assertEquals(1, connections.size());
    // Sanity check
    verifyAllConnectionsAreKerberosBased(connections);

    // Logging in as the same user again should not duplicate connections
    connections.add(ConnectionInfo.create(url1).normalize(ReadOnlyProps.EMPTY_PROPS, EMPTY_PROPERTIES));
    assertEquals(1, connections.size());
    // Sanity check
    verifyAllConnectionsAreKerberosBased(connections);

    // Add a second one.
    connections.add(ConnectionInfo.create(url2).normalize(ReadOnlyProps.EMPTY_PROPS, EMPTY_PROPERTIES));
    assertEquals(2, connections.size());
    verifyAllConnectionsAreKerberosBased(connections);

    // Again, verify this user is not duplicated
    connections.add(ConnectionInfo.create(url2).normalize(ReadOnlyProps.EMPTY_PROPS, EMPTY_PROPERTIES));
    assertEquals(2, connections.size());
    verifyAllConnectionsAreKerberosBased(connections);

    // Because the UGI instances are unique, so are the connections
    connections.add(ConnectionInfo.create(url1).normalize(ReadOnlyProps.EMPTY_PROPS, EMPTY_PROPERTIES));
    assertEquals(3, connections.size());
    verifyAllConnectionsAreKerberosBased(connections);
}
Example #28
Source File: GlobalIndexOptimizationIT.java From phoenix with Apache License 2.0

@BeforeClass
@Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
public static void doSetup() throws Exception {
    Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
    // Drop the HBase table metadata for this test
    props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
    // Must update config before starting server
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
Example #29
Source File: BaseMutableIndexIT.java From phoenix with Apache License 2.0

@BeforeClass
@Shadower(classBeingShadowed = BaseHBaseManagedTimeIT.class)
public static void doSetup() throws Exception {
    Map<String, String> props = Maps.newHashMapWithExpectedSize(3);
    // Don't split intra region so we can more easily know that the n-way parallelization
    // is for the explain plan
    // Forces server cache to be used
    props.put(QueryServices.INDEX_MUTATE_BATCH_SIZE_THRESHOLD_ATTRIB, Integer.toString(2));
    props.put(QueryServices.DROP_METADATA_ATTRIB, Boolean.toString(true));
    setUpTestDriver(new ReadOnlyProps(props.entrySet().iterator()));
}
Example #30
Source File: BaseTest.java From phoenix with Apache License 2.0

/**
 * Initialize the cluster in distributed mode
 * @param overrideProps TODO
 * @return url to be used by clients to connect to the mini cluster.
 * @throws Exception
 */
private static String initClusterDistributedMode(Configuration conf, ReadOnlyProps overrideProps)
        throws Exception {
    setTestConfigForDistribuedCluster(conf, overrideProps);
    try {
        IntegrationTestingUtility util = new IntegrationTestingUtility(conf);
        utility = util;
        util.initializeCluster(NUM_SLAVES_BASE);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    return JDBC_PROTOCOL + JDBC_PROTOCOL_TERMINATOR + PHOENIX_TEST_DRIVER_URL_PARAM;
}