Java Code Examples for org.apache.flink.table.api.TableEnvironment#useCatalog()

The following examples show how to use org.apache.flink.table.api.TableEnvironment#useCatalog(). They are taken from open source projects; the source file, project, and license are noted above each example.
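Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: register a Catalog under a name, make it current with useCatalog(), and run SQL against it. The catalog name "my_catalog", the GenericInMemoryCatalog, and the datagen table are illustrative assumptions for this sketch, not taken from any of the projects below.

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.catalog.GenericInMemoryCatalog;

public class UseCatalogSketch {

	public static void main(String[] args) {
		TableEnvironment tEnv = TableEnvironment.create(
				EnvironmentSettings.newInstance().inBatchMode().build());

		// Register a catalog under a name of our choosing ("my_catalog" is illustrative),
		// then make it the current catalog so unqualified table names resolve against it.
		tEnv.registerCatalog("my_catalog", new GenericInMemoryCatalog("my_catalog"));
		tEnv.useCatalog("my_catalog");

		// DDL and queries now run against my_catalog and its default database.
		tEnv.executeSql("CREATE TABLE t (i INT) WITH ('connector' = 'datagen', 'number-of-rows' = '3')");
		tEnv.executeSql("SELECT * FROM t").print();
	}
}

The project examples below follow the same shape, but register a HiveCatalog or PulsarCatalog instead of an in-memory one.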
Example 1
Source File: HiveTableSinkITCase.java    From flink with Apache License 2.0
@Test
public void testBatchAppend() {
	TableEnvironment tEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode(SqlDialect.HIVE);
	tEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
	tEnv.useCatalog(hiveCatalog.getName());
	tEnv.executeSql("create database db1");
	tEnv.useDatabase("db1");
	try {
		tEnv.executeSql("create table append_table (i int, j int)");
		TableEnvUtil.execInsertSqlAndWaitResult(tEnv, "insert into append_table select 1, 1");
		TableEnvUtil.execInsertSqlAndWaitResult(tEnv, "insert into append_table select 2, 2");
		ArrayList<Row> rows = Lists.newArrayList(tEnv.executeSql("select * from append_table").collect());
		rows.sort(Comparator.comparingInt(o -> (int) o.getField(0)));
		Assert.assertEquals(Arrays.asList(Row.of(1, 1), Row.of(2, 2)), rows);
	} finally {
		tEnv.executeSql("drop database db1 cascade");
	}
}
 
Example 2
Source File: HiveCatalogUseBlinkITCase.java    From flink with Apache License 2.0
@Test
public void testTimestampUDF() throws Exception {

	TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode(SqlDialect.HIVE);
	tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
	tableEnv.useCatalog(hiveCatalog.getName());
	tableEnv.executeSql(String.format("create function myyear as '%s'", UDFYear.class.getName()));
	tableEnv.executeSql("create table src(ts timestamp)");
	try {
		HiveTestUtils.createTextTableInserter(hiveShell, "default", "src")
				.addRow(new Object[]{Timestamp.valueOf("2013-07-15 10:00:00")})
				.addRow(new Object[]{Timestamp.valueOf("2019-05-23 17:32:55")})
				.commit();

		List<Row> results = Lists.newArrayList(
				tableEnv.sqlQuery("select myyear(ts) as y from src").execute().collect());
		Assert.assertEquals(2, results.size());
		Assert.assertEquals("[2013, 2019]", results.toString());
	} finally {
		tableEnv.executeSql("drop table src");
	}
}
 
Example 3
Source File: CatalogITest.java    From pulsar-flink with Apache License 2.0
@Test
public void testCatalogs() throws Exception {
    String inmemoryCatalog = "inmemorycatalog";
    String pulsarCatalog1 = "pulsarcatalog1";
    String pulsarCatalog2 = "pulsarcatalog2";

    ExecutionContext context = createExecutionContext(CATALOGS_ENVIRONMENT_FILE, getStreamingConfs());
    TableEnvironment tableEnv = context.createEnvironmentInstance().getTableEnvironment();

    assertEquals(tableEnv.getCurrentCatalog(), inmemoryCatalog);
    assertEquals(tableEnv.getCurrentDatabase(), "mydatabase");

    Catalog catalog = tableEnv.getCatalog(pulsarCatalog1).orElse(null);
    assertNotNull(catalog);
    assertTrue(catalog instanceof PulsarCatalog);
    tableEnv.useCatalog(pulsarCatalog1);
    assertEquals(tableEnv.getCurrentDatabase(), "public/default");

    catalog = tableEnv.getCatalog(pulsarCatalog2).orElse(null);
    assertNotNull(catalog);
    assertTrue(catalog instanceof PulsarCatalog);
    tableEnv.useCatalog(pulsarCatalog2);
    assertEquals(tableEnv.getCurrentDatabase(), "tn/ns");
}
 
Example 4
Source File: HiveCatalogITCase.java    From flink with Apache License 2.0
@Test
public void testTableWithPrimaryKey() {
	EnvironmentSettings.Builder builder = EnvironmentSettings.newInstance().useBlinkPlanner();
	EnvironmentSettings settings = builder.build();
	TableEnvironment tableEnv = TableEnvironment.create(settings);
	tableEnv.getConfig().getConfiguration().setInteger(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1);

	tableEnv.registerCatalog("catalog1", hiveCatalog);
	tableEnv.useCatalog("catalog1");

	final String createTable = "CREATE TABLE pk_src (\n" +
			"  uuid varchar(40) not null,\n" +
			"  price DECIMAL(10, 2),\n" +
			"  currency STRING,\n" +
			"  ts6 TIMESTAMP(6),\n" +
			"  ts AS CAST(ts6 AS TIMESTAMP(3)),\n" +
			"  WATERMARK FOR ts AS ts,\n" +
			"  constraint ct1 PRIMARY KEY(uuid) NOT ENFORCED)\n" +
			"  WITH (\n" +
			"    'connector.type' = 'filesystem'," +
			"    'connector.path' = 'file://fakePath'," +
			"    'format.type' = 'csv')";

	tableEnv.executeSql(createTable);

	TableSchema tableSchema = tableEnv.getCatalog(tableEnv.getCurrentCatalog())
			.map(catalog -> {
				try {
					final ObjectPath tablePath = ObjectPath.fromString(catalog.getDefaultDatabase() + '.' + "pk_src");
					return catalog.getTable(tablePath).getSchema();
				} catch (TableNotExistException e) {
					return null;
				}
			}).orElse(null);
	assertNotNull(tableSchema);
	assertEquals(
			tableSchema.getPrimaryKey(),
			Optional.of(UniqueConstraint.primaryKey("ct1", Collections.singletonList("uuid"))));
	tableEnv.executeSql("DROP TABLE pk_src");
}
 
Example 5
Source File: HiveTableSourceITCase.java    From flink with Apache License 2.0
private void testSourceConfig(boolean fallbackMR, boolean inferParallelism) throws Exception {
	HiveTableFactory tableFactorySpy = spy((HiveTableFactory) hiveCatalog.getTableFactory().get());

	// Stub createTableSource() on the Mockito spy so every source created through the
	// catalog is a TestConfigSource parameterized with the fallbackMR/inferParallelism flags under test.
	doAnswer(invocation -> {
		TableSourceFactory.Context context = invocation.getArgument(0);
		return new TestConfigSource(
				new JobConf(hiveCatalog.getHiveConf()),
				context.getConfiguration(),
				context.getObjectIdentifier().toObjectPath(),
				context.getTable(),
				fallbackMR,
				inferParallelism);
	}).when(tableFactorySpy).createTableSource(any(TableSourceFactory.Context.class));

	HiveCatalog catalogSpy = spy(hiveCatalog);
	doReturn(Optional.of(tableFactorySpy)).when(catalogSpy).getTableFactory();

	TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode();
	tableEnv.getConfig().getConfiguration().setBoolean(
			HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER, fallbackMR);
	tableEnv.getConfig().getConfiguration().setBoolean(
			HiveOptions.TABLE_EXEC_HIVE_INFER_SOURCE_PARALLELISM, inferParallelism);
	tableEnv.getConfig().getConfiguration().setInteger(
			ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 2);
	tableEnv.registerCatalog(catalogSpy.getName(), catalogSpy);
	tableEnv.useCatalog(catalogSpy.getName());

	List<Row> results = Lists.newArrayList(
			tableEnv.sqlQuery("select * from db1.src order by x").execute().collect());
	assertEquals("[1,a, 2,b]", results.toString());
}
 
Example 6
Source File: HiveTableSourceITCase.java    From flink with Apache License 2.0
@Test
public void testPartitionFilterDateTimestamp() throws Exception {
	TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode(SqlDialect.HIVE);
	TestPartitionFilterCatalog catalog = new TestPartitionFilterCatalog(
			hiveCatalog.getName(), hiveCatalog.getDefaultDatabase(), hiveCatalog.getHiveConf(), hiveCatalog.getHiveVersion());
	tableEnv.registerCatalog(catalog.getName(), catalog);
	tableEnv.useCatalog(catalog.getName());
	tableEnv.executeSql("create database db1");
	try {
		tableEnv.executeSql("create table db1.part(x int) partitioned by (p1 date,p2 timestamp)");
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "part")
				.addRow(new Object[]{1}).commit("p1='2018-08-08',p2='2018-08-08 08:08:08'");
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "part")
				.addRow(new Object[]{2}).commit("p1='2018-08-09',p2='2018-08-08 08:08:09'");
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "part")
				.addRow(new Object[]{3}).commit("p1='2018-08-10',p2='2018-08-08 08:08:10'");

		Table query = tableEnv.sqlQuery(
				"select x from db1.part where p1>cast('2018-08-09' as date) and p2<>cast('2018-08-08 08:08:09' as timestamp)");
		// explain() output is delimited by '== ... ==' section headers; index 2 below
		// is the optimized plan section.
		String[] explain = query.explain().split("==.*==\n");
		assertTrue(catalog.fallback);
		String optimizedPlan = explain[2];
		assertTrue(optimizedPlan, optimizedPlan.contains("PartitionPruned: true, PartitionNums: 1"));
		List<Row> results = Lists.newArrayList(query.execute().collect());
		assertEquals("[3]", results.toString());
		System.out.println(results);
	} finally {
		tableEnv.executeSql("drop database db1 cascade");
	}
}
 
Example 7
Source File: TableEnvHiveConnectorITCase.java    From flink with Apache License 2.0
private TableEnvironment getStreamTableEnvWithHiveCatalog() {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithBlinkPlannerStreamMode(env, SqlDialect.HIVE);
	tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
	tableEnv.useCatalog(hiveCatalog.getName());
	return tableEnv;
}
 
Example 8
Source File: HiveCatalogITCase.java    From flink with Apache License 2.0
@Test
public void testNewTableFactory() {
	TableEnvironment tEnv = TableEnvironment.create(
			EnvironmentSettings.newInstance().inBatchMode().build());
	tEnv.registerCatalog("myhive", hiveCatalog);
	tEnv.useCatalog("myhive");
	tEnv.getConfig().getConfiguration().set(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1);

	String path = this.getClass().getResource("/csv/test.csv").getPath();

	PrintStream originalSystemOut = System.out;
	try {
		// Redirect System.out so the rows emitted by the 'print' connector can be captured.
		ByteArrayOutputStream arrayOutputStream = new ByteArrayOutputStream();
		System.setOut(new PrintStream(arrayOutputStream));

		tEnv.executeSql("create table csv_table (name String, age Int) with (" +
				"'connector.type' = 'filesystem'," +
				"'connector.path' = 'file://" + path + "'," +
				"'format.type' = 'csv')");
		tEnv.executeSql("create table print_table (name String, age Int) with ('connector' = 'print')");

		TableEnvUtil.execInsertSqlAndWaitResult(tEnv, "insert into print_table select * from csv_table");

		// assert the captured query result; the print connector prefixes each row with its changelog kind (+I = insert)
		assertEquals("+I(1,1)\n+I(2,2)\n+I(3,3)\n", arrayOutputStream.toString());
	} finally {
		if (System.out != originalSystemOut) {
			System.out.close();
		}
		System.setOut(originalSystemOut);
		tEnv.executeSql("DROP TABLE csv_table");
		tEnv.executeSql("DROP TABLE print_table");
	}
}
 
Example 9
Source File: HiveCatalogITCase.java    From flink with Apache License 2.0
@Test
public void testReadWriteCsv() throws Exception {
	// similar to CatalogTableITCase::testReadWriteCsvUsingDDL but uses HiveCatalog
	EnvironmentSettings settings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
	TableEnvironment tableEnv = TableEnvironment.create(settings);
	tableEnv.getConfig().getConfiguration().setInteger(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1);

	tableEnv.registerCatalog("myhive", hiveCatalog);
	tableEnv.useCatalog("myhive");

	String srcPath = this.getClass().getResource("/csv/test3.csv").getPath();

	tableEnv.executeSql("CREATE TABLE src (" +
			"price DECIMAL(10, 2),currency STRING,ts6 TIMESTAMP(6),ts AS CAST(ts6 AS TIMESTAMP(3)),WATERMARK FOR ts AS ts) " +
			String.format("WITH ('connector.type' = 'filesystem','connector.path' = 'file://%s','format.type' = 'csv')", srcPath));

	String sinkPath = new File(tempFolder.newFolder(), "csv-order-sink").toURI().toString();

	tableEnv.executeSql("CREATE TABLE sink (" +
			"window_end TIMESTAMP(3),max_ts TIMESTAMP(6),counter BIGINT,total_price DECIMAL(10, 2)) " +
			String.format("WITH ('connector.type' = 'filesystem','connector.path' = '%s','format.type' = 'csv')", sinkPath));

	TableEnvUtil.execInsertSqlAndWaitResult(tableEnv, "INSERT INTO sink " +
			"SELECT TUMBLE_END(ts, INTERVAL '5' SECOND),MAX(ts6),COUNT(*),MAX(price) FROM src " +
			"GROUP BY TUMBLE(ts, INTERVAL '5' SECOND)");

	String expected = "2019-12-12 00:00:05.0,2019-12-12 00:00:04.004001,3,50.00\n" +
			"2019-12-12 00:00:10.0,2019-12-12 00:00:06.006001,2,5.33\n";
	assertEquals(expected, FileUtils.readFileUtf8(new File(new URI(sinkPath))));
}
 
Example 10
Source File: HiveTableSinkITCase.java    From flink with Apache License 2.0
private void assertBatch(String table, List<String> expected) {
	// using batch table env to query.
	List<String> results = new ArrayList<>();
	TableEnvironment batchTEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode();
	batchTEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
	batchTEnv.useCatalog(hiveCatalog.getName());
	batchTEnv.executeSql("select * from " + table).collect()
			.forEachRemaining(r -> results.add(r.toString()));
	results.sort(String::compareTo);
	expected.sort(String::compareTo);
	Assert.assertEquals(expected, results);
}
 
Example 11
Source File: HiveTableSinkITCase.java    From flink with Apache License 2.0
@Test
public void testWriteNullValues() throws Exception {
	TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode(SqlDialect.HIVE);
	tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
	tableEnv.useCatalog(hiveCatalog.getName());
	tableEnv.executeSql("create database db1");
	try {
		// 17 data types
		tableEnv.executeSql("create table db1.src" +
				"(t tinyint,s smallint,i int,b bigint,f float,d double,de decimal(10,5),ts timestamp,dt date," +
				"str string,ch char(5),vch varchar(8),bl boolean,bin binary,arr array<int>,mp map<int,string>,strt struct<f1:int,f2:string>)");
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "src")
				.addRow(new Object[]{null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null, null})
				.commit();
		hiveShell.execute("create table db1.dest like db1.src");

		TableEnvUtil.execInsertSqlAndWaitResult(tableEnv, "insert into db1.dest select * from db1.src");
		List<String> results = hiveShell.executeQuery("select * from db1.dest");
		assertEquals(1, results.size());
		String[] cols = results.get(0).split("\t");
		assertEquals(17, cols.length);
		assertEquals("NULL", cols[0]);
		assertEquals(1, new HashSet<>(Arrays.asList(cols)).size());
	} finally {
		tableEnv.executeSql("drop database db1 cascade");
	}
}
 
Example 12
Source File: ExecutionContextTest.java    From flink with Apache License 2.0
@Test
public void testDatabases() throws Exception {
	final String hiveCatalog = "hivecatalog";

	final ExecutionContext<?> context = createCatalogExecutionContext();
	final TableEnvironment tableEnv = context.createEnvironmentInstance().getTableEnvironment();

	assertEquals(1, tableEnv.listDatabases().length);
	assertEquals("mydatabase", tableEnv.listDatabases()[0]);

	tableEnv.useCatalog(hiveCatalog);

	assertEquals(2, tableEnv.listDatabases().length);
	assertEquals(
		new HashSet<>(
			Arrays.asList(
				HiveCatalog.DEFAULT_DB,
				DependencyTest.TestHiveCatalogFactory.ADDITIONAL_TEST_DATABASE)
		),
		new HashSet<>(Arrays.asList(tableEnv.listDatabases()))
	);

	tableEnv.useCatalog(hiveCatalog);

	assertEquals(HiveCatalog.DEFAULT_DB, tableEnv.getCurrentDatabase());

	tableEnv.useDatabase(DependencyTest.TestHiveCatalogFactory.ADDITIONAL_TEST_DATABASE);

	assertEquals(DependencyTest.TestHiveCatalogFactory.ADDITIONAL_TEST_DATABASE, tableEnv.getCurrentDatabase());
}
 
Example 13
Source File: ExecutionContextTest.java    From flink with Apache License 2.0
@Test
public void testDatabases() throws Exception {
	final String hiveCatalog = "hivecatalog";

	final ExecutionContext<?> context = createCatalogExecutionContext();
	final TableEnvironment tableEnv = context.getTableEnvironment();

	assertEquals(1, tableEnv.listDatabases().length);
	assertEquals("mydatabase", tableEnv.listDatabases()[0]);

	tableEnv.useCatalog(hiveCatalog);

	assertEquals(2, tableEnv.listDatabases().length);
	assertEquals(
		new HashSet<>(
			Arrays.asList(
				HiveCatalog.DEFAULT_DB,
				DependencyTest.TestHiveCatalogFactory.ADDITIONAL_TEST_DATABASE)
		),
		new HashSet<>(Arrays.asList(tableEnv.listDatabases()))
	);

	tableEnv.useCatalog(hiveCatalog);

	assertEquals(HiveCatalog.DEFAULT_DB, tableEnv.getCurrentDatabase());

	tableEnv.useDatabase(DependencyTest.TestHiveCatalogFactory.ADDITIONAL_TEST_DATABASE);

	assertEquals(DependencyTest.TestHiveCatalogFactory.ADDITIONAL_TEST_DATABASE, tableEnv.getCurrentDatabase());

	context.close();
}
 
Example 14
Source File: CatalogITest.java    From pulsar-flink with Apache License 2.0
@Test
public void testTableReadStartFromLatestByDefault() throws Exception {
    String pulsarCatalog1 = "pulsarcatalog1";

    String tableName = newTopic();

    sendTypedMessages(tableName, SchemaType.INT32, INTEGER_LIST, Optional.empty());

    ExecutionContext context = createExecutionContext(CATALOGS_ENVIRONMENT_FILE, getStreamingConfs());
    TableEnvironment tableEnv = context.createEnvironmentInstance().getTableEnvironment();

    tableEnv.useCatalog(pulsarCatalog1);

    Table t = tableEnv.scan(TopicName.get(tableName).getLocalName()).select("value");
    DataStream stream = ((StreamTableEnvironment) tableEnv).toAppendStream(t, t.getSchema().toRowType());
    stream.map(new FailingIdentityMapper<Row>(INTEGER_LIST.size()))
            .addSink(new SingletonStreamSink.StringSink<>()).setParallelism(1);

    Thread runner = new Thread("runner") {
        @Override
        public void run() {
            try {
                tableEnv.execute("read from latest");
            } catch (Throwable e) {
                // do nothing
            }
        }
    };

    runner.start();

    Thread.sleep(2000);
    sendTypedMessages(tableName, SchemaType.INT32, INTEGER_LIST, Optional.empty());

    Thread.sleep(2000);
    SingletonStreamSink.compareWithList(INTEGER_LIST.subList(0, INTEGER_LIST.size() - 1).stream().map(Objects::toString).collect(Collectors.toList()));
}
 
Example 15
Source File: ExecutionContextTest.java    From flink with Apache License 2.0
@Test
public void testCatalogs() throws Exception {
	final String inmemoryCatalog = "inmemorycatalog";
	final String hiveCatalog = "hivecatalog";
	final String hiveDefaultVersionCatalog = "hivedefaultversion";

	final ExecutionContext<?> context = createCatalogExecutionContext();
	final TableEnvironment tableEnv = context.getTableEnvironment();

	assertEquals(inmemoryCatalog, tableEnv.getCurrentCatalog());
	assertEquals("mydatabase", tableEnv.getCurrentDatabase());

	Catalog catalog = tableEnv.getCatalog(hiveCatalog).orElse(null);
	assertNotNull(catalog);
	assertTrue(catalog instanceof HiveCatalog);
	assertEquals("2.3.4", ((HiveCatalog) catalog).getHiveVersion());

	catalog = tableEnv.getCatalog(hiveDefaultVersionCatalog).orElse(null);
	assertNotNull(catalog);
	assertTrue(catalog instanceof HiveCatalog);
	// make sure we have assigned a default hive version
	assertFalse(StringUtils.isNullOrWhitespaceOnly(((HiveCatalog) catalog).getHiveVersion()));

	tableEnv.useCatalog(hiveCatalog);

	assertEquals(hiveCatalog, tableEnv.getCurrentCatalog());

	Set<String> allCatalogs = new HashSet<>(Arrays.asList(tableEnv.listCatalogs()));
	assertEquals(6, allCatalogs.size());
	assertEquals(
		new HashSet<>(
			Arrays.asList(
				"default_catalog",
				inmemoryCatalog,
				hiveCatalog,
				hiveDefaultVersionCatalog,
				"catalog1",
				"catalog2")
		),
		allCatalogs
	);

	context.close();
}
 
Example 16
Source File: HiveTestUtils.java    From flink with Apache License 2.0
public static TableEnvironment createTableEnvWithHiveCatalog(HiveCatalog catalog) {
	TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode();
	tableEnv.registerCatalog(catalog.getName(), catalog);
	tableEnv.useCatalog(catalog.getName());
	return tableEnv;
}
 
Example 17
Source File: ExecutionContextTest.java    From flink with Apache License 2.0
@Test
public void testCatalogs() throws Exception {
	final String inmemoryCatalog = "inmemorycatalog";
	final String hiveCatalog = "hivecatalog";
	final String hiveDefaultVersionCatalog = "hivedefaultversion";

	final ExecutionContext<?> context = createCatalogExecutionContext();
	final TableEnvironment tableEnv = context.createEnvironmentInstance().getTableEnvironment();

	assertEquals(inmemoryCatalog, tableEnv.getCurrentCatalog());
	assertEquals("mydatabase", tableEnv.getCurrentDatabase());

	Catalog catalog = tableEnv.getCatalog(hiveCatalog).orElse(null);
	assertNotNull(catalog);
	assertTrue(catalog instanceof HiveCatalog);
	assertEquals("2.3.4", ((HiveCatalog) catalog).getHiveVersion());

	catalog = tableEnv.getCatalog(hiveDefaultVersionCatalog).orElse(null);
	assertNotNull(catalog);
	assertTrue(catalog instanceof HiveCatalog);
	// make sure we have assigned a default hive version
	assertFalse(StringUtils.isNullOrWhitespaceOnly(((HiveCatalog) catalog).getHiveVersion()));

	tableEnv.useCatalog(hiveCatalog);

	assertEquals(hiveCatalog, tableEnv.getCurrentCatalog());

	Set<String> allCatalogs = new HashSet<>(Arrays.asList(tableEnv.listCatalogs()));
	assertEquals(6, allCatalogs.size());
	assertEquals(
		new HashSet<>(
			Arrays.asList(
				"default_catalog",
				inmemoryCatalog,
				hiveCatalog,
				hiveDefaultVersionCatalog,
				"catalog1",
				"catalog2")
		),
		allCatalogs
	);
}
 
Example 18
Source File: TableEnvHiveConnectorITCase.java    From flink with Apache License 2.0
private TableEnvironment getTableEnvWithHiveCatalog() {
	TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode(SqlDialect.HIVE);
	tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
	tableEnv.useCatalog(hiveCatalog.getName());
	return tableEnv;
}
 
Example 19
Source File: TableEnvHiveConnectorTest.java    From flink with Apache License 2.0
private TableEnvironment getTableEnvWithHiveCatalog() {
	TableEnvironment tableEnv = HiveTestUtils.createTableEnv();
	tableEnv.registerCatalog(hiveCatalog.getName(), hiveCatalog);
	tableEnv.useCatalog(hiveCatalog.getName());
	return tableEnv;
}
 
Example 20
Source File: HiveTableSourceITCase.java    From flink with Apache License 2.0
@Test
public void testPartitionFilter() throws Exception {
	TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode(SqlDialect.HIVE);
	TestPartitionFilterCatalog catalog = new TestPartitionFilterCatalog(
			hiveCatalog.getName(), hiveCatalog.getDefaultDatabase(), hiveCatalog.getHiveConf(), hiveCatalog.getHiveVersion());
	tableEnv.registerCatalog(catalog.getName(), catalog);
	tableEnv.useCatalog(catalog.getName());
	tableEnv.executeSql("create database db1");
	try {
		tableEnv.executeSql("create table db1.part(x int) partitioned by (p1 int,p2 string)");
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "part")
				.addRow(new Object[]{1}).commit("p1=1,p2='a'");
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "part")
				.addRow(new Object[]{2}).commit("p1=2,p2='b'");
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "part")
				.addRow(new Object[]{3}).commit("p1=3,p2='c'");
		// test string partition columns with special characters
		HiveTestUtils.createTextTableInserter(hiveShell, "db1", "part")
				.addRow(new Object[]{4}).commit("p1=4,p2='c:2'");
		Table query = tableEnv.sqlQuery("select x from db1.part where p1>1 or p2<>'a' order by x");
		String[] explain = query.explain().split("==.*==\n");
		assertFalse(catalog.fallback);
		String optimizedPlan = explain[2];
		assertTrue(optimizedPlan, optimizedPlan.contains("PartitionPruned: true, PartitionNums: 3"));
		List<Row> results = Lists.newArrayList(query.execute().collect());
		assertEquals("[2, 3, 4]", results.toString());

		query = tableEnv.sqlQuery("select x from db1.part where p1>2 and p2<='a' order by x");
		explain = query.explain().split("==.*==\n");
		assertFalse(catalog.fallback);
		optimizedPlan = explain[2];
		assertTrue(optimizedPlan, optimizedPlan.contains("PartitionPruned: true, PartitionNums: 0"));
		results = Lists.newArrayList(query.execute().collect());
		assertEquals("[]", results.toString());

		query = tableEnv.sqlQuery("select x from db1.part where p1 in (1,3,5) order by x");
		explain = query.explain().split("==.*==\n");
		assertFalse(catalog.fallback);
		optimizedPlan = explain[2];
		assertTrue(optimizedPlan, optimizedPlan.contains("PartitionPruned: true, PartitionNums: 2"));
		results = Lists.newArrayList(query.execute().collect());
		assertEquals("[1, 3]", results.toString());

		query = tableEnv.sqlQuery("select x from db1.part where (p1=1 and p2='a') or ((p1=2 and p2='b') or p2='d') order by x");
		explain = query.explain().split("==.*==\n");
		assertFalse(catalog.fallback);
		optimizedPlan = explain[2];
		assertTrue(optimizedPlan, optimizedPlan.contains("PartitionPruned: true, PartitionNums: 2"));
		results = Lists.newArrayList(query.execute().collect());
		assertEquals("[1, 2]", results.toString());

		query = tableEnv.sqlQuery("select x from db1.part where p2 = 'c:2' order by x");
		explain = query.explain().split("==.*==\n");
		assertFalse(catalog.fallback);
		optimizedPlan = explain[2];
		assertTrue(optimizedPlan, optimizedPlan.contains("PartitionPruned: true, PartitionNums: 1"));
		results = Lists.newArrayList(query.execute().collect());
		assertEquals("[4]", results.toString());
	} finally {
		tableEnv.executeSql("drop database db1 cascade");
	}
}