Java Code Examples for org.apache.flink.table.catalog.ObjectPath

The following examples show how to use org.apache.flink.table.catalog.ObjectPath. They are extracted from open source projects; the project, source file, and license are noted above each example.
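An ObjectPath names an object inside a catalog, such as a table, view, or function, by pairing a database name with an object name. Before the project examples, here is a minimal, self-contained sketch of the class itself; the database and table names are illustrative.

import org.apache.flink.table.catalog.ObjectPath;

public class ObjectPathSketch {
	public static void main(String[] args) {
		// Construct directly from a database name and an object name...
		ObjectPath direct = new ObjectPath("default_database", "orders");
		// ...or parse the "database.object" string form.
		ObjectPath parsed = ObjectPath.fromString("default_database.orders");

		System.out.println(direct.getDatabaseName()); // default_database
		System.out.println(direct.getObjectName());   // orders
		System.out.println(direct.getFullName());     // default_database.orders
		System.out.println(direct.equals(parsed));    // true
	}
}
 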
Example 1
Source Project: flink   Source File: FunctionITCase.java    License: Apache License 2.0
@Test
public void testAlterFunction() throws Exception {
	String create = "create function f3 as 'org.apache.flink.function.TestFunction'";
	String alter = "alter function f3 as 'org.apache.flink.function.TestFunction2'";

	ObjectPath objectPath = new ObjectPath("default_database", "f3");
	assertTrue(tEnv().getCatalog("default_catalog").isPresent());
	Catalog catalog = tEnv().getCatalog("default_catalog").get();
	tEnv().executeSql(create);
	CatalogFunction beforeUpdate = catalog.getFunction(objectPath);
	assertEquals("org.apache.flink.function.TestFunction", beforeUpdate.getClassName());

	tEnv().executeSql(alter);
	CatalogFunction afterUpdate = catalog.getFunction(objectPath);
	assertEquals("org.apache.flink.function.TestFunction2", afterUpdate.getClassName());
}
 
Example 2
Source Project: flink   Source File: HiveDialectITCase.java    License: Apache License 2.0
@Test
public void testAddDropPartitions() throws Exception {
	tableEnv.executeSql("create table tbl (x int,y binary) partitioned by (dt date,country string)");
	tableEnv.executeSql("alter table tbl add partition (dt='2020-04-30',country='china') partition (dt='2020-04-30',country='us')");

	ObjectPath tablePath = new ObjectPath("default", "tbl");
	assertEquals(2, hiveCatalog.listPartitions(tablePath).size());

	String partLocation = warehouse + "/part3_location";
	tableEnv.executeSql(String.format(
			"alter table tbl add partition (dt='2020-05-01',country='belgium') location '%s'", partLocation));
	Table hiveTable = hiveCatalog.getHiveTable(tablePath);
	CatalogPartitionSpec spec = new CatalogPartitionSpec(new LinkedHashMap<String, String>() {{
		put("dt", "2020-05-01");
		put("country", "belgium");
	}});
	Partition hivePartition = hiveCatalog.getHivePartition(hiveTable, spec);
	assertEquals(partLocation, locationPath(hivePartition.getSd().getLocation()));

	tableEnv.executeSql("alter table tbl drop partition (dt='2020-04-30',country='china'),partition (dt='2020-05-01',country='belgium')");
	assertEquals(1, hiveCatalog.listPartitions(tablePath).size());
}
 
Example 3
Source Project: flink   Source File: FunctionITCase.java    License: Apache License 2.0
@Test
public void testAlterFunction() throws Exception {
	TableEnvironment tableEnv = getTableEnvironment();
	String create = "create function f3 as 'org.apache.flink.function.TestFunction'";
	String alter = "alter function f3 as 'org.apache.flink.function.TestFunction2'";

	ObjectPath objectPath = new ObjectPath("default_database", "f3");
	assertTrue(tableEnv.getCatalog("default_catalog").isPresent());
	Catalog catalog = tableEnv.getCatalog("default_catalog").get();
	tableEnv.sqlUpdate(create);
	CatalogFunction beforeUpdate = catalog.getFunction(objectPath);
	assertEquals("org.apache.flink.function.TestFunction", beforeUpdate.getClassName());

	tableEnv.sqlUpdate(alter);
	CatalogFunction afterUpdate = catalog.getFunction(objectPath);
	assertEquals("org.apache.flink.function.TestFunction2", afterUpdate.getClassName());
}
 
Example 4
Source Project: flink   Source File: HiveCatalogGenericMetadataTest.java    License: Apache License 2.0
@Test
public void testGenericTableSchema() throws Exception {
	catalog.createDatabase(db1, createDb(), false);

	TableSchema tableSchema = TableSchema.builder()
			.fields(new String[]{"col1", "col2", "col3"},
					new DataType[]{DataTypes.TIMESTAMP(3), DataTypes.TIMESTAMP(6), DataTypes.TIMESTAMP(9)})
			.watermark("col3", "col3", DataTypes.TIMESTAMP(9))
			.build();

	ObjectPath tablePath = new ObjectPath(db1, "generic_table");
	try {
		catalog.createTable(tablePath,
				new CatalogTableImpl(tableSchema, getBatchTableProperties(), TEST_COMMENT),
				false);

		assertEquals(tableSchema, catalog.getTable(tablePath).getSchema());
	} finally {
		catalog.dropTable(tablePath, true);
	}
}
 
Example 5
Source Project: flink   Source File: SqlToOperationConverterTest.java    License: Apache License 2.0
@Test
public void testAlterTableAddUniqueConstraint() throws Exception {
	Catalog catalog = new GenericInMemoryCatalog("default", "default");
	catalogManager.registerCatalog("cat1", catalog);
	catalog.createDatabase("db1", new CatalogDatabaseImpl(new HashMap<>(), null), true);
	CatalogTable catalogTable = new CatalogTableImpl(
			TableSchema.builder()
					.field("a", DataTypes.STRING().notNull())
					.field("b", DataTypes.BIGINT().notNull())
					.build(),
			new HashMap<>(),
			"tb1");
	catalogManager.setCurrentCatalog("cat1");
	catalogManager.setCurrentDatabase("db1");
	catalog.createTable(new ObjectPath("db1", "tb1"), catalogTable, true);
	// Test alter add table constraint.
	thrown.expect(UnsupportedOperationException.class);
	thrown.expectMessage("UNIQUE constraint is not supported yet");
	parse("alter table tb1 add constraint ct1 unique(a, b) not enforced",
			SqlDialect.DEFAULT);
}
 
Example 6
Source Project: flink   Source File: TableEnvHiveConnectorITCase.java    License: Apache License 2.0
@Test
public void testParquetNameMapping() throws Exception {
	TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
	tableEnv.executeSql("create database db1");
	try {
		tableEnv.executeSql("create table db1.t1 (x int,y int) stored as parquet");
		TableEnvUtil.execInsertSqlAndWaitResult(tableEnv, "insert into table db1.t1 values (1,10),(2,20)");
		Table hiveTable = hiveCatalog.getHiveTable(new ObjectPath("db1", "t1"));
		String location = hiveTable.getSd().getLocation();
		tableEnv.executeSql(String.format("create table db1.t2 (y int,x int) stored as parquet location '%s'", location));
		tableEnv.getConfig().getConfiguration().setBoolean(HiveOptions.TABLE_EXEC_HIVE_FALLBACK_MAPRED_READER, true);
		assertEquals("[1, 2]", Lists.newArrayList(tableEnv.sqlQuery("select x from db1.t1").execute().collect()).toString());
		assertEquals("[1, 2]", Lists.newArrayList(tableEnv.sqlQuery("select x from db1.t2").execute().collect()).toString());
	} finally {
		tableEnv.executeSql("drop database db1 cascade");
	}
}
 
Example 7
Source Project: flink   Source File: SqlToOperationConverterTest.java    License: Apache License 2.0
@Before
public void before() throws TableAlreadyExistException, DatabaseNotExistException {
	final ObjectPath path1 = new ObjectPath(catalogManager.getCurrentDatabase(), "t1");
	final ObjectPath path2 = new ObjectPath(catalogManager.getCurrentDatabase(), "t2");
	final TableSchema tableSchema = TableSchema.builder()
		.field("a", DataTypes.BIGINT())
		.field("b", DataTypes.VARCHAR(Integer.MAX_VALUE))
		.field("c", DataTypes.INT())
		.field("d", DataTypes.VARCHAR(Integer.MAX_VALUE))
		.build();
	Map<String, String> properties = new HashMap<>();
	properties.put("connector", "COLLECTION");
	final CatalogTable catalogTable = new CatalogTableImpl(tableSchema, properties, "");
	catalog.createTable(path1, catalogTable, true);
	catalog.createTable(path2, catalogTable, true);
}
 
Example 8
Source Project: flink   Source File: HiveCatalog.java    License: Apache License 2.0
@Override
public List<CatalogPartitionSpec> listPartitions(ObjectPath tablePath)
		throws TableNotExistException, TableNotPartitionedException, CatalogException {
	checkNotNull(tablePath, "Table path cannot be null");

	Table hiveTable = getHiveTable(tablePath);

	ensurePartitionedTable(tablePath, hiveTable);

	try {
		// pass -1 as max_parts to fetch all partitions
		return client.listPartitionNames(tablePath.getDatabaseName(), tablePath.getObjectName(), (short) -1).stream()
			.map(HiveCatalog::createPartitionSpec).collect(Collectors.toList());
	} catch (TException e) {
		throw new CatalogException(
			String.format("Failed to list partitions of table %s", tablePath), e);
	}
}
 
Example 9
Source Project: flink   Source File: HiveCatalog.java    License: Apache License 2.0
private Partition instantiateHivePartition(Table hiveTable, CatalogPartitionSpec partitionSpec, CatalogPartition catalogPartition)
		throws PartitionSpecInvalidException {
	List<String> partCols = getFieldNames(hiveTable.getPartitionKeys());
	List<String> partValues = getOrderedFullPartitionValues(
		partitionSpec, partCols, new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName()));
	// validate partition values
	for (int i = 0; i < partCols.size(); i++) {
		if (StringUtils.isNullOrWhitespaceOnly(partValues.get(i))) {
			throw new PartitionSpecInvalidException(getName(), partCols,
				new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName()), partitionSpec);
		}
	}
	// TODO: handle GenericCatalogPartition
	StorageDescriptor sd = hiveTable.getSd().deepCopy();
	sd.setLocation(catalogPartition.getProperties().remove(HiveCatalogConfig.PARTITION_LOCATION));

	Map<String, String> properties = new HashMap<>(catalogPartition.getProperties());
	properties.put(HiveCatalogConfig.COMMENT, catalogPartition.getComment());

	return HiveTableUtil.createHivePartition(
			hiveTable.getDbName(),
			hiveTable.getTableName(),
			partValues,
			sd,
			properties);
}
 
Example 10
Source Project: flink   Source File: HiveCatalog.java    License: Apache License 2.0
private static Function instantiateHiveFunction(ObjectPath functionPath, CatalogFunction function) {

	boolean isGeneric = Boolean.valueOf(function.getProperties().get(CatalogConfig.IS_GENERIC));

	// Hive Function does not have properties map
	// thus, use a prefix in class name to distinguish Flink and Hive functions
	String functionClassName = isGeneric ?
		FLINK_FUNCTION_PREFIX + function.getClassName() :
		function.getClassName();

	return new Function(
		// due to https://issues.apache.org/jira/browse/HIVE-22053, we have to normalize function name ourselves
		HiveStringUtils.normalizeIdentifier(functionPath.getObjectName()),
		functionPath.getDatabaseName(),
		functionClassName,
		null,			// Owner name
		PrincipalType.GROUP,	// Temporarily set to GROUP type because it's required by Hive. May change later
		(int) (System.currentTimeMillis() / 1000),
		FunctionType.JAVA,		// FunctionType only has JAVA now
		new ArrayList<>()		// Resource URIs
	);
}
 
Example 11
Source Project: flink   Source File: HiveTableOutputFormat.java    License: Apache License 2.0
public HiveTableOutputFormat(JobConf jobConf, ObjectPath tablePath, CatalogTable table, HiveTablePartition hiveTablePartition,
							Properties tableProperties, boolean overwrite) {
	super(jobConf.getCredentials());

	Preconditions.checkNotNull(table, "table cannot be null");
	Preconditions.checkNotNull(hiveTablePartition, "HiveTablePartition cannot be null");
	Preconditions.checkNotNull(tableProperties, "Table properties cannot be null");

	HadoopUtils.mergeHadoopConf(jobConf);
	this.jobConf = jobConf;
	this.tablePath = tablePath;
	this.partitionColumns = table.getPartitionKeys();
	TableSchema tableSchema = table.getSchema();
	this.fieldNames = tableSchema.getFieldNames();
	this.fieldTypes = tableSchema.getFieldDataTypes();
	this.hiveTablePartition = hiveTablePartition;
	this.tableProperties = tableProperties;
	this.overwrite = overwrite;
	isPartitioned = partitionColumns != null && !partitionColumns.isEmpty();
	isDynamicPartition = isPartitioned && partitionColumns.size() > hiveTablePartition.getPartitionSpec().size();
	hiveVersion = Preconditions.checkNotNull(jobConf.get(HiveCatalogValidator.CATALOG_HIVE_VERSION),
			"Hive version is not defined");
}
 
Example 12
Source Project: flink   Source File: CatalogStatisticsTest.java    License: Apache License 2.0
private void createPartitionStats(
		String part1, int part2, long rowCount) throws Exception {
	ObjectPath path = ObjectPath.fromString("default_database.PartT");

	LinkedHashMap<String, String> partSpecMap = new LinkedHashMap<>();
	partSpecMap.put("part1", part1);
	partSpecMap.put("part2", String.valueOf(part2));
	CatalogPartitionSpec partSpec = new CatalogPartitionSpec(partSpecMap);
	catalog.createPartition(
			path,
			partSpec,
			new CatalogPartitionImpl(new HashMap<>(), ""),
			true);
	catalog.alterPartitionStatistics(
			path,
			partSpec,
			new CatalogTableStatistics(rowCount, 10, 1000L, 2000L),
			true);
}
 
Example 13
Source Project: flink   Source File: DatabaseCalciteSchema.java    License: Apache License 2.0
private Table convertCatalogTable(ObjectPath tablePath, CatalogTable table) {
	TableSource<?> tableSource;
	Optional<TableFactory> tableFactory = catalog.getTableFactory();
	if (tableFactory.isPresent()) {
		TableFactory tf = tableFactory.get();
		if (tf instanceof TableSourceFactory) {
			tableSource = ((TableSourceFactory) tf).createTableSource(tablePath, table);
		} else {
			throw new TableException(String.format("Cannot query a sink-only table. TableFactory provided by catalog %s must implement TableSourceFactory",
				catalog.getClass()));
		}
	} else {
		tableSource = TableFactoryUtil.findAndCreateTableSource(table);
	}

	if (!(tableSource instanceof StreamTableSource)) {
		throw new TableException("Catalog tables support only StreamTableSource and InputFormatTableSource");
	}

	return new TableSourceTable<>(
		tableSource,
		!((StreamTableSource<?>) tableSource).isBounded(),
		FlinkStatistic.UNKNOWN()
	);
}
 
Example 14
Source Project: flink   Source File: JavaCatalogTableTest.java    License: Apache License 2.0
@Test
public void testResolvingSchemaOfCustomCatalogTableTableApi() throws Exception {
	TableTestUtil testUtil = getTestUtil();
	TableEnvironment tableEnvironment = testUtil.getTableEnv();
	GenericInMemoryCatalog genericInMemoryCatalog = new GenericInMemoryCatalog("in-memory");
	genericInMemoryCatalog.createTable(
		new ObjectPath("default", "testTable"),
		new CustomCatalogTable(isStreamingMode),
		false);
	tableEnvironment.registerCatalog("testCatalog", genericInMemoryCatalog);

	Table table = tableEnvironment.from("testCatalog.`default`.testTable")
		.window(Tumble.over(lit(10).minute()).on($("rowtime")).as("w"))
		.groupBy($("w"))
		.select(lit(1).count());
	testUtil.verifyPlan(table);
}
 
Example 15
Source Project: flink   Source File: SqlToOperationConverterTest.java    License: Apache License 2.0
@After
public void after() throws TableNotExistException {
	final ObjectPath path1 = new ObjectPath(catalogManager.getCurrentDatabase(), "t1");
	final ObjectPath path2 = new ObjectPath(catalogManager.getCurrentDatabase(), "t2");
	catalog.dropTable(path1, true);
	catalog.dropTable(path2, true);
}
 
Example 16
Source Project: bahir-flink   Source File: KuduCatalog.java    License: Apache License 2.0
@Override
public boolean tableExists(ObjectPath tablePath) {
    checkNotNull(tablePath);
    try {
        return kuduClient.tableExists(tablePath.getObjectName());
    } catch (KuduException e) {
        throw new CatalogException(e);
    }
}
 
Example 17
Source Project: bahir-flink   Source File: KuduCatalog.java    License: Apache License 2.0
@Override
public void createTable(ObjectPath tablePath, CatalogBaseTable table, boolean ignoreIfExists) throws TableAlreadyExistException {
    Map<String, String> tableProperties = table.getProperties();
    TableSchema tableSchema = table.getSchema();

    Set<String> optionalProperties = new HashSet<>(Arrays.asList(KUDU_REPLICAS));
    Set<String> requiredProperties = new HashSet<>(Arrays.asList(KUDU_HASH_COLS));

    if (!tableSchema.getPrimaryKey().isPresent()) {
        requiredProperties.add(KUDU_PRIMARY_KEY_COLS);
    }

    if (!tableProperties.keySet().containsAll(requiredProperties)) {
        throw new CatalogException("Missing required property. The following properties must be provided: " +
                requiredProperties.toString());
    }

    Set<String> permittedProperties = Sets.union(requiredProperties, optionalProperties);
    if (!permittedProperties.containsAll(tableProperties.keySet())) {
        throw new CatalogException("Unpermitted properties were given. The following properties are allowed:" +
                permittedProperties.toString());
    }

    String tableName = tablePath.getObjectName();

    KuduTableInfo tableInfo = KuduTableUtils.createTableInfo(tableName, tableSchema, tableProperties);

    createTable(tableInfo, ignoreIfExists);
}
 
Example 18
Source Project: sylph   Source File: FlinkSqlParser.java    License: Apache License 2.0
private void translateJoin(JoinInfo joinInfo, Map<String, CreateTable> batchTables)
{
    Table streamTable = getTable(tableEnv, joinInfo.getStreamTable());
    RowTypeInfo streamRowType = (RowTypeInfo) streamTable.getSchema().toRowType();
    DataStream<Row> inputStream = tableEnv.toAppendStream(streamTable, org.apache.flink.types.Row.class);
    inputStream.getTransformation().setOutputType(streamRowType);

    //get batch table schema
    CreateTable batchTable = requireNonNull(batchTables.get(joinInfo.getBatchTable().getName()), "batch table [" + joinInfo.getJoinTableName() + "] does not exist");
    RowTypeInfo batchTableRowType = StreamSqlUtil.schemaToRowTypeInfo(StreamSqlUtil.getTableSchema(batchTable));
    List<SelectField> joinSelectFields = getAllSelectFields(joinInfo, streamRowType, batchTableRowType);

    //It is recommended to do keyby first.
    JoinContext joinContext = JoinContextImpl.createContext(joinInfo, streamRowType, joinSelectFields);
    RealTimeTransForm transForm = getJoinTransForm(joinContext, batchTable);
    DataStream<Row> joinResultStream = AsyncFunctionHelper.translate(inputStream, transForm);

    //set schema
    RowTypeInfo rowTypeInfo = getJoinOutScheam(joinSelectFields);
    joinResultStream.getTransformation().setOutputType(rowTypeInfo);
    //--register tmp joinTable

    Catalog catalog = tableEnv.getCatalog(tableEnv.getCurrentCatalog()).get();
    if (catalog.tableExists(ObjectPath.fromString(joinInfo.getJoinTableName()))) {
        Table table = tableEnv.fromDataStream(joinResultStream);
        CatalogBaseTable tableTable = new QueryOperationCatalogView(table.getQueryOperation());
        try {
            catalog.createTable(ObjectPath.fromString(joinInfo.getJoinTableName()), tableTable, true);
        }
        catch (TableAlreadyExistException | DatabaseNotExistException e) {
            e.printStackTrace();
        }
        //tableEnv.replaceRegisteredTable(joinInfo.getJoinTableName(), new RelTable(table.getRelNode()));
    }
    else {
        tableEnv.registerDataStream(joinInfo.getJoinTableName(), joinResultStream);
    }
    //next update join select query
    joinQueryUpdate(joinInfo, rowTypeInfo.getFieldNames());
}
 
Example 19
Source Project: flink   Source File: HiveCatalogTest.java    License: Apache License 2.0
@Test
public void testCreateGenericTable() {
	Table hiveTable = HiveTableUtil.instantiateHiveTable(
		new ObjectPath("test", "test"),
		new CatalogTableImpl(
			schema,
			new FileSystem().path("/test_path").toProperties(),
			null
		),
		HiveTestUtils.createHiveConf());

	Map<String, String> prop = hiveTable.getParameters();
	assertEquals(prop.remove(CatalogConfig.IS_GENERIC), String.valueOf("true"));
	assertTrue(prop.keySet().stream().allMatch(k -> k.startsWith(CatalogConfig.FLINK_PROPERTY_PREFIX)));
}
 
Example 20
Source Project: flink   Source File: DatabaseCalciteSchema.java    License: Apache License 2.0
private Table convertConnectorTable(
		ConnectorCatalogTable<?, ?> table,
		ObjectPath tablePath) throws TableNotExistException {
	if (table.getTableSource().isPresent()) {
		TableSource<?> tableSource = table.getTableSource().get();
		if (!(tableSource instanceof StreamTableSource ||
				tableSource instanceof LookupableTableSource)) {
			throw new TableException(
					"Only StreamTableSource and LookupableTableSource can be used in Blink planner.");
		}
		if (!isStreamingMode && tableSource instanceof StreamTableSource &&
				!((StreamTableSource<?>) tableSource).isBounded()) {
			throw new TableException("Only bounded StreamTableSource can be used in batch mode.");
		}

		TableStats tableStats = TableStats.UNKNOWN;
		// TODO supports stats for partitionable table
		if (!table.isPartitioned()) {
			CatalogTableStatistics tableStatistics = catalog.getTableStatistics(tablePath);
			CatalogColumnStatistics columnStatistics = catalog.getTableColumnStatistics(tablePath);
			tableStats = convertToTableStats(tableStatistics, columnStatistics);
		}
		return new TableSourceTable<>(
				tableSource,
				isStreamingMode,
				FlinkStatistic.builder().tableStats(tableStats).build());
	} else {
		Optional<TableSinkTable> tableSinkTable = table.getTableSink()
			.map(tableSink -> new TableSinkTable<>(
				tableSink,
				FlinkStatistic.UNKNOWN()));
		if (tableSinkTable.isPresent()) {
			return tableSinkTable.get();
		} else {
			throw new TableException("Cannot convert a connector table " +
				"without either source or sink.");
		}
	}
}
 
Example 21
Source Project: flink   Source File: HiveCatalog.java    License: Apache License 2.0
private String getPartitionName(ObjectPath tablePath, CatalogPartitionSpec partitionSpec, Table hiveTable) throws PartitionSpecInvalidException {
	List<String> partitionCols = getFieldNames(hiveTable.getPartitionKeys());
	List<String> partitionVals = getOrderedFullPartitionValues(partitionSpec, partitionCols, tablePath);
	List<String> partKVs = new ArrayList<>();
	for (int i = 0; i < partitionCols.size(); i++) {
		partKVs.add(partitionCols.get(i) + "=" + partitionVals.get(i));
	}
	return String.join("/", partKVs);
}
 
Example 22
Source Project: pulsar-flink   Source File: PulsarMetadataReader.java    License: Apache License 2.0
public TableSchema getTableSchema(ObjectPath objectPath) throws PulsarAdminException {
    String topicName = objectPath2TopicName(objectPath);
    FieldsDataType fieldsDataType = null;
    try {
        fieldsDataType = getSchema(Collections.singletonList(topicName));
    } catch (IncompatibleSchemaException e) {
        throw new PulsarAdminException(e);
    }
    return SchemaUtils.toTableSchema(fieldsDataType);
}
 
Example 23
Source Project: flink   Source File: HiveCatalog.java    License: Apache License 2.0
@Override
public CatalogBaseTable getTable(ObjectPath tablePath) throws TableNotExistException, CatalogException {
	checkNotNull(tablePath, "tablePath cannot be null");

	Table hiveTable = getHiveTable(tablePath);
	return instantiateCatalogTable(hiveTable, hiveConf);
}
 
Example 24
Source Project: flink   Source File: TableFactoryUtil.java    License: Apache License 2.0
/**
 * Creates a table sink for a {@link CatalogTable} using table factory associated with the catalog.
 */
public static Optional<TableSink> createTableSinkForCatalogTable(Catalog catalog, CatalogTable catalogTable, ObjectPath tablePath) {
	TableFactory tableFactory = catalog.getTableFactory().orElse(null);
	if (tableFactory instanceof TableSinkFactory) {
		return Optional.ofNullable(((TableSinkFactory) tableFactory).createTableSink(tablePath, catalogTable));
	}
	return Optional.empty();
}
 
Example 25
Source Project: flink   Source File: HiveDialectITCase.java    License: Apache License 2.0
@Test
public void testCreateTableWithConstraints() throws Exception {
	Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
	tableEnv.executeSql("create table tbl (x int,y int not null disable novalidate rely,z int not null disable novalidate norely," +
			"constraint pk_name primary key (x) rely)");
	CatalogTable catalogTable = (CatalogTable) hiveCatalog.getTable(new ObjectPath("default", "tbl"));
	TableSchema tableSchema = catalogTable.getSchema();
	assertTrue("PK not present", tableSchema.getPrimaryKey().isPresent());
	assertEquals("pk_name", tableSchema.getPrimaryKey().get().getName());
	assertFalse("PK cannot be null", tableSchema.getFieldDataTypes()[0].getLogicalType().isNullable());
	assertFalse("RELY NOT NULL should be reflected in schema",
			tableSchema.getFieldDataTypes()[1].getLogicalType().isNullable());
	assertTrue("NORELY NOT NULL shouldn't be reflected in schema",
			tableSchema.getFieldDataTypes()[2].getLogicalType().isNullable());
}
 
Example 26
Source Project: bahir-flink   Source File: KuduCatalog.java    License: Apache License 2.0
@Override
public void renameTable(ObjectPath tablePath, String newTableName, boolean ignoreIfNotExists) throws TableNotExistException {
    String tableName = tablePath.getObjectName();
    try {
        if (tableExists(tablePath)) {
            kuduClient.alterTable(tableName, new AlterTableOptions().renameTable(newTableName));
        } else if (!ignoreIfNotExists) {
            throw new TableNotExistException(getName(), tablePath);
        }
    } catch (KuduException e) {
        throw new CatalogException("Could not rename table " + tableName, e);
    }
}
 
Example 27
Source Project: flink   Source File: HiveTableFactory.java    License: Apache License 2.0
@Override
public TableSource<Row> createTableSource(ObjectPath tablePath, CatalogTable table) {
	Preconditions.checkNotNull(table);
	Preconditions.checkArgument(table instanceof CatalogTableImpl);

	boolean isGeneric = Boolean.valueOf(table.getProperties().get(CatalogConfig.IS_GENERIC));

	if (!isGeneric) {
		return createInputFormatTableSource(tablePath, table);
	} else {
		return TableFactoryUtil.findAndCreateTableSource(table);
	}
}
 
Example 28
Source Project: flink   Source File: HiveDialectITCase.java    License: Apache License 2.0
@Test
public void testCreateTable() throws Exception {
	String location = warehouse + "/external_location";
	tableEnv.executeSql(String.format(
			"create external table tbl1 (d decimal(10,0),ts timestamp) partitioned by (p string) location '%s' tblproperties('k1'='v1')", location));
	Table hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl1"));
	assertEquals(TableType.EXTERNAL_TABLE.toString(), hiveTable.getTableType());
	assertEquals(1, hiveTable.getPartitionKeysSize());
	assertEquals(location, locationPath(hiveTable.getSd().getLocation()));
	assertEquals("v1", hiveTable.getParameters().get("k1"));
	assertFalse(hiveTable.getParameters().containsKey(SqlCreateHiveTable.TABLE_LOCATION_URI));

	tableEnv.executeSql("create table tbl2 (s struct<ts:timestamp,bin:binary>) stored as orc");
	hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl2"));
	assertEquals(TableType.MANAGED_TABLE.toString(), hiveTable.getTableType());
	assertEquals(OrcSerde.class.getName(), hiveTable.getSd().getSerdeInfo().getSerializationLib());
	assertEquals(OrcInputFormat.class.getName(), hiveTable.getSd().getInputFormat());
	assertEquals(OrcOutputFormat.class.getName(), hiveTable.getSd().getOutputFormat());

	tableEnv.executeSql("create table tbl3 (m map<timestamp,binary>) partitioned by (p1 bigint,p2 tinyint) " +
			"row format serde 'org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe'");
	hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl3"));
	assertEquals(2, hiveTable.getPartitionKeysSize());
	assertEquals(LazyBinarySerDe.class.getName(), hiveTable.getSd().getSerdeInfo().getSerializationLib());

	tableEnv.executeSql("create table tbl4 (x int,y smallint) row format delimited fields terminated by '|' lines terminated by '\n'");
	hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl4"));
	assertEquals("|", hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.FIELD_DELIM));
	assertEquals("|", hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.SERIALIZATION_FORMAT));
	assertEquals("\n", hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.LINE_DELIM));

	tableEnv.executeSql("create table tbl5 (m map<bigint,string>) row format delimited collection items terminated by ';' " +
			"map keys terminated by ':'");
	hiveTable = hiveCatalog.getHiveTable(new ObjectPath("default", "tbl5"));
	assertEquals(";", hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.COLLECTION_DELIM));
	assertEquals(":", hiveTable.getSd().getSerdeInfo().getParameters().get(serdeConstants.MAPKEY_DELIM));
}