org.apache.flink.table.sinks.StreamTableSink Java Examples

The following examples show how to use org.apache.flink.table.sinks.StreamTableSink. They are drawn from open source projects; the source file, originating project, and license are noted above each example.
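
Before sinks were reworked around DynamicTableSink, a streaming sink was exposed to the planner by implementing StreamTableSink, usually through one of its subinterfaces (AppendStreamTableSink, RetractStreamTableSink, or UpsertStreamTableSink). As orientation for the factory examples below, here is a minimal sketch of an append-only sink against the Flink 1.10/1.11-era interfaces; PrintAppendTableSink is a hypothetical class written for illustration, not part of Flink.

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSink;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.sinks.AppendStreamTableSink;
import org.apache.flink.table.sinks.TableSink;
import org.apache.flink.table.types.DataType;
import org.apache.flink.types.Row;

/** Hypothetical append-only sink that prints every row; for illustration only. */
public class PrintAppendTableSink implements AppendStreamTableSink<Row> {

	private final TableSchema schema;

	public PrintAppendTableSink(TableSchema schema) {
		this.schema = schema;
	}

	@Override
	public TableSchema getTableSchema() {
		return schema;
	}

	@Override
	public DataType getConsumedDataType() {
		return schema.toRowDataType();
	}

	@Override
	public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
		// This is where the table program is handed over to the DataStream API;
		// a real connector would attach its SinkFunction here instead of print().
		return dataStream.print();
	}

	@Override
	public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
		// The schema is fixed at construction time in this sketch.
		return this;
	}
}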
Example #1
Source File: KafkaTableSourceSinkFactoryBase.java    From Flink-CEPplus with Apache License 2.0
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	final TableSchema schema = descriptorProperties.getTableSchema(SCHEMA());
	final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
	final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
	final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
		SchemaValidator.deriveRowtimeAttributes(descriptorProperties);

	// see also FLINK-9870
	if (proctime.isPresent() || !rowtimeAttributeDescriptors.isEmpty() ||
			checkForCustomFieldMapping(descriptorProperties, schema)) {
		throw new TableException("Time attributes and custom field mappings are not supported yet.");
	}

	return createKafkaTableSink(
		schema,
		topic,
		getKafkaProperties(descriptorProperties),
		getFlinkKafkaPartitioner(descriptorProperties),
		getSerializationSchema(properties));
}
 
Example #2
Source File: ElasticsearchUpsertTableSinkFactoryBase.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	return createElasticsearchUpsertTableSink(
		descriptorProperties.isValue(UPDATE_MODE, UPDATE_MODE_VALUE_APPEND),
		descriptorProperties.getTableSchema(SCHEMA),
		getHosts(descriptorProperties),
		descriptorProperties.getString(CONNECTOR_INDEX),
		descriptorProperties.getString(CONNECTOR_DOCUMENT_TYPE),
		descriptorProperties.getOptionalString(CONNECTOR_KEY_DELIMITER).orElse(DEFAULT_KEY_DELIMITER),
		descriptorProperties.getOptionalString(CONNECTOR_KEY_NULL_LITERAL).orElse(DEFAULT_KEY_NULL_LITERAL),
		getSerializationSchema(properties),
		SUPPORTED_CONTENT_TYPE,
		getFailureHandler(descriptorProperties),
		getSinkOptions(descriptorProperties));
}
 
Example #3
Source File: HiveTableFactoryTest.java    From flink with Apache License 2.0
@Test
public void testGenericTable() throws Exception {
	TableSchema schema = TableSchema.builder()
		.field("name", DataTypes.STRING())
		.field("age", DataTypes.INT())
		.build();

	Map<String, String> properties = new HashMap<>();
	properties.put(CatalogConfig.IS_GENERIC, String.valueOf(true));
	properties.put("connector", "COLLECTION");

	catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
	ObjectPath path = new ObjectPath("mydb", "mytable");
	CatalogTable table = new CatalogTableImpl(schema, properties, "csv table");
	catalog.createTable(path, table, true);
	Optional<TableFactory> opt = catalog.getTableFactory();
	assertTrue(opt.isPresent());
	HiveTableFactory tableFactory = (HiveTableFactory) opt.get();
	TableSource tableSource = tableFactory.createTableSource(path, table);
	assertTrue(tableSource instanceof StreamTableSource);
	TableSink tableSink = tableFactory.createTableSink(path, table);
	assertTrue(tableSink instanceof StreamTableSink);
}
 
Example #4
Source File: KafkaTableSourceSinkFactoryBase.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	final TableSchema schema = descriptorProperties.getTableSchema(SCHEMA);
	final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
	final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
	final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
		SchemaValidator.deriveRowtimeAttributes(descriptorProperties);

	// see also FLINK-9870
	if (proctime.isPresent() || !rowtimeAttributeDescriptors.isEmpty() ||
			checkForCustomFieldMapping(descriptorProperties, schema)) {
		throw new TableException("Time attributes and custom field mappings are not supported yet.");
	}

	return createKafkaTableSink(
		schema,
		topic,
		getKafkaProperties(descriptorProperties),
		getFlinkKafkaPartitioner(descriptorProperties),
		getSerializationSchema(properties));
}
 
Example #5
Source File: ElasticsearchUpsertTableSinkFactoryBase.java    From Flink-CEPplus with Apache License 2.0
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	return createElasticsearchUpsertTableSink(
		descriptorProperties.isValue(UPDATE_MODE(), UPDATE_MODE_VALUE_APPEND()),
		descriptorProperties.getTableSchema(SCHEMA()),
		getHosts(descriptorProperties),
		descriptorProperties.getString(CONNECTOR_INDEX),
		descriptorProperties.getString(CONNECTOR_DOCUMENT_TYPE),
		descriptorProperties.getOptionalString(CONNECTOR_KEY_DELIMITER).orElse(DEFAULT_KEY_DELIMITER),
		descriptorProperties.getOptionalString(CONNECTOR_KEY_NULL_LITERAL).orElse(DEFAULT_KEY_NULL_LITERAL),
		getSerializationSchema(properties),
		SUPPORTED_CONTENT_TYPE,
		getFailureHandler(descriptorProperties),
		getSinkOptions(descriptorProperties));
}
 
Example #6
Source File: KafkaTableSourceSinkFactoryBase.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	final TableSchema schema = TableSchemaUtils.getPhysicalSchema(
		descriptorProperties.getTableSchema(SCHEMA));
	final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
	final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
	final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
		SchemaValidator.deriveRowtimeAttributes(descriptorProperties);

	// see also FLINK-9870
	if (proctime.isPresent() || !rowtimeAttributeDescriptors.isEmpty() ||
			checkForCustomFieldMapping(descriptorProperties, schema)) {
		throw new TableException("Time attributes and custom field mappings are not supported yet.");
	}

	return createKafkaTableSink(
		schema,
		topic,
		getKafkaProperties(descriptorProperties),
		getFlinkKafkaPartitioner(descriptorProperties),
		getSerializationSchema(properties));
}
 
Example #7
Source File: JdbcTableSourceSinkFactory.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
	DescriptorProperties descriptorProperties = getValidatedProperties(properties);
	TableSchema schema = TableSchemaUtils.getPhysicalSchema(
		descriptorProperties.getTableSchema(SCHEMA));

	final JdbcUpsertTableSink.Builder builder = JdbcUpsertTableSink.builder()
		.setOptions(getJdbcOptions(descriptorProperties))
		.setTableSchema(schema);

	descriptorProperties.getOptionalInt(CONNECTOR_WRITE_FLUSH_MAX_ROWS).ifPresent(builder::setFlushMaxSize);
	descriptorProperties.getOptionalDuration(CONNECTOR_WRITE_FLUSH_INTERVAL).ifPresent(
		s -> builder.setFlushIntervalMills(s.toMillis()));
	descriptorProperties.getOptionalInt(CONNECTOR_WRITE_MAX_RETRIES).ifPresent(builder::setMaxRetryTimes);

	return builder.build();
}
 
Example #8
Source File: ElasticsearchUpsertTableSinkFactoryBase.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	return createElasticsearchUpsertTableSink(
		descriptorProperties.isValue(UPDATE_MODE, UPDATE_MODE_VALUE_APPEND),
		TableSchemaUtils.getPhysicalSchema(descriptorProperties.getTableSchema(SCHEMA)),
		getHosts(descriptorProperties),
		descriptorProperties.getString(CONNECTOR_INDEX),
		descriptorProperties.getString(CONNECTOR_DOCUMENT_TYPE),
		descriptorProperties.getOptionalString(CONNECTOR_KEY_DELIMITER).orElse(DEFAULT_KEY_DELIMITER),
		descriptorProperties.getOptionalString(CONNECTOR_KEY_NULL_LITERAL).orElse(DEFAULT_KEY_NULL_LITERAL),
		getSerializationSchema(properties),
		SUPPORTED_CONTENT_TYPE,
		getFailureHandler(descriptorProperties),
		getSinkOptions(descriptorProperties));
}
 
Example #9
Source File: TestTableSinkFactoryBase.java    From Flink-CEPplus with Apache License 2.0
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties params = new DescriptorProperties(true);
	params.putProperties(properties);
	return new TestTableSink(
			SchemaValidator.deriveTableSinkSchema(params),
			properties.get(testProperty));
}
 
Example #10
Source File: HBaseTableFactory.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
	HBaseOptions.Builder hbaseOptionsBuilder = HBaseOptions.builder();
	hbaseOptionsBuilder.setZkQuorum(descriptorProperties.getString(CONNECTOR_ZK_QUORUM));
	hbaseOptionsBuilder.setTableName(descriptorProperties.getString(CONNECTOR_TABLE_NAME));
	descriptorProperties
		.getOptionalString(CONNECTOR_ZK_NODE_PARENT)
		.ifPresent(hbaseOptionsBuilder::setZkNodeParent);

	TableSchema tableSchema = descriptorProperties.getTableSchema(SCHEMA);
	HBaseTableSchema hbaseSchema = validateTableSchema(tableSchema);

	HBaseWriteOptions.Builder writeBuilder = HBaseWriteOptions.builder();
	descriptorProperties
		.getOptionalInt(CONNECTOR_WRITE_BUFFER_FLUSH_MAX_ROWS)
		.ifPresent(writeBuilder::setBufferFlushMaxRows);
	descriptorProperties
		.getOptionalMemorySize(CONNECTOR_WRITE_BUFFER_FLUSH_MAX_SIZE)
		.ifPresent(v -> writeBuilder.setBufferFlushMaxSizeInBytes(v.getBytes()));
	descriptorProperties
		.getOptionalDuration(CONNECTOR_WRITE_BUFFER_FLUSH_INTERVAL)
		.ifPresent(v -> writeBuilder.setBufferFlushIntervalMillis(v.toMillis()));

	return new HBaseUpsertTableSink(
		hbaseSchema,
		hbaseOptionsBuilder.build(),
		writeBuilder.build()
	);
}
 
Example #11
Source File: JDBCTableSourceSinkFactory.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	final JDBCUpsertTableSink.Builder builder = JDBCUpsertTableSink.builder()
		.setOptions(getJDBCOptions(descriptorProperties))
		.setTableSchema(descriptorProperties.getTableSchema(SCHEMA));

	descriptorProperties.getOptionalInt(CONNECTOR_WRITE_FLUSH_MAX_ROWS).ifPresent(builder::setFlushMaxSize);
	descriptorProperties.getOptionalDuration(CONNECTOR_WRITE_FLUSH_INTERVAL).ifPresent(
		s -> builder.setFlushIntervalMills(s.toMillis()));
	descriptorProperties.getOptionalInt(CONNECTOR_WRITE_MAX_RETRIES).ifPresent(builder::setMaxRetryTimes);

	return builder.build();
}
 
Example #12
Source File: TestTableSinkFactoryBase.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties params = new DescriptorProperties(true);
	params.putProperties(properties);
	return new TestTableSink(
			SchemaValidator.deriveTableSinkSchema(params),
			properties.get(testProperty));
}
 
Example #13
Source File: StreamTableSinkFactory.java    From flink with Apache License 2.0
/**
 * Only creates a stream table sink.
 */
@Override
default TableSink<T> createTableSink(Map<String, String> properties) {
	StreamTableSink<T> sink = createStreamTableSink(properties);
	if (sink == null) {
		throw new ValidationException(
				"Please override 'createTableSink(Context)' method.");
	}
	return sink;
}
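
This default method bridges the legacy property-based factory into the newer createTableSink(Context) entry point. Factories in this stack are discovered through Java SPI (an entry in META-INF/services/org.apache.flink.table.factories.TableFactory) and matched against normalized properties by TableFactoryService. A minimal lookup sketch follows; the connector.type value "print" and the surrounding helper method are hypothetical.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.factories.StreamTableSinkFactory;
import org.apache.flink.table.factories.TableFactoryService;
import org.apache.flink.table.sinks.StreamTableSink;

public static StreamTableSink<?> discoverSink() {
	// Normalized properties, as produced by the descriptor API or DDL translation.
	Map<String, String> properties = new HashMap<>();
	properties.put("connector.type", "print"); // hypothetical connector identifier
	properties.put("update-mode", "append");

	// TableFactoryService matches requiredContext() and validates supportedProperties().
	StreamTableSinkFactory<?> factory =
			TableFactoryService.find(StreamTableSinkFactory.class, properties);
	return factory.createStreamTableSink(properties);
}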
 
Example #14
Source File: HBaseTableFactory.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
	HBaseOptions.Builder hbaseOptionsBuilder = HBaseOptions.builder();
	hbaseOptionsBuilder.setZkQuorum(descriptorProperties.getString(CONNECTOR_ZK_QUORUM));
	hbaseOptionsBuilder.setTableName(descriptorProperties.getString(CONNECTOR_TABLE_NAME));
	descriptorProperties
		.getOptionalString(CONNECTOR_ZK_NODE_PARENT)
		.ifPresent(hbaseOptionsBuilder::setZkNodeParent);

	TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(
		descriptorProperties.getTableSchema(SCHEMA));
	HBaseTableSchema hbaseSchema = validateTableSchema(tableSchema);

	HBaseWriteOptions.Builder writeBuilder = HBaseWriteOptions.builder();
	descriptorProperties
		.getOptionalInt(CONNECTOR_WRITE_BUFFER_FLUSH_MAX_ROWS)
		.ifPresent(writeBuilder::setBufferFlushMaxRows);
	descriptorProperties
		.getOptionalMemorySize(CONNECTOR_WRITE_BUFFER_FLUSH_MAX_SIZE)
		.ifPresent(v -> writeBuilder.setBufferFlushMaxSizeInBytes(v.getBytes()));
	descriptorProperties
		.getOptionalDuration(CONNECTOR_WRITE_BUFFER_FLUSH_INTERVAL)
		.ifPresent(v -> writeBuilder.setBufferFlushIntervalMillis(v.toMillis()));

	return new HBaseUpsertTableSink(
		hbaseSchema,
		hbaseOptionsBuilder.build(),
		writeBuilder.build()
	);
}
 
Example #15
Source File: HiveTableFactoryTest.java    From flink with Apache License 2.0
@Test
public void testGenericTable() throws Exception {
	TableSchema schema = TableSchema.builder()
		.field("name", DataTypes.STRING())
		.field("age", DataTypes.INT())
		.build();

	Map<String, String> properties = new HashMap<>();
	properties.put(CatalogConfig.IS_GENERIC, String.valueOf(true));
	properties.put("connector", "COLLECTION");

	catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
	ObjectPath path = new ObjectPath("mydb", "mytable");
	CatalogTable table = new CatalogTableImpl(schema, properties, "csv table");
	catalog.createTable(path, table, true);
	Optional<TableFactory> opt = catalog.getTableFactory();
	assertTrue(opt.isPresent());
	HiveTableFactory tableFactory = (HiveTableFactory) opt.get();
	TableSource tableSource = tableFactory.createTableSource(new TableSourceFactoryContextImpl(
			ObjectIdentifier.of("mycatalog", "mydb", "mytable"), table, new Configuration()));
	assertTrue(tableSource instanceof StreamTableSource);
	TableSink tableSink = tableFactory.createTableSink(new TableSinkFactoryContextImpl(
			ObjectIdentifier.of("mycatalog", "mydb", "mytable"),
			table,
			new Configuration(),
			true));
	assertTrue(tableSink instanceof StreamTableSink);
}
 
Example #16
Source File: TestTableSinkFactoryBase.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Row> createTableSink(TableSinkFactory.Context context) {
	return new TestTableSink(
			context.getTable().getSchema(),
			context.getTable().getProperties().get(testProperty));
}
 
Example #17
Source File: RedisTableSinkFactory.java    From bahir-flink with Apache License 2.0
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
    return new RedisTableSink(properties);
}
 
Example #18
Source File: FlinkPravegaStreamTableSinkFactory.java    From flink-connectors with Apache License 2.0
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
    return createFlinkPravegaTableSink(properties);
}
 
Example #19
Source File: StreamTableSinkFactory.java    From flink with Apache License 2.0
/**
 * Creates and configures a {@link StreamTableSink} using the given properties.
 *
 * @param properties normalized properties describing a table sink.
 * @return the configured table sink.
 * @deprecated {@link Context} contains more information, and already contains table schema too.
 * Please use {@link #createTableSink(Context)} instead.
 */
@Deprecated
default StreamTableSink<T> createStreamTableSink(Map<String, String> properties) {
	return null;
}
 
Example #20
Source File: StreamTableSinkFactory.java    From flink with Apache License 2.0
/**
 * Creates and configures a {@link StreamTableSink} using the given properties.
 *
 * @param properties normalized properties describing a table sink.
 * @return the configured table sink.
 */
StreamTableSink<T> createStreamTableSink(Map<String, String> properties);
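
Implementations pair this method with requiredContext() and supportedProperties() from TableFactory, and register themselves for SPI discovery in a META-INF/services/org.apache.flink.table.factories.TableFactory file. As a closing sketch, here is a hypothetical factory that wires the PrintAppendTableSink from the introduction into this mechanism; the connector.type value and the property keys are assumptions following the old descriptor conventions, not a shipped connector.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.flink.table.descriptors.DescriptorProperties;
import org.apache.flink.table.factories.StreamTableSinkFactory;
import org.apache.flink.table.sinks.StreamTableSink;
import org.apache.flink.types.Row;

import static org.apache.flink.table.descriptors.Schema.SCHEMA;

/** Hypothetical factory wiring the PrintAppendTableSink sketch into factory discovery. */
public class PrintTableSinkFactory implements StreamTableSinkFactory<Row> {

	@Override
	public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
		// Production factories validate first; see the DescriptorProperties
		// pattern used throughout the examples above.
		DescriptorProperties descriptorProperties = new DescriptorProperties(true);
		descriptorProperties.putProperties(properties);
		return new PrintAppendTableSink(descriptorProperties.getTableSchema(SCHEMA));
	}

	@Override
	public Map<String, String> requiredContext() {
		Map<String, String> context = new HashMap<>();
		context.put("connector.type", "print"); // hypothetical identifier
		context.put("update-mode", "append");
		return context;
	}

	@Override
	public List<String> supportedProperties() {
		List<String> properties = new ArrayList<>();
		properties.add("schema.#.name");
		properties.add("schema.#.data-type");
		return properties;
	}
}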