Java Code Examples for org.apache.flink.table.sinks.TableSink

The following examples show how to use org.apache.flink.table.sinks.TableSink. They are extracted from open source projects; where available, the source project, source file, and license are noted above each example.
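Most of the examples below revolve around the same lifecycle: a factory or constructor creates the sink, configure(fieldNames, fieldTypes) returns a copy bound to a concrete schema, and the configured sink is registered in the catalog and written to. As a rough orientation, here is a minimal, self-contained sketch of that lifecycle. It assumes the legacy (pre-1.11) Flink Table API and the built-in CsvTableSink; the class name and output path are illustrative only and do not come from the examples below.

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.sinks.CsvTableSink;

public class TableSinkLifecycleSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

		DataStream<Tuple2<String, Integer>> data =
			env.fromElements(Tuple2.of("alice", 30), Tuple2.of("bob", 25));
		Table people = tableEnv.fromDataStream(data, "name, age");

		String[] fieldNames = {"name", "age"};
		TypeInformation<?>[] fieldTypes = {Types.STRING, Types.INT};

		// registerTableSink() calls sink.configure(fieldNames, fieldTypes)
		// internally (see Example 22) and registers the configured copy.
		CsvTableSink sink = new CsvTableSink("/tmp/people.csv", "|");
		tableEnv.registerTableSink("CsvSink", fieldNames, fieldTypes, sink);

		// Emit the table into the registered sink, as several tests below do.
		people.insertInto("CsvSink");
		env.execute();
	}
}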
Example 1
@Override
public TableSink<Row> createTableSink(Map<String, String> prop) {
	DescriptorProperties params = new DescriptorProperties();
	params.putProperties(prop);

	new DatahubDescriptorValidator().validate(params);

	TableSchema schema = params.getTableSchema(SCHEMA);

	String project = params.getString(CONNECTOR_PROJECT);
	String topic = params.getString(CONNECTOR_TOPIC);
	String accessId = params.getString(CONNECTOR_ACCESS_ID);
	String accessKey = params.getString(CONNECTOR_ACCESS_KEY);
	String endpoint = params.getString(CONNECTOR_ENDPOINT);

	return new DatahubTableSink(
			project,
			topic,
			accessId,
			accessKey,
			endpoint,
			schema,
			params
	);
}
 
Example 2
@Test
public void testValidWriterModeExactlyOnce() {
    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);

    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("name").withWriterMode(PravegaWriterMode.EXACTLY_ONCE)
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(JSON)
            .withSchema(SCHEMA)
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();
    final TableSink<?> sink = TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);
    assertNotNull(sink);
}
 
Example 3
Source Project: flink   Source File: ElasticsearchUpsertTableSinkBase.java    License: Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	if (!Arrays.equals(getFieldNames(), fieldNames) || !Arrays.equals(getFieldTypes(), fieldTypes)) {
		throw new ValidationException("Reconfiguration with different fields is not allowed. " +
			"Expected: " + Arrays.toString(getFieldNames()) + " / " + Arrays.toString(getFieldTypes()) + ". " +
			"But was: " + Arrays.toString(fieldNames) + " / " + Arrays.toString(fieldTypes));
	}
	return copy(
		isAppendOnly,
		schema,
		hosts,
		index,
		docType,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions,
		requestFactory);
}
 
Example 4
Source Project: flink   Source File: HiveTableFactoryTest.java    License: Apache License 2.0
@Test
public void testGenericTable() throws Exception {
	TableSchema schema = TableSchema.builder()
		.field("name", DataTypes.STRING())
		.field("age", DataTypes.INT())
		.build();

	Map<String, String> properties = new HashMap<>();
	properties.put(CatalogConfig.IS_GENERIC, String.valueOf(true));
	properties.put("connector", "COLLECTION");

	catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
	ObjectPath path = new ObjectPath("mydb", "mytable");
	CatalogTable table = new CatalogTableImpl(schema, properties, "csv table");
	catalog.createTable(path, table, true);
	Optional<TableFactory> opt = catalog.getTableFactory();
	assertTrue(opt.isPresent());
	HiveTableFactory tableFactory = (HiveTableFactory) opt.get();
	TableSource tableSource = tableFactory.createTableSource(path, table);
	assertTrue(tableSource instanceof StreamTableSource);
	TableSink tableSink = tableFactory.createTableSink(path, table);
	assertTrue(tableSink instanceof StreamTableSink);
}
 
Example 5
Source Project: flink   Source File: HiveTableFactoryTest.java    License: Apache License 2.0
@Test
public void testHiveTable() throws Exception {
	TableSchema schema = TableSchema.builder()
		.field("name", DataTypes.STRING())
		.field("age", DataTypes.INT())
		.build();

	Map<String, String> properties = new HashMap<>();

	catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
	ObjectPath path = new ObjectPath("mydb", "mytable");
	CatalogTable table = new CatalogTableImpl(schema, properties, "hive table");
	catalog.createTable(path, table, true);
	Optional<TableFactory> opt = catalog.getTableFactory();
	assertTrue(opt.isPresent());
	HiveTableFactory tableFactory = (HiveTableFactory) opt.get();
	TableSink tableSink = tableFactory.createTableSink(path, table);
	assertTrue(tableSink instanceof HiveTableSink);
	TableSource tableSource = tableFactory.createTableSource(path, table);
	assertTrue(tableSource instanceof HiveTableSource);
}
 
Example 6
Source Project: flink   Source File: TableEnvironmentImpl.java    License: Apache License 2.0
private void registerTableSinkInternal(String name, TableSink<?> tableSink) {
	Optional<CatalogBaseTable> table = getCatalogTable(catalogManager.getBuiltInCatalogName(),
		catalogManager.getBuiltInDatabaseName(), name);

	if (table.isPresent()) {
		if (table.get() instanceof ConnectorCatalogTable<?, ?>) {
			ConnectorCatalogTable<?, ?> sourceSinkTable = (ConnectorCatalogTable<?, ?>) table.get();
			if (sourceSinkTable.getTableSink().isPresent()) {
				throw new ValidationException(String.format(
					"Table '%s' already exists. Please choose a different name.", name));
			} else {
				// wrapper contains only sink (not source)
				replaceTableInternal(
					name,
					ConnectorCatalogTable
						.sourceAndSink(sourceSinkTable.getTableSource().get(), tableSink, !IS_STREAM_TABLE));
			}
		} else {
			throw new ValidationException(String.format(
				"Table '%s' already exists. Please choose a different name.", name));
		}
	} else {
		registerTableInternal(name, ConnectorCatalogTable.sink(tableSink, !IS_STREAM_TABLE));
	}
}
 
Example 7
@Test
public void testValidWriterModeAtleastOnce() {
    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);

    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("name").withWriterMode(PravegaWriterMode.ATLEAST_ONCE)
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(JSON)
            .withSchema(SCHEMA)
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();
    final TableSink<?> sink = TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);
    assertNotNull(sink);
}
 
Example 8
@Test
public void testRequiredProperties() {
	Map<String, String> properties = getBasicProperties();

	final TableSink<?> actual = TableFactoryService.find(TableSinkFactory.class, properties)
			.createTableSink(properties);

	assertTrue(actual instanceof DatahubTableSink);
}
 
Example 9
@Test
public void testSupportedProperties() {
	Map<String, String> properties = getBasicProperties();

	properties.put(CONNECTOR_BATCH_SIZE, "1");
	properties.put(CONNECTOR_BUFFER_SIZE, "1");
	properties.put(CONNECTOR_RETRY_TIMEOUT_IN_MILLS, "3");
	properties.put(CONNECTOR_MAX_RETRY_TIMES, "10");
	properties.put(CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS, "5");

	final TableSink<?> actual = TableFactoryService.find(TableSinkFactory.class, properties)
			.createTableSink(properties);

	assertTrue(actual instanceof DatahubTableSink);
}
 
Example 10
Source Project: Flink-CEPplus   Source File: JDBCAppendTableSink.java    License: Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	int[] types = outputFormat.getTypesArray();

	String sinkSchema =
		String.join(", ", IntStream.of(types).mapToObj(JDBCTypeUtil::getTypeName).collect(Collectors.toList()));
	String tableSchema =
		String.join(", ", Stream.of(fieldTypes).map(JDBCTypeUtil::getTypeName).collect(Collectors.toList()));
	String msg = String.format("Schema of output table is incompatible with JDBCAppendTableSink schema. " +
		"Table schema: [%s], sink schema: [%s]", tableSchema, sinkSchema);

	Preconditions.checkArgument(fieldTypes.length == types.length, msg);
	for (int i = 0; i < types.length; ++i) {
		Preconditions.checkArgument(
			JDBCTypeUtil.typeInformationToSqlType(fieldTypes[i]) == types[i],
			msg);
	}

	JDBCAppendTableSink copy;
	try {
		copy = new JDBCAppendTableSink(InstantiationUtil.clone(outputFormat));
	} catch (IOException | ClassNotFoundException e) {
		throw new RuntimeException(e);
	}

	copy.fieldNames = fieldNames;
	copy.fieldTypes = fieldTypes;
	return copy;
}
 
Example 11
Source Project: Flink-CEPplus   Source File: CollectBatchTableSink.java    License: Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	final CollectBatchTableSink copy = new CollectBatchTableSink(accumulatorName, serializer);
	copy.fieldNames = fieldNames;
	copy.fieldTypes = fieldTypes;
	return copy;
}
 
Example 12
Source Project: flink-connectors   Source File: FlinkTableITCase.java    License: Apache License 2.0
@Test
public void testBatchTableSinkUsingDescriptor() throws Exception {

    // create a Pravega stream for test purposes
    Stream stream = Stream.of(setupUtils.getScope(), "testBatchTableSinkUsingDescriptor");
    this.setupUtils.createTestStream(stream.getStreamName(), 1);

    // create a Flink Table environment
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(1);
    BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env);

    Table table = tableEnv.fromDataSet(env.fromCollection(SAMPLES));

    Pravega pravega = new Pravega();
    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("category")
            .forStream(stream)
            .withPravegaConfig(setupUtils.getPravegaConfig());

    ConnectTableDescriptor desc = tableEnv.connect(pravega)
            .withFormat(new Json().failOnMissingField(true))
            .withSchema(new Schema()
                    .field("category", DataTypes.STRING())
                    .field("value", DataTypes.INT()));
    desc.createTemporaryTable("test");

    final Map<String, String> propertiesMap = desc.toProperties();
    final TableSink<?> sink = TableFactoryService.find(BatchTableSinkFactory.class, propertiesMap)
            .createBatchTableSink(propertiesMap);

    String tableSinkPath = tableEnv.getCurrentDatabase() + "." + "PravegaSink";

    ConnectorCatalogTable<?, ?> connectorCatalogSinkTable = ConnectorCatalogTable.sink(sink, true);

    tableEnv.getCatalog(tableEnv.getCurrentCatalog()).get().createTable(
            ObjectPath.fromString(tableSinkPath),
            connectorCatalogSinkTable, false);
    table.insertInto("PravegaSink");
    env.execute();
}
 
Example 13
Source Project: Flink-CEPplus   Source File: CollectStreamTableSink.java    License: Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	final CollectStreamTableSink copy = new CollectStreamTableSink(targetAddress, targetPort, serializer);
	copy.fieldNames = fieldNames;
	copy.fieldTypes = fieldTypes;
	return copy;
}
 
Example 14
Source Project: pulsar-flink   Source File: PulsarTableSourceSinkFactory.java    License: Apache License 2.0
@Override
public TableSink<Row> createTableSink(ObjectPath tablePath, CatalogTable table) {
    String topic = PulsarMetadataReader.objectPath2TopicName(tablePath);

    Map<String, String> props = new HashMap<String, String>();
    props.putAll(table.toProperties());
    props.put(CONNECTOR_TOPIC, topic);

    return createStreamTableSink(props);
}
 
Example 15
Source Project: flink   Source File: HBaseUpsertTableSink.java    License: Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	if (!Arrays.equals(getFieldNames(), fieldNames) || !Arrays.equals(getFieldTypes(), fieldTypes)) {
		throw new ValidationException("Reconfiguration with different fields is not allowed. " +
			"Expected: " + Arrays.toString(getFieldNames()) + " / " + Arrays.toString(getFieldTypes()) + ". " +
			"But was: " + Arrays.toString(fieldNames) + " / " + Arrays.toString(fieldTypes));
	}

	return new HBaseUpsertTableSink(hbaseTableSchema, hbaseOptions, writeOptions);
}
 
Example 16
Source Project: flink   Source File: HiveTableFactory.java    License: Apache License 2.0
@Override
public TableSink<Row> createTableSink(ObjectPath tablePath, CatalogTable table) {
	Preconditions.checkNotNull(table);
	Preconditions.checkArgument(table instanceof CatalogTableImpl);

	boolean isGeneric = Boolean.valueOf(table.getProperties().get(CatalogConfig.IS_GENERIC));

	if (!isGeneric) {
		return createOutputFormatTableSink(tablePath, table);
	} else {
		return TableFactoryUtil.findAndCreateTableSink(table);
	}
}
 
Example 17
Source Project: flink   Source File: JDBCUpsertTableSink.java    License: Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	if (!Arrays.equals(getFieldNames(), fieldNames) || !Arrays.equals(getFieldTypes(), fieldTypes)) {
		throw new ValidationException("Reconfiguration with different fields is not allowed. " +
				"Expected: " + Arrays.toString(getFieldNames()) + " / " + Arrays.toString(getFieldTypes()) + ". " +
				"But was: " + Arrays.toString(fieldNames) + " / " + Arrays.toString(fieldTypes));
	}

	JDBCUpsertTableSink copy = new JDBCUpsertTableSink(schema, options, flushMaxSize, flushIntervalMills, maxRetryTime);
	copy.keyFields = keyFields;
	return copy;
}
 
Example 18
Source Project: flink   Source File: JDBCAppendTableSink.java    License: Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	int[] types = outputFormat.getTypesArray();

	String sinkSchema =
		String.join(", ", IntStream.of(types).mapToObj(JDBCTypeUtil::getTypeName).collect(Collectors.toList()));
	String tableSchema =
		String.join(", ", Stream.of(fieldTypes).map(JDBCTypeUtil::getTypeName).collect(Collectors.toList()));
	String msg = String.format("Schema of output table is incompatible with JDBCAppendTableSink schema. " +
		"Table schema: [%s], sink schema: [%s]", tableSchema, sinkSchema);

	Preconditions.checkArgument(fieldTypes.length == types.length, msg);
	for (int i = 0; i < types.length; ++i) {
		Preconditions.checkArgument(
			JDBCTypeUtil.typeInformationToSqlType(fieldTypes[i]) == types[i],
			msg);
	}

	JDBCAppendTableSink copy;
	try {
		copy = new JDBCAppendTableSink(InstantiationUtil.clone(outputFormat));
	} catch (IOException | ClassNotFoundException e) {
		throw new RuntimeException(e);
	}

	copy.fieldNames = fieldNames;
	copy.fieldTypes = fieldTypes;
	return copy;
}
 
Example 19
Source Project: flink   Source File: ConnectorCatalogTable.java    License: Apache License 2.0
public static <T1, T2> ConnectorCatalogTable sourceAndSink(
		TableSource<T1> source,
		TableSink<T2> sink,
		boolean isBatch) {
	TableSchema tableSchema = calculateSourceSchema(source, isBatch);
	return new ConnectorCatalogTable<>(source, sink, tableSchema, isBatch);
}
 
Example 20
Source Project: flink   Source File: ConnectorCatalogTable.java    License: Apache License 2.0
@VisibleForTesting
protected ConnectorCatalogTable(
		TableSource<T1> tableSource,
		TableSink<T2> tableSink,
		TableSchema tableSchema,
		boolean isBatch) {
	super(tableSchema, Collections.emptyMap(), "");
	this.tableSource = tableSource;
	this.tableSink = tableSink;
	this.isBatch = isBatch;
}
 
Example 21
Source Project: flink   Source File: CatalogManager.java    License: Apache License 2.0
private static TableSchema getTableSchema(ExternalCatalogTable externalTable) {
	if (externalTable.isTableSource()) {
		return TableFactoryUtil.findAndCreateTableSource(externalTable).getTableSchema();
	} else {
		TableSink<?> tableSink = TableFactoryUtil.findAndCreateTableSink(externalTable);
		return tableSink.getTableSchema();
	}
}
 
Example 22
Source Project: flink   Source File: TableEnvironmentImpl.java    License: Apache License 2.0
@Override
public void registerTableSink(
		String name,
		String[] fieldNames,
		TypeInformation<?>[] fieldTypes,
		TableSink<?> tableSink) {
	registerTableSink(name, tableSink.configure(fieldNames, fieldTypes));
}
 
Example 23
Source Project: flink   Source File: TableEnvironmentImpl.java    License: Apache License 2.0
@Override
public void registerTableSink(String name, TableSink<?> configuredSink) {
	// validate
	if (configuredSink.getTableSchema().getFieldCount() == 0) {
		throw new TableException("Table schema cannot be empty.");
	}

	checkValidTableName(name);
	registerTableSinkInternal(name, configuredSink);
}
 
Example 24
Source Project: flink   Source File: TableFactoryUtil.java    License: Apache License 2.0
private static <T> TableSink<T> findAndCreateTableSink(Map<String, String> properties) {
	TableSink tableSink;
	try {
		tableSink = TableFactoryService
			.find(TableSinkFactory.class, properties)
			.createTableSink(properties);
	} catch (Throwable t) {
		throw new TableException("findAndCreateTableSink failed.", t);
	}

	return tableSink;
}
 
Example 25
Source Project: flink   Source File: TableFactoryUtil.java    License: Apache License 2.0
/**
 * Creates a table sink for a {@link CatalogTable} using table factory associated with the catalog.
 */
public static Optional<TableSink> createTableSinkForCatalogTable(Catalog catalog, CatalogTable catalogTable, ObjectPath tablePath) {
	TableFactory tableFactory = catalog.getTableFactory().orElse(null);
	if (tableFactory instanceof TableSinkFactory) {
		return Optional.ofNullable(((TableSinkFactory) tableFactory).createTableSink(tablePath, catalogTable));
	}
	return Optional.empty();
}
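A hedged sketch of a call site for the helper above: it assumes a caller that prefers the catalog-provided factory and falls back to generic factory discovery. The fallback is the same TableFactoryUtil.findAndCreateTableSink(CatalogTable) overload used in Example 16; the surrounding variable names are illustrative.

// Illustrative only: prefer the catalog's own factory, then fall back
// to discovery via TableFactoryService.
TableSink<?> sink = TableFactoryUtil
	.createTableSinkForCatalogTable(catalog, catalogTable, tablePath)
	.orElseGet(() -> TableFactoryUtil.findAndCreateTableSink(catalogTable));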
 
Example 26
Source Project: alchemy   Source File: HbaseTableSink.java    License: Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    HbaseTableSink copy = new HbaseTableSink(this.hbaseProperties);
    copy.fieldNames = Preconditions.checkNotNull(fieldNames, "fieldNames");
    copy.fieldTypes = Preconditions.checkNotNull(fieldTypes, "fieldTypes");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
        "Number of provided field names and types does not match.");
    return copy;
}
 
Example 27
Source Project: alchemy   Source File: Elasticsearch6TableSink.java    License: Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
            "Number of provided field names and types does not match.");
    Elasticsearch6TableSink tableSink = new Elasticsearch6TableSink(this.elasticsearch6Properties);
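    // When no schema arrived via the properties, fall back to the schema from
    // the configure() arguments; otherwise tableSink.schema is presumably set
    // by the constructor above.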
    if (this.elasticsearch6Properties.getTableSchema() == null) {
        tableSink.schema = new TableSchema(fieldNames, fieldTypes);
    }
    RowTypeInfo rowTypeInfo = new RowTypeInfo(tableSink.schema.getFieldTypes(), tableSink.schema.getFieldNames());
    tableSink.serializationSchema = new JsonRowSerializationSchema(rowTypeInfo);
    return tableSink;
}
 
Example 28
Source Project: alchemy   Source File: FileSystemTableSink.java    License: Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    FileSystemTableSink copy = new FileSystemTableSink(this.filePropereties);
    copy.fieldNames = Preconditions.checkNotNull(fieldNames, "fieldNames");
    copy.fieldTypes = Preconditions.checkNotNull(fieldTypes, "fieldTypes");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
        "Number of provided field names and types does not match.");
    return copy;
}
 
Example 29
Source Project: alchemy   Source File: Elasticsearch5TableSink.java    License: Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    Elasticsearch5TableSink copy = new Elasticsearch5TableSink(this.elasticsearch5Properties);
    copy.fieldNames = Preconditions.checkNotNull(fieldNames, "fieldNames");
    copy.fieldTypes = Preconditions.checkNotNull(fieldTypes, "fieldTypes");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
            "Number of provided field names and types does not match.");

    RowTypeInfo rowSchema = new RowTypeInfo(fieldTypes, fieldNames);
    copy.jsonRowSchema = new JsonRowSerializationSchema(rowSchema);

    return copy;
}