org.apache.flink.table.sinks.TableSink Java Examples

The following examples show how to use org.apache.flink.table.sinks.TableSink. Each snippet is taken from an open-source project; the source file, project, and license are noted above each example.
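Every snippet below implements or consumes this interface: a sink reports its field names and types, and configure(...) must return a copy of the sink bound to the schema of the table being written. For reference, here is a minimal sketch of a custom append-only sink against the legacy (pre-Flink-1.10) API these examples use; the class name ConsolePrintTableSink and the print-based output are illustrative assumptions, not code from any of the projects below.

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.table.sinks.AppendStreamTableSink;
import org.apache.flink.table.sinks.TableSink;
import org.apache.flink.types.Row;

// Illustrative only: a minimal append-only TableSink that prints each row.
public class ConsolePrintTableSink implements AppendStreamTableSink<Row> {

	private String[] fieldNames;
	private TypeInformation<?>[] fieldTypes;

	@Override
	public TypeInformation<Row> getOutputType() {
		return new RowTypeInfo(fieldTypes, fieldNames);
	}

	@Override
	public String[] getFieldNames() {
		return fieldNames;
	}

	@Override
	public TypeInformation<?>[] getFieldTypes() {
		return fieldTypes;
	}

	@Override
	public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
		// The contract is to return a configured copy, not to mutate this instance;
		// Examples #4, #8-#10, #13, and #20-#27 below follow the same pattern.
		ConsolePrintTableSink copy = new ConsolePrintTableSink();
		copy.fieldNames = fieldNames;
		copy.fieldTypes = fieldTypes;
		return copy;
	}

	@Override
	public void emitDataStream(DataStream<Row> dataStream) {
		dataStream.print();
	}
}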
Example #1
Source File: DatahubTableFactory.java    From alibaba-flink-connectors with Apache License 2.0
@Override
public TableSink<Row> createTableSink(Map<String, String> prop) {
	DescriptorProperties params = new DescriptorProperties();
	params.putProperties(prop);

	new DatahubDescriptorValidator().validate(params);

	TableSchema schema = params.getTableSchema(SCHEMA);

	String project = params.getString(CONNECTOR_PROJECT);
	String topic = params.getString(CONNECTOR_TOPIC);
	String accessId = params.getString(CONNECTOR_ACCESS_ID);
	String accessKey = params.getString(CONNECTOR_ACCESS_KEY);
	String endpoint = params.getString(CONNECTOR_ENDPOINT);

	return new DatahubTableSink(
			project,
			topic,
			accessId,
			accessKey,
			endpoint,
			schema,
			params
	);
}
 
Example #2
Source File: FlinkPravegaTableFactoryTest.java    From flink-connectors with Apache License 2.0
@Test
public void testValidWriterModeExactlyOnce() {
    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);

    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("name").withWriterMode(PravegaWriterMode.EXACTLY_ONCE)
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(JSON)
            .withSchema(SCHEMA)
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();
    final TableSink<?> sink = TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);
    assertNotNull(sink);
}
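Once a sink has been discovered through the factory service this way, it would typically be registered with a table environment so queries can write to it. A hedged one-line sketch, assuming a StreamTableEnvironment named tableEnv is in scope:

tableEnv.registerTableSink("pravega_sink", sink);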
 
Example #3
Source File: TableEnvironmentImpl.java    From flink with Apache License 2.0
private void registerTableSinkInternal(String name, TableSink<?> tableSink) {
	Optional<CatalogBaseTable> table = getCatalogTable(catalogManager.getBuiltInCatalogName(),
		catalogManager.getBuiltInDatabaseName(), name);

	if (table.isPresent()) {
		if (table.get() instanceof ConnectorCatalogTable<?, ?>) {
			ConnectorCatalogTable<?, ?> sourceSinkTable = (ConnectorCatalogTable<?, ?>) table.get();
			if (sourceSinkTable.getTableSink().isPresent()) {
				throw new ValidationException(String.format(
					"Table '%s' already exists. Please choose a different name.", name));
			} else {
				// the existing wrapper contains only a source (not a sink), so attach the sink
				replaceTableInternal(
					name,
					ConnectorCatalogTable
						.sourceAndSink(sourceSinkTable.getTableSource().get(), tableSink, !IS_STREAM_TABLE));
			}
		} else {
			throw new ValidationException(String.format(
				"Table '%s' already exists. Please choose a different name.", name));
		}
	} else {
		registerTableInternal(name, ConnectorCatalogTable.sink(tableSink, !IS_STREAM_TABLE));
	}
}
 
Example #4
Source File: ElasticsearchUpsertTableSinkBase.java    From flink with Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	if (!Arrays.equals(getFieldNames(), fieldNames) || !Arrays.equals(getFieldTypes(), fieldTypes)) {
		throw new ValidationException("Reconfiguration with different fields is not allowed. " +
			"Expected: " + Arrays.toString(getFieldNames()) + " / " + Arrays.toString(getFieldTypes()) + ". " +
			"But was: " + Arrays.toString(fieldNames) + " / " + Arrays.toString(fieldTypes));
	}
	return copy(
		isAppendOnly,
		schema,
		hosts,
		index,
		docType,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions,
		requestFactory);
}
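This "reject reconfiguration, return a copy" pattern recurs throughout the examples below: the planner calls configure(...) with the schema of the query result before emitting to the sink. A sketch of that call site, where sink and resultSchema are assumed names:

// the planner binds the sink to the result schema before writing
TableSink<?> configured = sink.configure(
		resultSchema.getFieldNames(),   // String[]
		resultSchema.getFieldTypes());  // TypeInformation<?>[]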
 
Example #5
Source File: HiveTableFactoryTest.java    From flink with Apache License 2.0
@Test
public void testHiveTable() throws Exception {
	TableSchema schema = TableSchema.builder()
		.field("name", DataTypes.STRING())
		.field("age", DataTypes.INT())
		.build();

	Map<String, String> properties = new HashMap<>();

	catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
	ObjectPath path = new ObjectPath("mydb", "mytable");
	CatalogTable table = new CatalogTableImpl(schema, properties, "hive table");
	catalog.createTable(path, table, true);
	Optional<TableFactory> opt = catalog.getTableFactory();
	assertTrue(opt.isPresent());
	HiveTableFactory tableFactory = (HiveTableFactory) opt.get();
	TableSink tableSink = tableFactory.createTableSink(path, table);
	assertTrue(tableSink instanceof HiveTableSink);
	TableSource tableSource = tableFactory.createTableSource(path, table);
	assertTrue(tableSource instanceof HiveTableSource);
}
 
Example #6
Source File: HiveTableFactoryTest.java    From flink with Apache License 2.0
@Test
public void testGenericTable() throws Exception {
	TableSchema schema = TableSchema.builder()
		.field("name", DataTypes.STRING())
		.field("age", DataTypes.INT())
		.build();

	Map<String, String> properties = new HashMap<>();
	properties.put(CatalogConfig.IS_GENERIC, String.valueOf(true));
	properties.put("connector", "COLLECTION");

	catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
	ObjectPath path = new ObjectPath("mydb", "mytable");
	CatalogTable table = new CatalogTableImpl(schema, properties, "csv table");
	catalog.createTable(path, table, true);
	Optional<TableFactory> opt = catalog.getTableFactory();
	assertTrue(opt.isPresent());
	HiveTableFactory tableFactory = (HiveTableFactory) opt.get();
	TableSource tableSource = tableFactory.createTableSource(path, table);
	assertTrue(tableSource instanceof StreamTableSource);
	TableSink tableSink = tableFactory.createTableSink(path, table);
	assertTrue(tableSink instanceof StreamTableSink);
}
 
Example #7
Source File: FlinkPravegaTableFactoryTest.java    From flink-connectors with Apache License 2.0
@Test
public void testValidWriterModeAtleastOnce() {
    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);

    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("name").withWriterMode(PravegaWriterMode.ATLEAST_ONCE)
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);

    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(JSON)
            .withSchema(SCHEMA)
            .inAppendMode();

    final Map<String, String> propertiesMap = testDesc.toProperties();
    final TableSink<?> sink = TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
            .createStreamTableSink(propertiesMap);
    assertNotNull(sink);
}
 
Example #8
Source File: ElasticsearchUpsertTableSinkBase.java    From Flink-CEPplus with Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	if (!Arrays.equals(getFieldNames(), fieldNames) || !Arrays.equals(getFieldTypes(), fieldTypes)) {
		throw new ValidationException("Reconfiguration with different fields is not allowed. " +
			"Expected: " + Arrays.toString(getFieldNames()) + " / " + Arrays.toString(getFieldTypes()) + ". " +
			"But was: " + Arrays.toString(fieldNames) + " / " + Arrays.toString(fieldTypes));
	}
	return copy(
		isAppendOnly,
		schema,
		hosts,
		index,
		docType,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions,
		requestFactory);
}
 
Example #9
Source File: JDBCUpsertTableSink.java    From flink with Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	if (!Arrays.equals(getFieldNames(), fieldNames) || !Arrays.equals(getFieldTypes(), fieldTypes)) {
		throw new ValidationException("Reconfiguration with different fields is not allowed. " +
				"Expected: " + Arrays.toString(getFieldNames()) + " / " + Arrays.toString(getFieldTypes()) + ". " +
				"But was: " + Arrays.toString(fieldNames) + " / " + Arrays.toString(fieldTypes));
	}

	JDBCUpsertTableSink copy = new JDBCUpsertTableSink(schema, options, flushMaxSize, flushIntervalMills, maxRetryTime);
	copy.keyFields = keyFields;
	return copy;
}
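For context, this sink is normally obtained through its builder rather than by calling the constructor directly. A hedged sketch against the Flink 1.10-era flink-jdbc builder API, where the JDBC URL and table name are placeholders and schema is an existing TableSchema:

JDBCUpsertTableSink sink = JDBCUpsertTableSink.builder()
	.setOptions(JDBCOptions.builder()
		.setDBUrl("jdbc:mysql://localhost:3306/mydb")
		.setTableName("my_table")
		.build())
	.setTableSchema(schema)
	.setFlushMaxSize(1000)
	.build();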
 
Example #10
Source File: JDBCAppendTableSink.java    From flink with Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	int[] types = outputFormat.getTypesArray();

	String sinkSchema =
		String.join(", ", IntStream.of(types).mapToObj(JDBCTypeUtil::getTypeName).collect(Collectors.toList()));
	String tableSchema =
		String.join(", ", Stream.of(fieldTypes).map(JDBCTypeUtil::getTypeName).collect(Collectors.toList()));
	String msg = String.format("Schema of output table is incompatible with JDBCAppendTableSink schema. " +
		"Table schema: [%s], sink schema: [%s]", tableSchema, sinkSchema);

	Preconditions.checkArgument(fieldTypes.length == types.length, msg);
	for (int i = 0; i < types.length; ++i) {
		Preconditions.checkArgument(
			JDBCTypeUtil.typeInformationToSqlType(fieldTypes[i]) == types[i],
			msg);
	}

	JDBCAppendTableSink copy;
	try {
		copy = new JDBCAppendTableSink(InstantiationUtil.clone(outputFormat));
	} catch (IOException | ClassNotFoundException e) {
		throw new RuntimeException(e);
	}

	copy.fieldNames = fieldNames;
	copy.fieldTypes = fieldTypes;
	return copy;
}
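For reference, the append sink is built through its builder, following the pattern shown in the Flink documentation; the Derby URL and INSERT statement here are placeholder values, and Types is org.apache.flink.api.common.typeinfo.Types:

JDBCAppendTableSink sink = JDBCAppendTableSink.builder()
	.setDrivername("org.apache.derby.jdbc.EmbeddedDriver")
	.setDBUrl("jdbc:derby:memory:bookshop")
	.setQuery("INSERT INTO books (id, title) VALUES (?, ?)")
	.setParameterTypes(Types.INT, Types.STRING)
	.build();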
 
Example #11
Source File: ConnectorCatalogTable.java    From flink with Apache License 2.0
public static <T1, T2> ConnectorCatalogTable sourceAndSink(
		TableSource<T1> source,
		TableSink<T2> sink,
		boolean isBatch) {
	TableSchema tableSchema = calculateSourceSchema(source, isBatch);
	return new ConnectorCatalogTable<>(source, sink, tableSchema, isBatch);
}
 
Example #12
Source File: ConnectorCatalogTable.java    From flink with Apache License 2.0
@VisibleForTesting
protected ConnectorCatalogTable(
		TableSource<T1> tableSource,
		TableSink<T2> tableSink,
		TableSchema tableSchema,
		boolean isBatch) {
	super(tableSchema, Collections.emptyMap(), "");
	this.tableSource = tableSource;
	this.tableSink = tableSink;
	this.isBatch = isBatch;
}
 
Example #13
Source File: CollectStreamTableSink.java    From Flink-CEPplus with Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
	final CollectStreamTableSink copy = new CollectStreamTableSink(targetAddress, targetPort, serializer);
	copy.fieldNames = fieldNames;
	copy.fieldTypes = fieldTypes;
	return copy;
}
 
Example #14
Source File: TableEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public void registerTableSink(
		String name,
		String[] fieldNames,
		TypeInformation<?>[] fieldTypes,
		TableSink<?> tableSink) {
	registerTableSink(name, tableSink.configure(fieldNames, fieldTypes));
}
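A hedged usage sketch of this overload with the built-in CsvTableSink; the registered name, field names, and output path are placeholders:

tableEnv.registerTableSink(
	"csv_output",
	new String[]{"name", "age"},
	new TypeInformation<?>[]{Types.STRING, Types.INT},
	new CsvTableSink("/tmp/output.csv", ","));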
 
Example #15
Source File: TableEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public void registerTableSink(String name, TableSink<?> configuredSink) {
	// validate
	if (configuredSink.getTableSchema().getFieldCount() == 0) {
		throw new TableException("Table schema cannot be empty.");
	}

	checkValidTableName(name);
	registerTableSinkInternal(name, configuredSink);
}
 
Example #16
Source File: CatalogManager.java    From flink with Apache License 2.0
private static TableSchema getTableSchema(ExternalCatalogTable externalTable) {
	if (externalTable.isTableSource()) {
		return TableFactoryUtil.findAndCreateTableSource(externalTable).getTableSchema();
	} else {
		TableSink<?> tableSink = TableFactoryUtil.findAndCreateTableSink(externalTable);
		return tableSink.getTableSchema();
	}
}
 
Example #17
Source File: DatahubTableFactoryTest.java    From alibaba-flink-connectors with Apache License 2.0
@Test
public void testSupportedProperties() {
	Map<String, String> properties = getBasicProperties();

	properties.put(CONNECTOR_BATCH_SIZE, "1");
	properties.put(CONNECTOR_BUFFER_SIZE, "1");
	properties.put(CONNECTOR_RETRY_TIMEOUT_IN_MILLS, "3");
	properties.put(CONNECTOR_MAX_RETRY_TIMES, "10");
	properties.put(CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS, "5");

	final TableSink<?> actual = TableFactoryService.find(TableSinkFactory.class, properties)
			.createTableSink(properties);

	assertTrue(actual instanceof DatahubTableSink);
}
 
Example #18
Source File: TableFactoryUtil.java    From flink with Apache License 2.0
private static <T> TableSink<T> findAndCreateTableSink(Map<String, String> properties) {
	TableSink tableSink;
	try {
		tableSink = TableFactoryService
			.find(TableSinkFactory.class, properties)
			.createTableSink(properties);
	} catch (Throwable t) {
		throw new TableException("findAndCreateTableSink failed.", t);
	}

	return tableSink;
}
 
Example #19
Source File: TableFactoryUtil.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a table sink for a {@link CatalogTable} using the table factory associated with the given catalog.
 */
public static Optional<TableSink> createTableSinkForCatalogTable(Catalog catalog, CatalogTable catalogTable, ObjectPath tablePath) {
	TableFactory tableFactory = catalog.getTableFactory().orElse(null);
	if (tableFactory instanceof TableSinkFactory) {
		return Optional.ofNullable(((TableSinkFactory) tableFactory).createTableSink(tablePath, catalogTable));
	}
	return Optional.empty();
}
 
Example #20
Source File: HbaseTableSink.java    From alchemy with Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    HbaseTableSink copy = new HbaseTableSink(this.hbaseProperties);
    copy.fieldNames = Preconditions.checkNotNull(fieldNames, "fieldNames");
    copy.fieldTypes = Preconditions.checkNotNull(fieldTypes, "fieldTypes");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
        "Number of provided field names and types does not match.");
    return copy;
}
 
Example #21
Source File: Elasticsearch6TableSink.java    From alchemy with Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
            "Number of provided field names and types does not match.");
    Elasticsearch6TableSink tableSink = new Elasticsearch6TableSink(this.elasticsearch6Properties);
    if (this.elasticsearch6Properties.getTableSchema() == null) {
        tableSink.schema = new TableSchema(fieldNames, fieldTypes);
    }
    RowTypeInfo rowTypeInfo = new RowTypeInfo(tableSink.schema.getFieldTypes(), tableSink.schema.getFieldNames());
    tableSink.serializationSchema = new JsonRowSerializationSchema(rowTypeInfo);
    return tableSink;
}
 
Example #22
Source File: FileSystemTableSink.java    From alchemy with Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    FileSystemTableSink copy = new FileSystemTableSink(this.filePropereties);
    copy.fieldNames = Preconditions.checkNotNull(fieldNames, "fieldNames");
    copy.fieldTypes = Preconditions.checkNotNull(fieldTypes, "fieldTypes");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
        "Number of provided field names and types does not match.");
    return copy;
}
 
Example #23
Source File: Elasticsearch5TableSink.java    From alchemy with Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    Elasticsearch5TableSink copy = new Elasticsearch5TableSink(this.elasticsearch5Properties);
    copy.fieldNames = Preconditions.checkNotNull(fieldNames, "fieldNames");
    copy.fieldTypes = Preconditions.checkNotNull(fieldTypes, "fieldTypes");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
            "Number of provided field names and types does not match.");

    RowTypeInfo rowSchema = new RowTypeInfo(fieldTypes, fieldNames);
    copy.jsonRowSchema = new JsonRowSerializationSchema(rowSchema);

    return copy;
}
 
Example #24
Source File: TsdbTableSink.java    From alchemy with Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    TsdbTableSink copy = new TsdbTableSink(this.tsdbProperties);
    copy.fieldNames = Preconditions.checkNotNull(fieldNames, "fieldNames");
    copy.fieldTypes = Preconditions.checkNotNull(fieldTypes, "fieldTypes");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
            "Number of provided field names and types does not match.");
    return copy;
}
 
Example #25
Source File: RedisTableSink.java    From alchemy with Apache License 2.0
@Override
public TableSink<Tuple2<Boolean, Row>> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    RedisTableSink copy = new RedisTableSink(redisProperties);
    copy.fieldNames = Preconditions.checkNotNull(fieldNames, "fieldNames");
    copy.fieldTypes = Preconditions.checkNotNull(fieldTypes, "fieldTypes");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
            "Number of provided field names and types does not match.");
    return copy;
}
 
Example #26
Source File: DubboTableSink.java    From alchemy with Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    DubboTableSink copy = new DubboTableSink(this.dubboProperties);
    copy.fieldNames = Preconditions.checkNotNull(fieldNames, "fieldNames");
    copy.fieldTypes = Preconditions.checkNotNull(fieldTypes, "fieldTypes");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
        "Number of provided field names and types does not match.");
    return copy;
}
 
Example #27
Source File: PrintTableSink.java    From alchemy with Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {
    PrintTableSink copy = new PrintTableSink();
    copy.fieldNames = Preconditions.checkNotNull(fieldNames, "fieldNames");
    copy.fieldTypes = Preconditions.checkNotNull(fieldTypes, "fieldTypes");
    Preconditions.checkArgument(fieldNames.length == fieldTypes.length,
        "Number of provided field names and types does not match.");
    return copy;
}
 
Example #28
Source File: AbstractFlinkClient.java    From alchemy with Apache License 2.0
private void registerSink(Table table, SinkDescriptor sinkDescriptor)
    throws Exception {
    TableSchema tableSchema = table.getSchema();
    TableSink tableSink = sinkDescriptor.transform(tableSchema);
    table.writeToSink(tableSink);
    LOGGER.info("register sink, name:{}, class:{}", sinkDescriptor.getName(), sinkDescriptor.getClass());
}
 
Example #29
Source File: FlinkTaskInstanceBusiness.java    From PoseidonX with Apache License 2.0
/**
 * Handles the SQL business logic for a single logic component.
 * @param flinkComponent the component carrying the SQL to execute
 */
private void dealFlinkTaskLogicComponent(FlinkTaskLogicComponent flinkComponent) {

    System.out.println(flinkComponent.getLogicSql());

    Table resultTable = tEnv.sql(flinkComponent.getLogicSql());
    TableSink tableSink = tableSinkMap.get(flinkComponent.getTargetOutputComponentName());

    resultTable.printSchema();
    resultTable.writeToSink(tableSink);
}
 
Example #30
Source File: HBaseJsonTableSink.java    From PoseidonX with Apache License 2.0
@Override
public TableSink<Row> configure(String[] fieldNames, TypeInformation<?>[] fieldTypes) {

    HBaseJsonTableSink hBaseJsonTableSink = new HBaseJsonTableSink(properties);
    hBaseJsonTableSink.fieldNames = fieldNames;
    hBaseJsonTableSink.fieldTypes = fieldTypes;

    return hBaseJsonTableSink;
}