Java Code Examples for org.apache.flink.api.common.serialization.SerializationSchema

The following examples show how to use org.apache.flink.api.common.serialization.SerializationSchema. These examples are extracted from open source projects; the originating project, source file, and license are noted above each example where available.
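Before the examples, a minimal sketch of a custom implementation may be useful: the interface requires a single serialize() method that turns a record into the raw bytes a connector writes to the external system. The class below is illustrative only and does not come from any of the projects listed here.

import org.apache.flink.api.common.serialization.SerializationSchema;

import java.nio.charset.StandardCharsets;

public class Utf8StringSerializationSchema implements SerializationSchema<String> {

	private static final long serialVersionUID = 1L;

	@Override
	public byte[] serialize(String element) {
		// Turn a single record into the bytes that the connector writes to the external system.
		return element.getBytes(StandardCharsets.UTF_8);
	}
}
 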
Example 1
Source Project: flink   Source File: Elasticsearch6DynamicSinkFactory.java    License: Apache License 2.0
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
	TableSchema tableSchema = context.getCatalogTable().getSchema();
	ElasticsearchValidationUtils.validatePrimaryKey(tableSchema);
	final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);

	final EncodingFormat<SerializationSchema<RowData>> format = helper.discoverEncodingFormat(
		SerializationFormatFactory.class,
		FORMAT_OPTION);

	helper.validate();
	Configuration configuration = new Configuration();
	context.getCatalogTable()
		.getOptions()
		.forEach(configuration::setString);
	Elasticsearch6Configuration config = new Elasticsearch6Configuration(configuration, context.getClassLoader());

	validate(config, configuration);

	return new Elasticsearch6DynamicSink(
		format,
		config,
		TableSchemaUtils.getPhysicalSchema(tableSchema));
}
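The EncodingFormat discovered above does not itself serialize records; the sink turns it into a runtime SerializationSchema later, typically inside getSinkRuntimeProvider. A hedged sketch of that step (variable names are assumptions, not code from the file above):

// Inside the DynamicTableSink, e.g. in getSinkRuntimeProvider(DynamicTableSink.Context context):
SerializationSchema<RowData> runtimeEncoder =
	format.createRuntimeEncoder(context, schema.toRowDataType());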
 
Example 2
Source Project: flink   Source File: SerializationSchemaMatcher.java    License: Apache License 2.0
private SerializationWithDeserializationSchemaMatcherBuilder(
	SerializationSchema<Row> serializationSchema,
	DeserializationSchema<Row> deserializationSchema) {
	try {
		// we serialize and deserialize the schema to test runtime behavior
		// when the schema is shipped to the cluster
		this.serializationSchema = deserializeObject(
			serializeObject(serializationSchema),
			this.getClass().getClassLoader());
		this.deserializationSchema = deserializeObject(
			serializeObject(deserializationSchema),
			this.getClass().getClassLoader());
	} catch (IOException | ClassNotFoundException e) {
		throw new RuntimeException(e);
	}
}
 
Example 3
@Override
protected KafkaTableSinkBase getExpectedKafkaTableSink(
		TableSchema schema,
		String topic,
		Properties properties,
		Optional<FlinkKafkaPartitioner<Row>> partitioner,
		SerializationSchema<Row> serializationSchema) {

	return new Kafka011TableSink(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema
	);
}
 
Example 4
private SerializationSchema<Row> getSerializationSchema(Map<String, String> properties) {
	final String formatType = properties.get(FORMAT_TYPE);
	// we could have added this check to the table factory context
	// but this approach allows us to throw more helpful error messages
	// if the supported format has not been added
	if (formatType == null || !formatType.equals(SUPPORTED_FORMAT_TYPE)) {
		throw new ValidationException(
			"The Elasticsearch sink requires a '" + SUPPORTED_FORMAT_TYPE + "' format.");
	}

	@SuppressWarnings("unchecked")
	final SerializationSchemaFactory<Row> formatFactory = TableFactoryService.find(
		SerializationSchemaFactory.class,
		properties,
		this.getClass().getClassLoader());
	return formatFactory.createSerializationSchema(properties);
}
 
Example 5
public ElasticsearchUpsertSinkFunction(
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		RequestFactory requestFactory,
		int[] keyFieldIndices) {

	this.index = Preconditions.checkNotNull(index);
	this.docType = Preconditions.checkNotNull(docType);
	this.keyDelimiter = Preconditions.checkNotNull(keyDelimiter);
	this.serializationSchema = Preconditions.checkNotNull(serializationSchema);
	this.contentType = Preconditions.checkNotNull(contentType);
	this.keyFieldIndices = Preconditions.checkNotNull(keyFieldIndices);
	this.requestFactory = Preconditions.checkNotNull(requestFactory);
	this.keyNullLiteral = Preconditions.checkNotNull(keyNullLiteral);
}
 
Example 6
Source Project: flink   Source File: Elasticsearch7DynamicSinkFactory.java    License: Apache License 2.0
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
	TableSchema tableSchema = context.getCatalogTable().getSchema();
	ElasticsearchValidationUtils.validatePrimaryKey(tableSchema);

	final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);

	final EncodingFormat<SerializationSchema<RowData>> format = helper.discoverEncodingFormat(
		SerializationFormatFactory.class,
		FORMAT_OPTION);

	helper.validate();
	Configuration configuration = new Configuration();
	context.getCatalogTable()
		.getOptions()
		.forEach(configuration::setString);
	Elasticsearch7Configuration config = new Elasticsearch7Configuration(configuration, context.getClassLoader());

	validate(config, configuration);

	return new Elasticsearch7DynamicSink(
		format,
		config,
		TableSchemaUtils.getPhysicalSchema(tableSchema));
}
 
Example 7
@Override
protected ElasticsearchUpsertTableSinkBase getExpectedTableSink(
		boolean isAppendOnly,
		TableSchema schema,
		List<Host> hosts,
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		ActionRequestFailureHandler failureHandler,
		Map<SinkOption, String> sinkOptions,
		IndexGenerator indexGenerator) {
	return new Elasticsearch7UpsertTableSink(
		isAppendOnly,
		schema,
		hosts,
		index,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions);
}
 
Example 8
public TestElasticsearch6UpsertTableSink(
		boolean isAppendOnly,
		TableSchema schema,
		List<Host> hosts,
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		ActionRequestFailureHandler failureHandler,
		Map<SinkOption, String> sinkOptions) {

	super(
		isAppendOnly,
		schema,
		hosts,
		index,
		docType,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions);
}
 
Example 9
Source Project: flink   Source File: Kafka010TableSourceSinkFactoryTest.java    License: Apache License 2.0
@Override
protected KafkaTableSinkBase getExpectedKafkaTableSink(
		TableSchema schema,
		String topic,
		Properties properties,
		Optional<FlinkKafkaPartitioner<Row>> partitioner,
		SerializationSchema<Row> serializationSchema) {

	return new Kafka010TableSink(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema
	);
}
 
Example 10
Source Project: flink-connectors   Source File: FlinkPravegaWriter.java    License: Apache License 2.0
/**
 * A Flink Pravega writer instance that can be added as a sink to a Flink job.
 *
 * @param clientConfig          The Pravega client configuration.
 * @param stream                The destination stream.
 * @param serializationSchema   The implementation for serializing every event into Pravega's storage format.
 * @param eventRouter           The implementation to extract the partition key from the event.
 * @param writerMode            The Pravega writer mode.
 * @param txnLeaseRenewalPeriod Transaction lease renewal period in milliseconds.
 * @param enableWatermark       Flag indicating whether Pravega watermarks should be enabled.
 * @param enableMetrics         Flag indicating whether metrics should be enabled.
 */
protected FlinkPravegaWriter(
        final ClientConfig clientConfig,
        final Stream stream,
        final SerializationSchema<T> serializationSchema,
        final PravegaEventRouter<T> eventRouter,
        final PravegaWriterMode writerMode,
        final long txnLeaseRenewalPeriod,
        final boolean enableWatermark,
        final boolean enableMetrics) {

    this.clientConfig = Preconditions.checkNotNull(clientConfig, "clientConfig");
    this.stream = Preconditions.checkNotNull(stream, "stream");
    this.serializationSchema = Preconditions.checkNotNull(serializationSchema, "serializationSchema");
    this.eventRouter = Preconditions.checkNotNull(eventRouter, "eventRouter");
    this.writerMode = Preconditions.checkNotNull(writerMode, "writerMode");
    Preconditions.checkArgument(txnLeaseRenewalPeriod > 0, "txnLeaseRenewalPeriod must be > 0");
    this.txnLeaseRenewalPeriod = txnLeaseRenewalPeriod;
    this.enableWatermark = enableWatermark;
    this.enableMetrics = enableMetrics;
    this.writerIdPrefix = UUID.randomUUID().toString();
}
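The constructor above is protected; applications normally obtain a writer through the connector's builder and attach it to a DataStream. A hedged usage sketch, assuming a String stream, the default Pravega configuration, and a stream named "my-scope/my-stream" (builder option names may differ between connector versions):

import io.pravega.connectors.flink.FlinkPravegaWriter;
import io.pravega.connectors.flink.PravegaConfig;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class PravegaWriterUsageSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Build the writer through the builder API instead of the protected constructor.
		FlinkPravegaWriter<String> writer = FlinkPravegaWriter.<String>builder()
			.withPravegaConfig(PravegaConfig.fromDefaults())
			.forStream("my-scope/my-stream")
			.withSerializationSchema(new SimpleStringSchema())
			.withEventRouter(event -> event)   // use the event itself as the routing key
			.build();

		env.fromElements("a", "b", "c")
			.addSink(writer)
			.name("pravega-writer");

		env.execute("pravega-writer-sketch");
	}
}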
 
Example 11
@Override
protected KafkaTableSinkBase createKafkaTableSink(
		TableSchema schema,
		String topic,
		Properties properties,
		Optional<FlinkKafkaPartitioner<Row>> partitioner,
		SerializationSchema<Row> serializationSchema) {

	return new Kafka09TableSink(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema);
}
 
Example 12
Source Project: flink   Source File: TestFormatFactory.java    License: Apache License 2.0
@Override
public EncodingFormat<SerializationSchema<RowData>> createEncodingFormat(
		DynamicTableFactory.Context context,
		ReadableConfig formatConfig) {
	FactoryUtil.validateFactoryOptions(this, formatConfig);
	return new EncodingFormatMock(formatConfig.get(DELIMITER));
}
 
Example 13
Source Project: Flink-CEPplus   Source File: Kafka09TableSink.java    License: Apache License 2.0
@Override
protected FlinkKafkaProducerBase<Row> createKafkaProducer(
		String topic,
		Properties properties,
		SerializationSchema<Row> serializationSchema,
		Optional<FlinkKafkaPartitioner<Row>> partitioner) {
	return new FlinkKafkaProducer09<>(
		topic,
		serializationSchema,
		properties,
		partitioner.orElse(null));
}
 
Example 14
Source Project: flink   Source File: KafkaTableSourceSinkFactory.java    License: Apache License 2.0
@Override
protected KafkaTableSinkBase createKafkaTableSink(
	TableSchema schema,
	String topic,
	Properties properties,
	Optional<FlinkKafkaPartitioner<Row>> partitioner,
	SerializationSchema<Row> serializationSchema) {

	return new KafkaTableSink(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema);
}
 
Example 15
Source Project: flink   Source File: KafkaTableSink.java    License: Apache License 2.0
public KafkaTableSink(
	TableSchema schema,
	String topic,
	Properties properties,
	Optional<FlinkKafkaPartitioner<Row>> partitioner,
	SerializationSchema<Row> serializationSchema) {

	super(schema, topic, properties, partitioner, serializationSchema);
}
 
Example 16
Source Project: flink   Source File: Kafka08TableSourceSinkFactory.java    License: Apache License 2.0
@Override
protected KafkaTableSinkBase createKafkaTableSink(
		TableSchema schema,
		String topic,
		Properties properties,
		Optional<FlinkKafkaPartitioner<Row>> partitioner,
		SerializationSchema<Row> serializationSchema) {

	return new Kafka08TableSink(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema);
}
 
Example 17
Source Project: alchemy   Source File: Elasticsearch6SinkFunction.java    License: Apache License 2.0
public Elasticsearch6SinkFunction(
        String index,
        Integer fieldIndex,
        String docType,
        String keyDelimiter,
        String keyNullLiteral,
        SerializationSchema<Row> serializationSchema,
        XContentType contentType,
        ElasticsearchUpsertTableSinkBase.RequestFactory requestFactory,
        int[] keyFieldIndices) {
    this.index = index;
    this.fieldIndex = fieldIndex;
    this.docType = Preconditions.checkNotNull(docType);
    this.keyDelimiter = Preconditions.checkNotNull(keyDelimiter);
    this.serializationSchema = Preconditions.checkNotNull(serializationSchema);
    this.contentType = Preconditions.checkNotNull(contentType);
    this.keyFieldIndices = Preconditions.checkNotNull(keyFieldIndices);
    this.requestFactory = Preconditions.checkNotNull(requestFactory);
    this.keyNullLiteral = Preconditions.checkNotNull(keyNullLiteral);
}
 
Example 18
Source Project: flink   Source File: KafkaDynamicSink.java    License: Apache License 2.0
@Override
protected SinkFunction<RowData> createKafkaProducer(
		String topic,
		Properties properties,
		SerializationSchema<RowData> serializationSchema,
		Optional<FlinkKafkaPartitioner<RowData>> partitioner) {
	return new FlinkKafkaProducer<>(
			topic,
			serializationSchema,
			properties,
			partitioner);
}
 
Example 19
@Override
protected ElasticsearchUpsertTableSinkBase createElasticsearchUpsertTableSink(
		boolean isAppendOnly,
		TableSchema schema,
		List<Host> hosts,
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		ActionRequestFailureHandler failureHandler,
		Map<SinkOption, String> sinkOptions) {

	return new Elasticsearch6UpsertTableSink(
		isAppendOnly,
		schema,
		hosts,
		index,
		docType,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions);
}
 
Example 20
Source Project: alchemy   Source File: Kafka010SinkDescriptor.java    License: Apache License 2.0
@Override
KafkaTableSinkBase newTableSink(
    TableSchema schema,
    String topic,
    Properties properties,
    Optional<FlinkKafkaPartitioner<Row>> partitioner,
    SerializationSchema<Row> serializationSchema) {
    return new Kafka010TableSink(
        schema,
        topic,
        properties,
        partitioner,
        serializationSchema
    );
}
 
Example 21
Source Project: flink   Source File: AvroRowFormatFactory.java    License: Apache License 2.0
@Override
public SerializationSchema<Row> createSerializationSchema(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	// create and configure
	if (descriptorProperties.containsKey(AvroValidator.FORMAT_RECORD_CLASS)) {
		return new AvroRowSerializationSchema(
			descriptorProperties.getClass(AvroValidator.FORMAT_RECORD_CLASS, SpecificRecord.class));
	} else {
		return new AvroRowSerializationSchema(descriptorProperties.getString(AvroValidator.FORMAT_AVRO_SCHEMA));
	}
}
 
Example 22
Source Project: Flink-CEPplus   Source File: KafkaTableSourceSinkFactory.java    License: Apache License 2.0
@Override
protected KafkaTableSinkBase createKafkaTableSink(
	TableSchema schema,
	String topic,
	Properties properties,
	Optional<FlinkKafkaPartitioner<Row>> partitioner,
	SerializationSchema<Row> serializationSchema) {

	return new KafkaTableSink(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema);
}
 
Example 23
@Override
protected KafkaTableSinkBase getExpectedKafkaTableSink(
	TableSchema schema,
	String topic,
	Properties properties,
	Optional<FlinkKafkaPartitioner<Row>> partitioner,
	SerializationSchema<Row> serializationSchema) {

	return new KafkaTableSink(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema);
}
 
Example 24
Source Project: tutorials   Source File: BackupCreatorIntegrationTest.java    License: MIT License
@Test
public void givenProperBackupObject_whenSerializeIsInvoked_thenObjectIsProperlySerialized() throws IOException {
    InputMessage message = new InputMessage("Me", "User", LocalDateTime.now(), "Test Message");
    List<InputMessage> messages = Arrays.asList(message);
    Backup backup = new Backup(messages, LocalDateTime.now());
    byte[] backupSerialized = mapper.writeValueAsBytes(backup);
    SerializationSchema<Backup> serializationSchema = new BackupSerializationSchema();
    byte[] backupProcessed = serializationSchema.serialize(backup);
    
    assertArrayEquals(backupSerialized, backupProcessed);
}
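The BackupSerializationSchema referenced in the test above is not shown on this page. A plausible sketch, assuming it simply delegates to a Jackson ObjectMapper with Java time support (the Backup type comes from the test; the exact mapper configuration is an assumption):

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule;
import org.apache.flink.api.common.serialization.SerializationSchema;

public class BackupSerializationSchema implements SerializationSchema<Backup> {

	private static final long serialVersionUID = 1L;

	// Created lazily so the schema stays serializable when Flink ships it to the cluster.
	private transient ObjectMapper objectMapper;

	@Override
	public byte[] serialize(Backup backup) {
		if (objectMapper == null) {
			objectMapper = new ObjectMapper().registerModule(new JavaTimeModule());
		}
		try {
			return objectMapper.writeValueAsBytes(backup);
		} catch (JsonProcessingException e) {
			throw new RuntimeException("Failed to serialize backup", e);
		}
	}
}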
 
Example 25
Source Project: flink   Source File: AvroSerializationSchemaTest.java    License: Apache License 2.0
@Test
public void testSpecificRecordWithConfluentSchemaRegistry() throws Exception {
	SerializationSchema<Address> serializer = AvroSerializationSchema.forSpecific(Address.class);

	byte[] encodedAddress = writeRecord(address, Address.getClassSchema());
	byte[] serializedAddress = serializer.serialize(address);
	assertArrayEquals(encodedAddress, serializedAddress);
}
 
Example 26
Source Project: flink   Source File: KafkaTableSink.java    License: Apache License 2.0
public KafkaTableSink(
	TableSchema schema,
	String topic,
	Properties properties,
	Optional<FlinkKafkaPartitioner<Row>> partitioner,
	SerializationSchema<Row> serializationSchema) {

	super(schema, topic, properties, partitioner, serializationSchema);
}
 
Example 27
Source Project: flink   Source File: AvroSerializationSchemaTest.java    License: Apache License 2.0
@Test
public void testGenericRecord() throws Exception {
	SerializationSchema<GenericRecord> serializationSchema =
		AvroSerializationSchema.forGeneric(
			address.getSchema()
		);

	byte[] encodedAddress = writeRecord(address, Address.getClassSchema());
	byte[] dataSerialized = serializationSchema.serialize(address);
	assertArrayEquals(encodedAddress, dataSerialized);
}
 
Example 28
/**
 * Creates the sink function for the current builder state.
 *
 * @param serializationSchema the serialization schema to use.
 * @param eventRouter the event router to use.
 */
protected FlinkPravegaWriter<T> createSinkFunction(SerializationSchema<T> serializationSchema, PravegaEventRouter<T> eventRouter) {
    Preconditions.checkNotNull(serializationSchema, "serializationSchema");
    Preconditions.checkNotNull(eventRouter, "eventRouter");
    return new FlinkPravegaWriter<>(
            getPravegaConfig().getClientConfig(),
            resolveStream(),
            serializationSchema,
            eventRouter,
            writerMode,
            txnLeaseRenewalPeriod.toMilliseconds(),
            enableWatermark,
            isMetricsEnabled());
}
 
Example 29
Source Project: flink   Source File: Elasticsearch6UpsertTableSink.java    License: Apache License 2.0
public Elasticsearch6UpsertTableSink(
		boolean isAppendOnly,
		TableSchema schema,
		List<Host> hosts,
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		ActionRequestFailureHandler failureHandler,
		Map<SinkOption, String> sinkOptions) {

	super(
		isAppendOnly,
		schema,
		hosts,
		index,
		docType,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions,
		UPDATE_REQUEST_FACTORY);
}
 
Example 30
@Override
protected ElasticsearchUpsertTableSinkBase createElasticsearchUpsertTableSink(
		boolean isAppendOnly,
		TableSchema schema,
		List<Host> hosts,
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		ActionRequestFailureHandler failureHandler,
		Map<SinkOption, String> sinkOptions) {

	return new Elasticsearch6UpsertTableSink(
		isAppendOnly,
		schema,
		hosts,
		index,
		docType,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions);
}