org.apache.flink.api.common.serialization.SerializationSchema Java Examples

The following examples show how to use org.apache.flink.api.common.serialization.SerializationSchema. They are taken from open source projects; the source file and license are noted above each example.
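Before looking at the project snippets, here is a minimal sketch of a custom SerializationSchema implementation, assuming the records are plain strings; the class name is illustrative and does not come from any of the examples below.

import org.apache.flink.api.common.serialization.SerializationSchema;

import java.nio.charset.StandardCharsets;

public class Utf8StringSerializationSchema implements SerializationSchema<String> {

	private static final long serialVersionUID = 1L;

	@Override
	public byte[] serialize(String element) {
		// Encode each record as UTF-8 bytes before handing it to a sink or connector.
		return element.getBytes(StandardCharsets.UTF_8);
	}
}
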
Example #1
Source File: SerializationSchemaMatcher.java    From flink with Apache License 2.0
private SerializationWithDeserializationSchemaMatcherBuilder(
	SerializationSchema<Row> serializationSchema,
	DeserializationSchema<Row> deserializationSchema) {
	try {
		// we serialize and deserialize the schema to test runtime behavior
		// when the schema is shipped to the cluster
		this.serializationSchema = deserializeObject(
			serializeObject(serializationSchema),
			this.getClass().getClassLoader());
		this.deserializationSchema = deserializeObject(
			serializeObject(deserializationSchema),
			this.getClass().getClassLoader());
	} catch (IOException | ClassNotFoundException e) {
		throw new RuntimeException(e);
	}
}
 
Example #2
Source File: Kafka011TableSourceSinkFactoryTest.java    From Flink-CEPplus with Apache License 2.0
@Override
protected KafkaTableSinkBase getExpectedKafkaTableSink(
		TableSchema schema,
		String topic,
		Properties properties,
		Optional<FlinkKafkaPartitioner<Row>> partitioner,
		SerializationSchema<Row> serializationSchema) {

	return new Kafka011TableSink(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema
	);
}
 
Example #3
Source File: Elasticsearch7UpsertTableSinkFactoryTest.java    From flink with Apache License 2.0
@Override
protected ElasticsearchUpsertTableSinkBase getExpectedTableSink(
		boolean isAppendOnly,
		TableSchema schema,
		List<Host> hosts,
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		ActionRequestFailureHandler failureHandler,
		Map<SinkOption, String> sinkOptions,
		IndexGenerator indexGenerator) {
	return new Elasticsearch7UpsertTableSink(
		isAppendOnly,
		schema,
		hosts,
		index,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions);
}
 
Example #4
Source File: ElasticsearchUpsertTableSinkFactoryBase.java    From Flink-CEPplus with Apache License 2.0
private SerializationSchema<Row> getSerializationSchema(Map<String, String> properties) {
	final String formatType = properties.get(FORMAT_TYPE);
	// we could have added this check to the table factory context
	// but this approach allows to throw more helpful error messages
	// if the supported format has not been added
	if (formatType == null || !formatType.equals(SUPPORTED_FORMAT_TYPE)) {
		throw new ValidationException(
			"The Elasticsearch sink requires a '" + SUPPORTED_FORMAT_TYPE + "' format.");
	}

	@SuppressWarnings("unchecked")
	final SerializationSchemaFactory<Row> formatFactory = TableFactoryService.find(
		SerializationSchemaFactory.class,
		properties,
		this.getClass().getClassLoader());
	return formatFactory.createSerializationSchema(properties);
}
 
Example #5
Source File: FlinkPravegaWriter.java    From flink-connectors with Apache License 2.0
/**
 * Creates a Flink Pravega writer instance that can be added as a sink to a Flink job.
 *
 * @param clientConfig          The Pravega client configuration.
 * @param stream                The destination stream.
 * @param serializationSchema   The implementation for serializing every event into Pravega's storage format.
 * @param eventRouter           The implementation to extract the partition key from the event.
 * @param writerMode            The Pravega writer mode.
 * @param txnLeaseRenewalPeriod Transaction lease renewal period in milliseconds.
 * @param enableWatermark       Flag indicating whether Pravega watermarks should be enabled.
 * @param enableMetrics         Flag indicating whether metrics should be enabled.
 */
protected FlinkPravegaWriter(
        final ClientConfig clientConfig,
        final Stream stream,
        final SerializationSchema<T> serializationSchema,
        final PravegaEventRouter<T> eventRouter,
        final PravegaWriterMode writerMode,
        final long txnLeaseRenewalPeriod,
        final boolean enableWatermark,
        final boolean enableMetrics) {

    this.clientConfig = Preconditions.checkNotNull(clientConfig, "clientConfig");
    this.stream = Preconditions.checkNotNull(stream, "stream");
    this.serializationSchema = Preconditions.checkNotNull(serializationSchema, "serializationSchema");
    this.eventRouter = Preconditions.checkNotNull(eventRouter, "eventRouter");
    this.writerMode = Preconditions.checkNotNull(writerMode, "writerMode");
    Preconditions.checkArgument(txnLeaseRenewalPeriod > 0, "txnLeaseRenewalPeriod must be > 0");
    this.txnLeaseRenewalPeriod = txnLeaseRenewalPeriod;
    this.enableWatermark = enableWatermark;
    this.enableMetrics = enableMetrics;
    this.writerIdPrefix = UUID.randomUUID().toString();
}
 
Example #6
Source File: Kafka010TableSourceSinkFactoryTest.java    From flink with Apache License 2.0
@Override
protected KafkaTableSinkBase getExpectedKafkaTableSink(
		TableSchema schema,
		String topic,
		Properties properties,
		Optional<FlinkKafkaPartitioner<Row>> partitioner,
		SerializationSchema<Row> serializationSchema) {

	return new Kafka010TableSink(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema
	);
}
 
Example #7
Source File: Elasticsearch6DynamicSinkFactory.java    From flink with Apache License 2.0
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
	TableSchema tableSchema = context.getCatalogTable().getSchema();
	ElasticsearchValidationUtils.validatePrimaryKey(tableSchema);
	final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);

	final EncodingFormat<SerializationSchema<RowData>> format = helper.discoverEncodingFormat(
		SerializationFormatFactory.class,
		FORMAT_OPTION);

	helper.validate();
	Configuration configuration = new Configuration();
	context.getCatalogTable()
		.getOptions()
		.forEach(configuration::setString);
	Elasticsearch6Configuration config = new Elasticsearch6Configuration(configuration, context.getClassLoader());

	validate(config, configuration);

	return new Elasticsearch6DynamicSink(
		format,
		config,
		TableSchemaUtils.getPhysicalSchema(tableSchema));
}
 
Example #8
Source File: ElasticsearchUpsertTableSinkBase.java    From Flink-CEPplus with Apache License 2.0
public ElasticsearchUpsertSinkFunction(
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		RequestFactory requestFactory,
		int[] keyFieldIndices) {

	this.index = Preconditions.checkNotNull(index);
	this.docType = Preconditions.checkNotNull(docType);
	this.keyDelimiter = Preconditions.checkNotNull(keyDelimiter);
	this.serializationSchema = Preconditions.checkNotNull(serializationSchema);
	this.contentType = Preconditions.checkNotNull(contentType);
	this.keyFieldIndices = Preconditions.checkNotNull(keyFieldIndices);
	this.requestFactory = Preconditions.checkNotNull(requestFactory);
	this.keyNullLiteral = Preconditions.checkNotNull(keyNullLiteral);
}
 
Example #9
Source File: Elasticsearch6UpsertTableSinkFactoryTest.java    From flink with Apache License 2.0
public TestElasticsearch6UpsertTableSink(
		boolean isAppendOnly,
		TableSchema schema,
		List<Host> hosts,
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		ActionRequestFailureHandler failureHandler,
		Map<SinkOption, String> sinkOptions) {

	super(
		isAppendOnly,
		schema,
		hosts,
		index,
		docType,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions);
}
 
Example #10
Source File: Elasticsearch7DynamicSinkFactory.java    From flink with Apache License 2.0
@Override
public DynamicTableSink createDynamicTableSink(Context context) {
	TableSchema tableSchema = context.getCatalogTable().getSchema();
	ElasticsearchValidationUtils.validatePrimaryKey(tableSchema);

	final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);

	final EncodingFormat<SerializationSchema<RowData>> format = helper.discoverEncodingFormat(
		SerializationFormatFactory.class,
		FORMAT_OPTION);

	helper.validate();
	Configuration configuration = new Configuration();
	context.getCatalogTable()
		.getOptions()
		.forEach(configuration::setString);
	Elasticsearch7Configuration config = new Elasticsearch7Configuration(configuration, context.getClassLoader());

	validate(config, configuration);

	return new Elasticsearch7DynamicSink(
		format,
		config,
		TableSchemaUtils.getPhysicalSchema(tableSchema));
}
 
Example #11
Source File: JsonRowFormatFactoryTest.java    From flink with Apache License 2.0
private void testSchemaSerializationSchema(Map<String, String> properties) {
	final SerializationSchema<?> actual1 = TableFactoryService
		.find(SerializationSchemaFactory.class, properties)
		.createSerializationSchema(properties);
	final SerializationSchema<?> expected1 = new JsonRowSerializationSchema.Builder(SCHEMA).build();
	assertEquals(expected1, actual1);
}
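
The test above only checks that the factory service returns a schema equal to one built directly. For orientation, here is a hedged sketch of using such a schema to serialize a row; the two-field row type is hypothetical and is not the SCHEMA constant from the test.

import java.nio.charset.StandardCharsets;

import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.formats.json.JsonRowSerializationSchema;
import org.apache.flink.types.Row;

public class JsonRowSerializationSketch {
	public static void main(String[] args) {
		// Hypothetical row type; adjust to match the table schema in use.
		JsonRowSerializationSchema schema = new JsonRowSerializationSchema.Builder(
			Types.ROW_NAMED(new String[]{"name", "count"}, Types.STRING, Types.INT)).build();
		byte[] json = schema.serialize(Row.of("flink", 1));
		System.out.println(new String(json, StandardCharsets.UTF_8)); // e.g. {"name":"flink","count":1}
	}
}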
 
Example #12
Source File: Kafka011TableSink.java    From flink with Apache License 2.0
@Override
protected SinkFunction<Row> createKafkaProducer(
		String topic,
		Properties properties,
		SerializationSchema<Row> serializationSchema,
		Optional<FlinkKafkaPartitioner<Row>> partitioner) {
	return new FlinkKafkaProducer011<>(
		topic,
		serializationSchema,
		properties,
		partitioner);
}
 
Example #13
Source File: JsonRowFormatFactoryTest.java    From Flink-CEPplus with Apache License 2.0
private void testJsonSchemaSerializationSchema(Map<String, String> properties) {
	final SerializationSchema<?> actual1 = TableFactoryService
		.find(SerializationSchemaFactory.class, properties)
		.createSerializationSchema(properties);
	final SerializationSchema<?> expected1 = new JsonRowSerializationSchema(JSON_SCHEMA);
	assertEquals(expected1, actual1);
}
 
Example #14
Source File: Kafka010TableSink.java    From flink with Apache License 2.0
public Kafka010TableSink(
		TableSchema schema,
		String topic,
		Properties properties,
		Optional<FlinkKafkaPartitioner<Row>> partitioner,
		SerializationSchema<Row> serializationSchema) {
	super(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema);
}
 
Example #15
Source File: JsonRowFormatFactory.java    From Flink-CEPplus with Apache License 2.0
@Override
public SerializationSchema<Row> createSerializationSchema(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	// create and configure
	return new JsonRowSerializationSchema(createTypeInformation(descriptorProperties));
}
 
Example #16
Source File: KafkaProducerTestBase.java    From Flink-CEPplus with Apache License 2.0
public CustomKeyedSerializationSchemaWrapper(
		SerializationSchema<Tuple2<Long, String>> serializationSchema,
		String defaultTopic,
		String dynamicTopic) {

	super(serializationSchema);

	this.defaultTopic = Preconditions.checkNotNull(defaultTopic);
	this.dynamicTopic = Preconditions.checkNotNull(dynamicTopic);
}
 
Example #17
Source File: Kafka010TableSourceSinkFactory.java    From flink with Apache License 2.0
@Override
protected KafkaTableSinkBase createKafkaTableSink(
		TableSchema schema,
		String topic,
		Properties properties,
		Optional<FlinkKafkaPartitioner<Row>> partitioner,
		SerializationSchema<Row> serializationSchema) {

	return new Kafka010TableSink(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema);
}
 
Example #18
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public <T> DataStreamSink<T> produceIntoKafka(
		DataStream<T> stream,
		String topic,
		SerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	FlinkKafkaProducer010<T> prod = new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
	prod.setFlushOnCheckpoint(true);
	return stream.addSink(prod);
}
 
Example #19
Source File: Kafka010TableSink.java    From Flink-CEPplus with Apache License 2.0
@Override
protected FlinkKafkaProducerBase<Row> createKafkaProducer(
		String topic,
		Properties properties,
		SerializationSchema<Row> serializationSchema,
		Optional<FlinkKafkaPartitioner<Row>> partitioner) {
	return new FlinkKafkaProducer010<>(
		topic,
		serializationSchema,
		properties,
		partitioner.orElse(null));
}
 
Example #20
Source File: RMQSink.java    From flink with Apache License 2.0
/**
 * @param rmqConnectionConfig The RabbitMQ connection configuration {@link RMQConnectionConfig}.
 * @param queueName The queue to publish messages to.
 * @param schema A {@link SerializationSchema} for turning the Java objects received into bytes.
 * @param publishOptions A {@link RMQSinkPublishOptions} for providing the message's routing key and/or properties.
 * @param returnListener A {@link SerializableReturnListener} implementation to handle returned message events.
 */
private RMQSink(
		RMQConnectionConfig rmqConnectionConfig,
		@Nullable String queueName,
		SerializationSchema<IN> schema,
		@Nullable RMQSinkPublishOptions<IN> publishOptions,
		@Nullable SerializableReturnListener returnListener) {
	this.rmqConnectionConfig = rmqConnectionConfig;
	this.queueName = queueName;
	this.schema = schema;
	this.publishOptions = publishOptions;
	this.returnListener = returnListener;
}
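
The private constructor above is typically reached through the public RMQSink constructors. As a usage note, here is a hedged sketch of wiring a string stream to RabbitMQ with the built-in SimpleStringSchema; the host, port, credentials, and queue name are placeholders.

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.rabbitmq.RMQSink;
import org.apache.flink.streaming.connectors.rabbitmq.common.RMQConnectionConfig;

public class RmqSinkSketch {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Placeholder connection settings for the sketch.
		RMQConnectionConfig connectionConfig = new RMQConnectionConfig.Builder()
			.setHost("localhost")
			.setPort(5672)
			.setUserName("guest")
			.setPassword("guest")
			.setVirtualHost("/")
			.build();

		// SimpleStringSchema is the SerializationSchema handed to the sink.
		env.fromElements("a", "b", "c")
			.addSink(new RMQSink<>(connectionConfig, "example-queue", new SimpleStringSchema()));

		env.execute("rmq-sink-sketch");
	}
}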
 
Example #21
Source File: KafkaTableSink.java    From flink with Apache License 2.0
@Override
protected SinkFunction<Row> createKafkaProducer(
	String topic,
	Properties properties,
	SerializationSchema<Row> serializationSchema,
	Optional<FlinkKafkaPartitioner<Row>> partitioner) {
	return new FlinkKafkaProducer<>(
		topic,
		new KeyedSerializationSchemaWrapper<>(serializationSchema),
		properties,
		partitioner);
}
 
Example #22
Source File: SocketClientSink.java    From Flink-CEPplus with Apache License 2.0
/**
 * Creates a new SocketClientSink that retries connections upon failure up to a given number of times.
 * A value of -1 for the number of retries will cause the system to retry an infinite number of times.
 *
 * @param hostName Hostname of the server to connect to.
 * @param port Port of the server.
 * @param schema Schema used to serialize the data into bytes.
 * @param maxNumRetries The maximum number of retries after a message send failed.
 * @param autoflush Flag to indicate whether the socket stream should be flushed after each message.
 */
public SocketClientSink(String hostName, int port, SerializationSchema<IN> schema,
						int maxNumRetries, boolean autoflush) {
	checkArgument(port > 0 && port < 65536, "port is out of range");
	checkArgument(maxNumRetries >= -1, "maxNumRetries must be zero or larger (num retries), or -1 (infinite retries)");

	this.hostName = checkNotNull(hostName, "hostname must not be null");
	this.port = port;
	this.schema = checkNotNull(schema);
	this.maxNumRetries = maxNumRetries;
	this.autoFlush = autoflush;
}
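
The schema passed to this constructor is what turns each record into bytes on the wire. Here is a minimal sketch using the built-in SimpleStringSchema via DataStream#writeToSocket, which creates a SocketClientSink under the hood; the host and port are placeholders.

import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class SocketSinkSketch {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		// "localhost" and 9999 are placeholder values for the sketch.
		env.fromElements("a", "b", "c")
			.writeToSocket("localhost", 9999, new SimpleStringSchema());
		env.execute("socket-sink-sketch");
	}
}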
 
Example #23
Source File: CsvRowFormatFactory.java    From Flink-CEPplus with Apache License 2.0
@Override
public SerializationSchema<Row> createSerializationSchema(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	final CsvRowSerializationSchema.Builder schemaBuilder = new CsvRowSerializationSchema.Builder(
		createTypeInformation(descriptorProperties));

	descriptorProperties.getOptionalCharacter(CsvValidator.FORMAT_FIELD_DELIMITER)
		.ifPresent(schemaBuilder::setFieldDelimiter);

	descriptorProperties.getOptionalString(CsvValidator.FORMAT_LINE_DELIMITER)
		.ifPresent(schemaBuilder::setLineDelimiter);

	descriptorProperties.getOptionalCharacter(CsvValidator.FORMAT_QUOTE_CHARACTER)
		.ifPresent(schemaBuilder::setQuoteCharacter);

	descriptorProperties.getOptionalString(CsvValidator.FORMAT_ARRAY_ELEMENT_DELIMITER)
		.ifPresent(schemaBuilder::setArrayElementDelimiter);

	descriptorProperties.getOptionalCharacter(CsvValidator.FORMAT_ESCAPE_CHARACTER)
		.ifPresent(schemaBuilder::setEscapeCharacter);

	descriptorProperties.getOptionalString(CsvValidator.FORMAT_NULL_LITERAL)
		.ifPresent(schemaBuilder::setNullLiteral);

	return schemaBuilder.build();
}
 
Example #24
Source File: Kafka010TableSink.java    From Flink-CEPplus with Apache License 2.0
public Kafka010TableSink(
		TableSchema schema,
		String topic,
		Properties properties,
		Optional<FlinkKafkaPartitioner<Row>> partitioner,
		SerializationSchema<Row> serializationSchema) {
	super(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema);
}
 
Example #25
Source File: FlinkPulsarProducer.java    From pulsar with Apache License 2.0
public FlinkPulsarProducer(ClientConfigurationData clientConfigurationData,
                           ProducerConfigurationData producerConfigurationData,
                           SerializationSchema<T> serializationSchema,
                           PulsarKeyExtractor<T> keyExtractor,
                           PulsarPropertiesExtractor<T> propertiesExtractor) {
    this.clientConf = checkNotNull(clientConfigurationData, "client conf can not be null");
    this.producerConf = checkNotNull(producerConfigurationData, "producer conf can not be null");
    this.schema = checkNotNull(serializationSchema, "Serialization Schema not set");
    this.flinkPulsarKeyExtractor = getOrNullKeyExtractor(keyExtractor);
    this.flinkPulsarPropertiesExtractor = getOrNullPropertiesExtractor(propertiesExtractor);
    ClosureCleaner.ensureSerializable(serializationSchema);
}
 
Example #26
Source File: AvroRowFormatFactoryTest.java    From flink with Apache License 2.0
private void testAvroSchemaSerializationSchema(Map<String, String> properties) {
	final SerializationSchema<?> actual1 = TableFactoryService
		.find(SerializationSchemaFactory.class, properties)
		.createSerializationSchema(properties);
	final SerializationSchema<?> expected1 = new AvroRowSerializationSchema(AVRO_SCHEMA);
	assertEquals(expected1, actual1);
}
 
Example #27
Source File: Kafka09TableSourceSinkFactory.java    From Flink-CEPplus with Apache License 2.0
@Override
protected KafkaTableSinkBase createKafkaTableSink(
		TableSchema schema,
		String topic,
		Properties properties,
		Optional<FlinkKafkaPartitioner<Row>> partitioner,
		SerializationSchema<Row> serializationSchema) {

	return new Kafka09TableSink(
		schema,
		topic,
		properties,
		partitioner,
		serializationSchema);
}
 
Example #28
Source File: Elasticsearch6UpsertTableSinkFactory.java    From flink with Apache License 2.0
@Override
protected ElasticsearchUpsertTableSinkBase createElasticsearchUpsertTableSink(
		boolean isAppendOnly,
		TableSchema schema,
		List<Host> hosts,
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		ActionRequestFailureHandler failureHandler,
		Map<SinkOption, String> sinkOptions) {

	return new Elasticsearch6UpsertTableSink(
		isAppendOnly,
		schema,
		hosts,
		index,
		docType,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions);
}
 
Example #29
Source File: Elasticsearch6UpsertTableSink.java    From flink with Apache License 2.0
public Elasticsearch6UpsertTableSink(
		boolean isAppendOnly,
		TableSchema schema,
		List<Host> hosts,
		String index,
		String docType,
		String keyDelimiter,
		String keyNullLiteral,
		SerializationSchema<Row> serializationSchema,
		XContentType contentType,
		ActionRequestFailureHandler failureHandler,
		Map<SinkOption, String> sinkOptions) {

	super(
		isAppendOnly,
		schema,
		hosts,
		index,
		docType,
		keyDelimiter,
		keyNullLiteral,
		serializationSchema,
		contentType,
		failureHandler,
		sinkOptions,
		UPDATE_REQUEST_FACTORY);
}
 
Example #30
Source File: AbstractStreamingWriterBuilder.java    From flink-connectors with Apache License 2.0
/**
 * Creates the sink function for the current builder state.
 *
 * @param serializationSchema the serialization schema to use.
 * @param eventRouter the event router to use.
 */
protected FlinkPravegaWriter<T> createSinkFunction(SerializationSchema<T> serializationSchema, PravegaEventRouter<T> eventRouter) {
    Preconditions.checkNotNull(serializationSchema, "serializationSchema");
    Preconditions.checkNotNull(eventRouter, "eventRouter");
    return new FlinkPravegaWriter<>(
            getPravegaConfig().getClientConfig(),
            resolveStream(),
            serializationSchema,
            eventRouter,
            writerMode,
            txnLeaseRenewalPeriod.toMilliseconds(),
            enableWatermark,
            isMetricsEnabled());
}