org.apache.flink.streaming.api.datastream.DataStreamSink Java Examples

The following examples show how to use org.apache.flink.streaming.api.datastream.DataStreamSink. They are taken from open source projects; the source file, originating project, and license are noted above each example.
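Before the project examples, a minimal self-contained sketch of the class itself may help. A DataStreamSink is the handle returned by addSink() (or by a shortcut such as print()); it configures the sink operator rather than transforming data further. The operator names below are illustrative, not taken from any project in this list.

import org.apache.flink.streaming.api.datastream.DataStreamSink;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class DataStreamSinkSketch {

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // print() returns a DataStreamSink, just like addSink() does.
        DataStreamSink<String> sink = env.fromElements("one", "two", "three")
                .print();

        sink.name("console-sink")      // operator name shown in logs and the web UI
            .uid("console-sink")       // stable uid for savepoint compatibility
            .setParallelism(1);        // parallelism of the sink operator only

        env.execute("DataStreamSink sketch");
    }
}
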
Example #1
Source File: NiFiSinkTopologyExample.java    From flink with Apache License 2.0
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
			.url("http://localhost:8080/nifi")
			.portName("Data from Flink")
			.buildConfig();

	DataStreamSink<String> dataStream = env.fromElements("one", "two", "three", "four", "five", "q")
			.addSink(new NiFiSink<>(clientConfig, new NiFiDataPacketBuilder<String>() {
				@Override
				public NiFiDataPacket createNiFiDataPacket(String s, RuntimeContext ctx) {
					return new StandardNiFiDataPacket(s.getBytes(ConfigConstants.DEFAULT_CHARSET),
						new HashMap<String, String>());
				}
			}));

	env.execute();
}
 
Example #2
Source File: FlumeSinkExample.java    From bahir-flink with Apache License 2.0
public static void main(String[] args) throws Exception {
    // FlumeSink sends data to a Flume agent
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    FlumeEventBuilder<String> flumeEventBuilder = new FlumeEventBuilder<String>() {
        @Override
        public Event createFlumeEvent(String value, RuntimeContext ctx) {
            return EventBuilder.withBody(value, Charset.defaultCharset());
        }
    };

    // clientType, hostname, and port are not defined in the original snippet;
    // the values below are illustrative placeholders for a local Flume Thrift agent.
    String clientType = "thrift";
    String hostname = "localhost";
    int port = 9000;

    // The trailing int arguments configure batching and retries; all are 1 in this example.
    FlumeSink<String> flumeSink = new FlumeSink<>(clientType, hostname, port, flumeEventBuilder, 1, 1, 1);

    // Note on parallelism vs. the FlumeSink batchSize: each parallel subtask buffers
    // records until it has batchSize of them, so a subtask that never receives enough
    // records will not flush anything to the FlumeThriftService output.
    DataStreamSink<String> dataStream = env.fromElements("one", "two", "three", "four", "five")
            .addSink(flumeSink);

    env.execute();
}
 
Example #3
Source File: NiFiSinkMain.java    From flink-learning with Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data from Flink")
            .buildConfig();

    DataStreamSink<String> dataStream = env.fromElements("one", "two", "three", "four", "five", "q")
            .addSink(new NiFiSink<>(clientConfig, new NiFiDataPacketBuilder<String>() {
                @Override
                public NiFiDataPacket createNiFiDataPacket(String s, RuntimeContext ctx) {
                    return new StandardNiFiDataPacket(s.getBytes(ConfigConstants.DEFAULT_CHARSET),
                            new HashMap<String, String>());
                }
            }));

    env.execute();
}
 
Example #4
Source File: CassandraAppendTableSink.java    From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
	if (!(dataStream.getType() instanceof RowTypeInfo)) {
		throw new TableException("No support for the type of the given DataStream: " + dataStream.getType());
	}

	CassandraRowSink sink = new CassandraRowSink(
		dataStream.getType().getArity(),
		cql,
		builder,
		CassandraSinkBaseConfig.newBuilder().build(),
		new NoOpCassandraFailureHandler());

	return dataStream
			.addSink(sink)
			.setParallelism(dataStream.getParallelism())
			.name(TableConnectorUtils.generateRuntimeName(this.getClass(), fieldNames));

}
 
Example #5
Source File: HBaseUpsertTableSink.java    From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
	Configuration hbaseClientConf = HBaseConfigurationUtil.getHBaseConfiguration();
	hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, hbaseOptions.getZkQuorum());
	hbaseOptions.getZkNodeParent().ifPresent(v -> hbaseClientConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, v));
	HBaseSinkFunction sinkFunction = new HBaseSinkFunction(
		hbaseOptions.getTableName(),
		hbaseClientConf,
		new LegacyMutationConverter(hbaseTableSchema),
		writeOptions.getBufferFlushMaxSizeInBytes(),
		writeOptions.getBufferFlushMaxRows(),
		writeOptions.getBufferFlushIntervalMillis());
	return dataStream
		.addSink(sinkFunction)
		.setParallelism(dataStream.getParallelism())
		.name(TableConnectorUtils.generateRuntimeName(this.getClass(), tableSchema.getFieldNames()));
}
 
Example #6
Source File: ElasticsearchUpsertTableSinkBase.java    From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
	final ElasticsearchUpsertSinkFunction upsertFunction =
		new ElasticsearchUpsertSinkFunction(
			index,
			docType,
			keyDelimiter,
			keyNullLiteral,
			serializationSchema,
			contentType,
			requestFactory,
			keyFieldIndices);
	final SinkFunction<Tuple2<Boolean, Row>> sinkFunction = createSinkFunction(
		hosts,
		failureHandler,
		sinkOptions,
		upsertFunction);
	return dataStream.addSink(sinkFunction)
		.setParallelism(dataStream.getParallelism())
		.name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}
 
Example #7
Source File: NiFiSinkTopologyExample.java    From Flink-CEPplus with Apache License 2.0
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
			.url("http://localhost:8080/nifi")
			.portName("Data from Flink")
			.buildConfig();

	DataStreamSink<String> dataStream = env.fromElements("one", "two", "three", "four", "five", "q")
			.addSink(new NiFiSink<>(clientConfig, new NiFiDataPacketBuilder<String>() {
				@Override
				public NiFiDataPacket createNiFiDataPacket(String s, RuntimeContext ctx) {
					return new StandardNiFiDataPacket(s.getBytes(ConfigConstants.DEFAULT_CHARSET),
						new HashMap<String, String>());
				}
			}));

	env.execute();
}
 
Example #8
Source File: HBaseUpsertTableSink.java    From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
	Configuration hbaseClientConf = HBaseConfiguration.create();
	hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, hbaseOptions.getZkQuorum());
	hbaseOptions.getZkNodeParent().ifPresent(v -> hbaseClientConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, v));
	HBaseUpsertSinkFunction sinkFunction = new HBaseUpsertSinkFunction(
		hbaseOptions.getTableName(),
		hbaseTableSchema,
		hbaseClientConf,
		writeOptions.getBufferFlushMaxSizeInBytes(),
		writeOptions.getBufferFlushMaxRows(),
		writeOptions.getBufferFlushIntervalMillis());
	return dataStream
		.addSink(sinkFunction)
		.setParallelism(dataStream.getParallelism())
		.name(TableConnectorUtils.generateRuntimeName(this.getClass(), tableSchema.getFieldNames()));
}
 
Example #9
Source File: FlinkKafkaProducer010.java    From Flink-CEPplus with Apache License 2.0
private FlinkKafkaProducer010Configuration(
		DataStreamSink<T> originalSink,
		DataStream<T> inputStream,
		FlinkKafkaProducer010<T> producer) {
	//noinspection unchecked
	super(inputStream, originalSink.getTransformation().getOperator());
	this.transformation = originalSink.getTransformation();
	this.producer = producer;
}
 
Example #10
Source File: KafkaTestEnvironmentImpl.java    From Flink-CEPplus with Apache License 2.0
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props) {
	FlinkKafkaProducer011<T> prod = new FlinkKafkaProducer011<>(
		topic, serSchema, props, Optional.of(new FlinkFixedPartitioner<>()), producerSemantic, FlinkKafkaProducer011.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE);

	prod.setWriteTimestampToKafka(true);

	return stream.addSink(prod);
}
 
Example #11
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
	return stream.addSink(new FlinkKafkaProducer<T>(
		topic,
		serSchema,
		props,
		Optional.ofNullable(partitioner),
		producerSemantic,
		FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
 
Example #12
Source File: KafkaTestEnvironment.java    From flink with Apache License 2.0
@Deprecated
public abstract <T> DataStreamSink<T> produceIntoKafka(
	DataStream<T> stream,
	String topic,
	KeyedSerializationSchema<T> serSchema,
	Properties props,
	FlinkKafkaPartitioner<T> partitioner);
 
Example #13
Source File: CollectStreamTableSink.java    From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> stream) {
	// add sink
	return stream
		.addSink(new CollectSink<>(targetAddress, targetPort, serializer))
		.name("SQL Client Stream Collect Sink")
		.setParallelism(1);
}
 
Example #14
Source File: MyRetractStreamTableSink.java    From flink-learning with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    return dataStream.addSink(new SinkFunction<Tuple2<Boolean, Row>>() {
        @Override
        public void invoke(Tuple2<Boolean, Row> value, Context context) throws Exception {
            // Custom sink for a retract stream:
            // f0 == true  : a new row is being inserted
            // f0 == false : an old row is being retracted (deleted)
            if (value.f0) {
                // Write to MySQL or Kafka, or send an HTTP POST, depending on the use case.
                System.out.println(value.f1);
            }
            }
        }
    });
}
 
Example #15
Source File: KuduTableSink.java    From bahir-flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStreamTuple) {
    KuduSink upsertKuduSink = new KuduSink(writerConfigBuilder.build(), tableInfo, new UpsertOperationMapper(getTableSchema().getFieldNames()));

    return dataStreamTuple
            .addSink(upsertKuduSink)
            .setParallelism(dataStreamTuple.getParallelism())
            .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getTableSchema().getFieldNames()));
}
 
Example #16
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public <T> DataStreamSink<T> produceIntoKafka(
		DataStream<T> stream,
		String topic,
		SerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	return stream.addSink(new FlinkKafkaProducer<T>(
		topic,
		serSchema,
		props,
		partitioner,
		producerSemantic,
		FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
 
Example #17
Source File: KafkaTestEnvironmentImpl.java    From Flink-CEPplus with Apache License 2.0
@Override
public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
	return stream.addSink(new FlinkKafkaProducer<T>(
		topic,
		serSchema,
		props,
		Optional.ofNullable(partitioner),
		producerSemantic,
		FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
 
Example #18
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public <T> DataStreamSink<T> produceIntoKafka(
		DataStream<T> stream,
		String topic,
		SerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	return stream.addSink(new FlinkKafkaProducer011<>(
		topic,
		new KeyedSerializationSchemaWrapper<>(serSchema),
		props,
		Optional.ofNullable(partitioner),
		producerSemantic,
		FlinkKafkaProducer011.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
 
Example #19
Source File: FlinkPravegaTableSink.java    From flink-connectors with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
    checkState(schema != null, "Table sink is not configured");
    FlinkPravegaWriter<Row> writer = writerFactory.apply(schema);
    return dataStream.addSink(writer)
            .setParallelism(dataStream.getParallelism())
            .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}
 
Example #20
Source File: Sinks.java    From flink-statefun with Apache License 2.0
void consumeFrom(SingleOutputStreamOperator<?> mainOutput) {
  sideOutputs.forEach(
      (id, tag) -> {
        final DataStream<Object> sideOutputStream = mainOutput.getSideOutput(tag);

        DecoratedSink decoratedSink = sinks.get(id);
        @SuppressWarnings("unchecked")
        SinkFunction<Object> sink = (SinkFunction<Object>) decoratedSink.sink;

        DataStreamSink<Object> streamSink = sideOutputStream.addSink(sink);
        streamSink.name(decoratedSink.name);
        streamSink.uid(decoratedSink.uid);
      });
}
 
Example #21
Source File: FlinkKafkaProducer010.java    From flink with Apache License 2.0
private FlinkKafkaProducer010Configuration(
		DataStreamSink<T> originalSink,
		DataStream<T> inputStream,
		FlinkKafkaProducer010<T> producer) {
	//noinspection unchecked
	super(inputStream, originalSink.getTransformation().getOperator());
	this.transformation = originalSink.getTransformation();
	this.producer = producer;
}
 
Example #22
Source File: Sinks.java    From stateful-functions with Apache License 2.0
void consumeFrom(SingleOutputStreamOperator<?> mainOutput) {
  sideOutputs.forEach(
      (id, tag) -> {
        final DataStream<Object> sideOutputStream = mainOutput.getSideOutput(tag);

        DecoratedSink decoratedSink = sinks.get(id);
        @SuppressWarnings("unchecked")
        SinkFunction<Object> sink = (SinkFunction<Object>) decoratedSink.sink;

        DataStreamSink<Object> streamSink = sideOutputStream.addSink(sink);
        streamSink.name(decoratedSink.name);
        streamSink.uid(decoratedSink.uid);
      });
}
 
Example #23
Source File: KafkaTestEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public <T> DataStreamSink<T> produceIntoKafka(
		DataStream<T> stream,
		String topic,
		SerializationSchema<T> serSchema,
		Properties props,
		FlinkKafkaPartitioner<T> partitioner) {
	FlinkKafkaProducer010<T> prod = new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
	prod.setFlushOnCheckpoint(true);
	return stream.addSink(prod);
}
 
Example #24
Source File: KafkaTableSinkBase.java    From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
	final SinkFunction<Row> kafkaProducer = createKafkaProducer(
		topic,
		properties,
		serializationSchema,
		partitioner);
	return dataStream
		.addSink(kafkaProducer)
		.setParallelism(dataStream.getParallelism())
		.name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}
 
Example #25
Source File: JDBCUpsertTableSink.java    From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
	return dataStream
			.addSink(new JDBCUpsertSinkFunction(newFormat()))
			.setParallelism(dataStream.getParallelism())
			.name(TableConnectorUtils.generateRuntimeName(this.getClass(), schema.getFieldNames()));
}