org.apache.flink.table.utils.TableConnectorUtils Java Examples
The following examples show how to use
org.apache.flink.table.utils.TableConnectorUtils.
Each example is taken from an open-source project; the originating source file and its license are noted above each snippet.
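All of the snippets below funnel into the same static helper, TableConnectorUtils.generateRuntimeName(Class<?>, String[]), which builds a human-readable operator name for logs and the web UI. Here is a minimal, self-contained sketch of calling it directly; the MyTableSink class and the exact shape of the output are illustrative assumptions, not part of the documented API:

import org.apache.flink.table.utils.TableConnectorUtils;

public class RuntimeNameDemo {
    // Hypothetical sink class, used only to demonstrate the naming helper.
    static class MyTableSink { }

    public static void main(String[] args) {
        String[] fieldNames = {"id", "name", "score"};
        // Builds the operator name shown in logs and the web UI;
        // a "MyTableSink(id, name, score)" shape is assumed, not documented.
        System.out.println(TableConnectorUtils.generateRuntimeName(MyTableSink.class, fieldNames));
    }
}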
Example #1
Source File: CsvTableSink.java From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
    SingleOutputStreamOperator<String> csvRows =
        dataStream.map(new CsvFormatter(fieldDelim == null ? "," : fieldDelim));

    DataStreamSink<String> sink;
    if (writeMode != null) {
        sink = csvRows.writeAsText(path, writeMode);
    } else {
        sink = csvRows.writeAsText(path);
    }

    if (numFiles > 0) {
        csvRows.setParallelism(numFiles);
        sink.setParallelism(numFiles);
    } else {
        // if file number is not set, use input parallelism to make it chained.
        csvRows.setParallelism(dataStream.getParallelism());
        sink.setParallelism(dataStream.getParallelism());
    }

    sink.name(TableConnectorUtils.generateRuntimeName(CsvTableSink.class, fieldNames));
    return sink;
}
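For context, a sink like the one above only receives its generated runtime name once it is wired into a job. A minimal sketch of registering a CsvTableSink through the legacy Table API follows; the table name, path, and field layout are illustrative, and this assumes a Flink version (roughly 1.9/1.10) where registerTableSink is still available:

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.sinks.CsvTableSink;

public class CsvSinkRegistration {
    public static void main(String[] args) {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        String[] fieldNames = {"id", "name"};
        TypeInformation<?>[] fieldTypes = {Types.LONG, Types.STRING};

        // Register the sink; when the job runs, consumeDataStream names the sink
        // operator via TableConnectorUtils.generateRuntimeName(CsvTableSink.class, fieldNames).
        tableEnv.registerTableSink("csvOutput", fieldNames, fieldTypes,
            new CsvTableSink("/tmp/output.csv", "|"));
    }
}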
Example #2
Source File: CsvTableSink.java From flink with Apache License 2.0
@Override
public DataSink<?> consumeDataSet(DataSet<Row> dataSet) {
    MapOperator<Row, String> csvRows =
        dataSet.map(new CsvFormatter(fieldDelim == null ? "," : fieldDelim));

    DataSink<String> sink;
    if (writeMode != null) {
        sink = csvRows.writeAsText(path, writeMode);
    } else {
        sink = csvRows.writeAsText(path);
    }

    if (numFiles > 0) {
        csvRows.setParallelism(numFiles);
        sink.setParallelism(numFiles);
    }

    return sink.name(TableConnectorUtils.generateRuntimeName(CsvTableSink.class, fieldNames));
}
Example #3
Source File: ElasticsearchUpsertTableSinkBase.java From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    final ElasticsearchUpsertSinkFunction upsertFunction =
        new ElasticsearchUpsertSinkFunction(
            IndexGeneratorFactory.createIndexGenerator(index, schema),
            docType,
            keyDelimiter,
            keyNullLiteral,
            serializationSchema,
            contentType,
            requestFactory,
            keyFieldIndices);
    final SinkFunction<Tuple2<Boolean, Row>> sinkFunction = createSinkFunction(
        hosts,
        failureHandler,
        sinkOptions,
        upsertFunction);
    return dataStream.addSink(sinkFunction)
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}
Example #4
Source File: HBaseUpsertTableSink.java From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    Configuration hbaseClientConf = HBaseConfigurationUtil.getHBaseConfiguration();
    hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, hbaseOptions.getZkQuorum());
    hbaseOptions.getZkNodeParent().ifPresent(
        v -> hbaseClientConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, v));
    HBaseSinkFunction sinkFunction = new HBaseSinkFunction(
        hbaseOptions.getTableName(),
        hbaseClientConf,
        new LegacyMutationConverter(hbaseTableSchema),
        writeOptions.getBufferFlushMaxSizeInBytes(),
        writeOptions.getBufferFlushMaxRows(),
        writeOptions.getBufferFlushIntervalMillis());
    return dataStream
        .addSink(sinkFunction)
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), tableSchema.getFieldNames()));
}
Example #5
Source File: CassandraAppendTableSink.java From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
    if (!(dataStream.getType() instanceof RowTypeInfo)) {
        throw new TableException("No support for the type of the given DataStream: " + dataStream.getType());
    }

    CassandraRowSink sink = new CassandraRowSink(
        dataStream.getType().getArity(),
        cql,
        builder,
        CassandraSinkBaseConfig.newBuilder().build(),
        new NoOpCassandraFailureHandler());

    return dataStream
        .addSink(sink)
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), fieldNames));
}
Example #6
Source File: Elasticsearch6TableSink.java From alchemy with Apache License 2.0
@Override
public void emitDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    Elasticsearch6SinkFunction upsertFunction = new Elasticsearch6SinkFunction(
        index,
        findIndex(indexField, schema.getFieldNames()),
        docType,
        keyDelimiter,
        keyNullLiteral,
        serializationSchema,
        contentType,
        requestFactory,
        keyFieldIndices);
    final SinkFunction<Tuple2<Boolean, Row>> sinkFunction = createSinkFunction(
        hosts,
        failureHandler,
        upsertFunction);
    dataStream.addSink(sinkFunction)
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}
Example #7
Source File: CsvTableSink.java From flink with Apache License 2.0
@Override
public void emitDataSet(DataSet<Row> dataSet) {
    MapOperator<Row, String> csvRows =
        dataSet.map(new CsvFormatter(fieldDelim == null ? "," : fieldDelim));

    DataSink<String> sink;
    if (writeMode != null) {
        sink = csvRows.writeAsText(path, writeMode);
    } else {
        sink = csvRows.writeAsText(path);
    }

    if (numFiles > 0) {
        csvRows.setParallelism(numFiles);
        sink.setParallelism(numFiles);
    }

    sink.name(TableConnectorUtils.generateRuntimeName(CsvTableSink.class, fieldNames));
}
Example #8
Source File: HBaseUpsertTableSink.java From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    Configuration hbaseClientConf = HBaseConfiguration.create();
    hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, hbaseOptions.getZkQuorum());
    hbaseOptions.getZkNodeParent().ifPresent(
        v -> hbaseClientConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, v));
    HBaseUpsertSinkFunction sinkFunction = new HBaseUpsertSinkFunction(
        hbaseOptions.getTableName(),
        hbaseTableSchema,
        hbaseClientConf,
        writeOptions.getBufferFlushMaxSizeInBytes(),
        writeOptions.getBufferFlushMaxRows(),
        writeOptions.getBufferFlushIntervalMillis());
    return dataStream
        .addSink(sinkFunction)
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), tableSchema.getFieldNames()));
}
Example #9
Source File: ElasticsearchUpsertTableSinkBase.java From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    final ElasticsearchUpsertSinkFunction upsertFunction =
        new ElasticsearchUpsertSinkFunction(
            index,
            docType,
            keyDelimiter,
            keyNullLiteral,
            serializationSchema,
            contentType,
            requestFactory,
            keyFieldIndices);
    final SinkFunction<Tuple2<Boolean, Row>> sinkFunction = createSinkFunction(
        hosts,
        failureHandler,
        sinkOptions,
        upsertFunction);
    return dataStream.addSink(sinkFunction)
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}
Example #10
Source File: ElasticsearchUpsertTableSinkBase.java From Flink-CEPplus with Apache License 2.0
@Override
public void emitDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    final ElasticsearchUpsertSinkFunction upsertFunction =
        new ElasticsearchUpsertSinkFunction(
            index,
            docType,
            keyDelimiter,
            keyNullLiteral,
            serializationSchema,
            contentType,
            requestFactory,
            keyFieldIndices);
    final SinkFunction<Tuple2<Boolean, Row>> sinkFunction = createSinkFunction(
        hosts,
        failureHandler,
        sinkOptions,
        upsertFunction);
    dataStream.addSink(sinkFunction)
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}
Example #11
Source File: KafkaTableSinkBase.java From Flink-CEPplus with Apache License 2.0
@Override
public void emitDataStream(DataStream<Row> dataStream) {
    final SinkFunction<Row> kafkaProducer = createKafkaProducer(
        topic,
        properties,
        serializationSchema,
        partitioner);
    dataStream.addSink(kafkaProducer)
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}
Example #12
Source File: JdbcUpsertTableSink.java From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    return dataStream
        .addSink(new GenericJdbcSinkFunction<>(newFormat()))
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), schema.getFieldNames()));
}
Example #13
Source File: KuduTableSink.java From bahir-flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStreamTuple) {
    KuduSink upsertKuduSink = new KuduSink(
        writerConfigBuilder.build(),
        tableInfo,
        new UpsertOperationMapper(getTableSchema().getFieldNames()));
    return dataStreamTuple
        .addSink(upsertKuduSink)
        .setParallelism(dataStreamTuple.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getTableSchema().getFieldNames()));
}
Example #14
Source File: HiveTableSource.java From flink with Apache License 2.0
@Override
public String explainSource() {
    String explain = String.format(" TablePath: %s, PartitionPruned: %s, PartitionNums: %d",
        tablePath.getFullName(), partitionPruned,
        null == remainingPartitions ? null : remainingPartitions.size());
    if (projectedFields != null) {
        explain += ", ProjectedFields: " + Arrays.toString(projectedFields);
    }
    if (isLimitPushDown) {
        explain += String.format(", LimitPushDown %s, Limit %d", isLimitPushDown, limit);
    }
    return TableConnectorUtils.generateRuntimeName(getClass(), getTableSchema().getFieldNames()) + explain;
}
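For reference, the runtime name from TableConnectorUtils prefixes the Hive-specific details, so the returned explain string looks something like "HiveTableSource(id, amount) TablePath: mydb.orders, PartitionPruned: false, PartitionNums: 4" (values illustrative; the exact name format is an assumption).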
Example #15
Source File: FlinkPravegaTableSink.java From flink-connectors with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
    checkState(schema != null, "Table sink is not configured");
    FlinkPravegaWriter<Row> writer = writerFactory.apply(schema);
    return dataStream.addSink(writer)
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}
Example #16
Source File: HiveBatchSource.java From Alink with Apache License 2.0
@Override
public String explainSource() {
    String explain = String.format(" TablePath: %s, PartitionPruned: %s, PartitionNums: %d",
        tablePath.getFullName(), partitionPruned,
        null == remainingPartitions ? null : remainingPartitions.size());
    if (projectedFields != null) {
        explain += ", ProjectedFields: " + Arrays.toString(projectedFields);
    }
    if (isLimitPushDown) {
        explain += String.format(", LimitPushDown %s, Limit %d", isLimitPushDown, limit);
    }
    return TableConnectorUtils.generateRuntimeName(getClass(), getTableSchema().getFieldNames()) + explain;
}
Example #17
Source File: CassandraAppendTableSink.java From Flink-CEPplus with Apache License 2.0
@Override
public void emitDataStream(DataStream<Row> dataStream) {
    try {
        CassandraSink.addSink(dataStream)
            .setClusterBuilder(this.builder)
            .setQuery(this.cql)
            .build()
            .name(TableConnectorUtils.generateRuntimeName(this.getClass(), fieldNames));
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
Example #18
Source File: KafkaTableSinkBase.java From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
    final SinkFunction<Row> kafkaProducer = createKafkaProducer(
        topic,
        properties,
        serializationSchema,
        partitioner);
    return dataStream
        .addSink(kafkaProducer)
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}
Example #19
Source File: FileSystemTableSource.java From flink with Apache License 2.0
@Override
public String explainSource() {
    return TableConnectorUtils.generateRuntimeName(getClass(), getTableSchema().getFieldNames()) +
        (readPartitions == null ? "" : ", readPartitions=" + readPartitions) +
        (selectFields == null ? "" : ", selectFields=" + Arrays.toString(selectFields)) +
        (limit == null ? "" : ", limit=" + limit) +
        (filters == null ? "" : ", filters=" + filtersString());
}
Example #20
Source File: JDBCAppendTableSink.java From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
    return dataStream
        .addSink(new JDBCSinkFunction(outputFormat))
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), fieldNames));
}
Example #21
Source File: JDBCUpsertTableSink.java From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    return dataStream
        .addSink(new JDBCUpsertSinkFunction(newFormat()))
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), schema.getFieldNames()));
}
Example #22
Source File: PulsarTableSink.java From pulsar-flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
    FlinkPulsarRowSink sink = new FlinkPulsarRowSink(
        adminUrl, defaultTopicName, clientConf, properties, schema.toRowDataType());
    return dataStream
        .addSink(sink)
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(getClass(), getFieldNames()));
}
Example #23
Source File: KafkaTableSourceBase.java From flink with Apache License 2.0
@Override
public String explainSource() {
    return TableConnectorUtils.generateRuntimeName(this.getClass(), schema.getFieldNames());
}
Example #24
Source File: TableConnectorUtil.java From flink with Apache License 2.0
/**
 * Returns the table connector name used for log and web UI.
 */
public static String generateRuntimeName(Class<?> clazz, String[] fields) {
    return TableConnectorUtils.generateRuntimeName(clazz, fields);
}
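The delegation above suggests TableConnectorUtil is kept only as a backwards-compatibility shim: it forwards directly to the newer TableConnectorUtils, so older connector code keeps compiling while new code can call TableConnectorUtils.generateRuntimeName directly.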
Example #25
Source File: HBaseTableSource.java From Flink-CEPplus with Apache License 2.0
@Override
public String explainSource() {
    return TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames());
}
Example #26
Source File: KafkaTableSourceBase.java From Flink-CEPplus with Apache License 2.0
@Override
public String explainSource() {
    return TableConnectorUtils.generateRuntimeName(this.getClass(), schema.getFieldNames());
}
Example #27
Source File: RedisTableSink.java From bahir-flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Tuple2<Boolean, Row>> dataStream) {
    return dataStream.addSink(new RedisSink(flinkJedisConfigBase, redisMapper))
        .setParallelism(dataStream.getParallelism())
        .name(TableConnectorUtils.generateRuntimeName(this.getClass(), getFieldNames()));
}