org.apache.flink.streaming.util.serialization.KeyedSerializationSchema Java Examples
The following examples show how to use
org.apache.flink.streaming.util.serialization.KeyedSerializationSchema.
You can go to the original project or source file by following the links above each example.
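None of the examples below implement the interface directly; they pass an existing KeyedSerializationSchema into a Kafka producer or wrap one around a plain SerializationSchema. As a point of reference, here is a minimal sketch of a custom implementation. The Tuple2<String, String> element type and the UTF-8 encoding are assumptions made for illustration, not taken from any example on this page.

import java.nio.charset.StandardCharsets;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchema;

// Minimal sketch: elements are (key, value) string pairs, serialized as UTF-8 bytes.
public class KeyValuePairSerializationSchema implements KeyedSerializationSchema<Tuple2<String, String>> {

    @Override
    public byte[] serializeKey(Tuple2<String, String> element) {
        // Returning null lets the producer distribute records round-robin instead of by key.
        return element.f0 == null ? null : element.f0.getBytes(StandardCharsets.UTF_8);
    }

    @Override
    public byte[] serializeValue(Tuple2<String, String> element) {
        return element.f1.getBytes(StandardCharsets.UTF_8);
    }

    @Override
    public String getTargetTopic(Tuple2<String, String> element) {
        // null means "use the default topic configured on the producer".
        return null;
    }
}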
Example #1
Source File: FlinkKafkaProducerBaseTest.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
@SuppressWarnings("unchecked") DummyFlinkKafkaProducer(Properties producerConfig, KeyedSerializationSchema<T> schema, FlinkKafkaPartitioner partitioner) { super(DUMMY_TOPIC, schema, producerConfig, partitioner); this.mockProducer = mock(KafkaProducer.class); when(mockProducer.send(any(ProducerRecord.class), any(Callback.class))).thenAnswer(new Answer<Object>() { @Override public Object answer(InvocationOnMock invocationOnMock) throws Throwable { pendingCallbacks.add(invocationOnMock.getArgument(1)); return null; } }); this.pendingCallbacks = new ArrayList<>(); this.flushLatch = new MultiShotLatch(); }
Example #2
Source File: FlinkKafkaProducerBaseTest.java From flink with Apache License 2.0 | 6 votes |
@SuppressWarnings("unchecked") DummyFlinkKafkaProducer(Properties producerConfig, KeyedSerializationSchema<T> schema, FlinkKafkaPartitioner partitioner) { super(DUMMY_TOPIC, schema, producerConfig, partitioner); this.mockProducer = mock(KafkaProducer.class); when(mockProducer.send(any(ProducerRecord.class), any(Callback.class))).thenAnswer(new Answer<Object>() { @Override public Object answer(InvocationOnMock invocationOnMock) throws Throwable { pendingCallbacks.add(invocationOnMock.getArgument(1)); return null; } }); this.pendingCallbacks = new ArrayList<>(); this.flushLatch = new MultiShotLatch(); }
Example #3
Source File: KafkaTestEnvironmentImpl.java From flink with Apache License 2.0 | 5 votes |
@Override
public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
    return stream.addSink(new FlinkKafkaProducer<T>(
        topic,
        serSchema,
        props,
        Optional.ofNullable(partitioner),
        producerSemantic,
        FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
Example #4
Source File: KafkaTestEnvironmentImpl.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public <T> StreamSink<T> getProducerSink(String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
    return new StreamSink<>(new FlinkKafkaProducer<T>(
        topic,
        serSchema,
        props,
        Optional.ofNullable(partitioner),
        producerSemantic,
        FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
Example #5
Source File: KafkaTestEnvironmentImpl.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
    return stream.addSink(new FlinkKafkaProducer<T>(
        topic,
        serSchema,
        props,
        Optional.ofNullable(partitioner),
        producerSemantic,
        FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
Example #6
Source File: KafkaTestEnvironmentImpl.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props) {
    FlinkKafkaProducer<T> prod = new FlinkKafkaProducer<T>(
        topic,
        serSchema,
        props,
        Optional.of(new FlinkFixedPartitioner<>()),
        producerSemantic,
        FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE);
    prod.setWriteTimestampToKafka(true);
    return stream.addSink(prod);
}
Example #7
Source File: KafkaTestEnvironmentImpl.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public <T> StreamSink<T> getProducerSink(String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
    return new StreamSink<>(new FlinkKafkaProducer011<>(
        topic,
        serSchema,
        props,
        Optional.ofNullable(partitioner),
        producerSemantic,
        FlinkKafkaProducer011.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
Example #8
Source File: KafkaTestEnvironmentImpl.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
    return stream.addSink(new FlinkKafkaProducer011<>(
        topic,
        serSchema,
        props,
        Optional.ofNullable(partitioner),
        producerSemantic,
        FlinkKafkaProducer011.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
Example #9
Source File: KafkaTestEnvironmentImpl.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props) {
    FlinkKafkaProducer011<T> prod = new FlinkKafkaProducer011<>(
        topic,
        serSchema,
        props,
        Optional.of(new FlinkFixedPartitioner<>()),
        producerSemantic,
        FlinkKafkaProducer011.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE);
    prod.setWriteTimestampToKafka(true);
    return stream.addSink(prod);
}
Example #10
Source File: FlinkKafkaProducerBase.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * The main constructor for creating a FlinkKafkaProducer.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions. Passing null will use Kafka's partitioner.
 */
public FlinkKafkaProducerBase(String defaultTopicId, KeyedSerializationSchema<IN> serializationSchema, Properties producerConfig, FlinkKafkaPartitioner<IN> customPartitioner) {
    requireNonNull(defaultTopicId, "TopicID not set");
    requireNonNull(serializationSchema, "serializationSchema not set");
    requireNonNull(producerConfig, "producerConfig not set");
    ClosureCleaner.clean(customPartitioner, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
    ClosureCleaner.ensureSerializable(serializationSchema);

    this.defaultTopicId = defaultTopicId;
    this.schema = serializationSchema;
    this.producerConfig = producerConfig;
    this.flinkKafkaPartitioner = customPartitioner;

    // set the producer configuration properties for kafka record key value serializers.
    if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
    }

    if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
    }

    // eagerly ensure that bootstrap servers are set.
    if (!this.producerConfig.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
        throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " must be supplied in the producer config properties.");
    }

    this.topicPartitionsMap = new HashMap<>();
}
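The constructor above only insists on bootstrap.servers and fills in byte-array key/value serializers on its own. Below is a minimal usage sketch for one of its subclasses, FlinkKafkaProducer010 (the same producer appears in Examples #16 and #28); the broker address, topic name, and input elements are assumptions made for illustration.

import java.util.Properties;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer010;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper;

public class ProducerUsageSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        DataStream<String> stream = env.fromElements("a", "b", "c");

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092"); // the only mandatory setting
        // key.serializer / value.serializer default to ByteArraySerializer via the constructor above

        stream.addSink(new FlinkKafkaProducer010<String>(
            "my-topic",
            new KeyedSerializationSchemaWrapper<String>(new SimpleStringSchema()), // adapts a plain SerializationSchema
            props));

        env.execute("Producer usage sketch");
    }
}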
Example #11
Source File: FlinkKafkaProducerBase.java From flink with Apache License 2.0 | 5 votes |
/**
 * The main constructor for creating a FlinkKafkaProducer.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions. Passing null will use Kafka's partitioner.
 */
public FlinkKafkaProducerBase(String defaultTopicId, KeyedSerializationSchema<IN> serializationSchema, Properties producerConfig, FlinkKafkaPartitioner<IN> customPartitioner) {
    requireNonNull(defaultTopicId, "TopicID not set");
    requireNonNull(serializationSchema, "serializationSchema not set");
    requireNonNull(producerConfig, "producerConfig not set");
    ClosureCleaner.clean(customPartitioner, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
    ClosureCleaner.ensureSerializable(serializationSchema);

    this.defaultTopicId = defaultTopicId;
    this.schema = serializationSchema;
    this.producerConfig = producerConfig;
    this.flinkKafkaPartitioner = customPartitioner;

    // set the producer configuration properties for kafka record key value serializers.
    if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
    }

    if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
    }

    // eagerly ensure that bootstrap servers are set.
    if (!this.producerConfig.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
        throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " must be supplied in the producer config properties.");
    }

    this.topicPartitionsMap = new HashMap<>();
}
Example #12
Source File: KafkaTestEnvironmentImpl.java From flink with Apache License 2.0 | 5 votes |
@Override
public <T> StreamSink<T> getProducerSink(
        String topic,
        KeyedSerializationSchema<T> serSchema,
        Properties props,
        FlinkKafkaPartitioner<T> partitioner) {
    FlinkKafkaProducer09<T> prod = new FlinkKafkaProducer09<>(topic, serSchema, props, partitioner);
    prod.setFlushOnCheckpoint(true);
    return new StreamSink<>(prod);
}
Example #13
Source File: KafkaTestEnvironmentImpl.java From flink with Apache License 2.0 | 5 votes |
@Override
public <T> StreamSink<T> getProducerSink(
        String topic,
        KeyedSerializationSchema<T> serSchema,
        Properties props,
        FlinkKafkaPartitioner<T> partitioner) {
    FlinkKafkaProducer08<T> prod = new FlinkKafkaProducer08<>(
        topic, serSchema, props, partitioner);
    prod.setFlushOnCheckpoint(true);
    return new StreamSink<>(prod);
}
Example #14
Source File: KafkaTestEnvironmentImpl.java From flink with Apache License 2.0 | 5 votes |
@Override
public <T> StreamSink<T> getProducerSink(String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
    return new StreamSink<>(new FlinkKafkaProducer<T>(
        topic,
        serSchema,
        props,
        Optional.ofNullable(partitioner),
        producerSemantic,
        FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
Example #15
Source File: KafkaTestEnvironmentImpl.java From flink with Apache License 2.0 | 5 votes |
@Override
public <T> StreamSink<T> getProducerSink(String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
    return new StreamSink<>(new FlinkKafkaProducer011<>(
        topic,
        serSchema,
        props,
        Optional.ofNullable(partitioner),
        producerSemantic,
        FlinkKafkaProducer011.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE));
}
Example #16
Source File: KafkaTestEnvironmentImpl.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props) {
    FlinkKafkaProducer010<T> prod = new FlinkKafkaProducer010<>(topic, serSchema, props);
    prod.setFlushOnCheckpoint(true);
    prod.setWriteTimestampToKafka(true);
    return stream.addSink(prod);
}
Example #17
Source File: FlinkKafkaProducer.java From flink with Apache License 2.0 | 4 votes |
/**
 * @deprecated Use {@link FlinkKafkaProducer08#FlinkKafkaProducer08(String, KeyedSerializationSchema, Properties, KafkaPartitioner)}
 */
@Deprecated
public FlinkKafkaProducer(String topicId, KeyedSerializationSchema<IN> serializationSchema, Properties producerConfig, KafkaPartitioner customPartitioner) {
    super(topicId, serializationSchema, producerConfig, customPartitioner);
}
Example #18
Source File: FlinkKafkaProducer.java From flink with Apache License 2.0 | 4 votes |
/**
 * Creates a FlinkKafkaProducer for a given topic. The sink produces its input to
 * the topic. It accepts a keyed {@link KeyedSerializationSchema} and possibly a custom {@link FlinkKafkaPartitioner}.
 *
 * <p>If a partitioner is not provided, written records will be partitioned by the attached key of each
 * record (as determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If written records do not
 * have a key (i.e., {@link KeyedSerializationSchema#serializeKey(Object)} returns {@code null}), they
 * will be distributed to Kafka partitions in a round-robin fashion.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 *                          If a partitioner is not provided, records will be partitioned by the key of each record
 *                          (determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If the keys
 *                          are {@code null}, then records will be distributed to Kafka partitions in a
 *                          round-robin fashion.
 * @param semantic Defines semantic that will be used by this producer (see {@link FlinkKafkaProducer.Semantic}).
 * @param kafkaProducersPoolSize Overwrite default KafkaProducers pool size (see {@link FlinkKafkaProducer.Semantic#EXACTLY_ONCE}).
 *
 * @deprecated use {@link #FlinkKafkaProducer(String, KafkaSerializationSchema, Properties, FlinkKafkaProducer.Semantic)}
 */
@Deprecated
public FlinkKafkaProducer(
        String defaultTopicId,
        KeyedSerializationSchema<IN> serializationSchema,
        Properties producerConfig,
        Optional<FlinkKafkaPartitioner<IN>> customPartitioner,
        FlinkKafkaProducer.Semantic semantic,
        int kafkaProducersPoolSize) {
    this(
        defaultTopicId,
        serializationSchema,
        customPartitioner.orElse(null),
        null, /* kafka serialization schema */
        producerConfig,
        semantic,
        kafkaProducersPoolSize);
}
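The @deprecated tag above points to the KafkaSerializationSchema-based constructor. As a rough migration sketch (the topic name, Tuple2 element type, UTF-8 encoding, and AT_LEAST_ONCE semantic are illustrative assumptions), a keyed schema can be replaced by one that builds the ProducerRecord itself:

import java.nio.charset.StandardCharsets;
import java.util.Properties;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.kafka.clients.producer.ProducerRecord;

public class MigrationSketch {

    // Replaces serializeKey()/serializeValue()/getTargetTopic() with a single serialize() call.
    static final KafkaSerializationSchema<Tuple2<String, String>> SCHEMA =
        new KafkaSerializationSchema<Tuple2<String, String>>() {
            @Override
            public ProducerRecord<byte[], byte[]> serialize(Tuple2<String, String> element, Long timestamp) {
                return new ProducerRecord<>(
                    "my-topic",
                    element.f0.getBytes(StandardCharsets.UTF_8),  // key
                    element.f1.getBytes(StandardCharsets.UTF_8)); // value
            }
        };

    static FlinkKafkaProducer<Tuple2<String, String>> createProducer(Properties producerConfig) {
        // Non-deprecated constructor named in the @deprecated tag above.
        return new FlinkKafkaProducer<>(
            "my-topic",
            SCHEMA,
            producerConfig,
            FlinkKafkaProducer.Semantic.AT_LEAST_ONCE);
    }
}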
Example #19
Source File: KafkaTestEnvironmentImpl.java From flink with Apache License 2.0 | 4 votes |
@Override
public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
    FlinkKafkaProducer08<T> prod = new FlinkKafkaProducer08<>(topic, serSchema, props, partitioner);
    prod.setFlushOnCheckpoint(true);
    return stream.addSink(prod);
}
Example #20
Source File: FlinkKafkaProducer011.java From flink with Apache License 2.0 | 4 votes |
/**
 * Creates a FlinkKafkaProducer for a given topic. The sink produces its input to
 * the topic. It accepts a keyed {@link KeyedSerializationSchema} and possibly a custom {@link FlinkKafkaPartitioner}.
 *
 * <p>If a partitioner is not provided, written records will be partitioned by the attached key of each
 * record (as determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If written records do not
 * have a key (i.e., {@link KeyedSerializationSchema#serializeKey(Object)} returns {@code null}), they
 * will be distributed to Kafka partitions in a round-robin fashion.
 *
 * @param defaultTopicId The default topic to write data to
 * @param serializationSchema A serializable serialization schema for turning user objects into a kafka-consumable byte[] supporting key/value messages
 * @param producerConfig Configuration properties for the KafkaProducer. 'bootstrap.servers.' is the only required argument.
 * @param customPartitioner A serializable partitioner for assigning messages to Kafka partitions.
 *                          If a partitioner is not provided, records will be partitioned by the key of each record
 *                          (determined by {@link KeyedSerializationSchema#serializeKey(Object)}). If the keys
 *                          are {@code null}, then records will be distributed to Kafka partitions in a
 *                          round-robin fashion.
 * @param semantic Defines semantic that will be used by this producer (see {@link Semantic}).
 * @param kafkaProducersPoolSize Overwrite default KafkaProducers pool size (see {@link Semantic#EXACTLY_ONCE}).
 */
public FlinkKafkaProducer011(
        String defaultTopicId,
        KeyedSerializationSchema<IN> serializationSchema,
        Properties producerConfig,
        Optional<FlinkKafkaPartitioner<IN>> customPartitioner,
        Semantic semantic,
        int kafkaProducersPoolSize) {
    super(new TransactionStateSerializer(), new ContextStateSerializer());

    this.defaultTopicId = checkNotNull(defaultTopicId, "defaultTopicId is null");
    this.schema = checkNotNull(serializationSchema, "serializationSchema is null");
    this.producerConfig = checkNotNull(producerConfig, "producerConfig is null");
    this.flinkKafkaPartitioner = checkNotNull(customPartitioner, "customPartitioner is null").orElse(null);
    this.semantic = checkNotNull(semantic, "semantic is null");
    this.kafkaProducersPoolSize = kafkaProducersPoolSize;
    checkState(kafkaProducersPoolSize > 0, "kafkaProducersPoolSize must be non empty");

    ClosureCleaner.clean(this.flinkKafkaPartitioner, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
    ClosureCleaner.ensureSerializable(serializationSchema);

    // set the producer configuration properties for kafka record key value serializers.
    if (!producerConfig.containsKey(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG);
    }

    if (!producerConfig.containsKey(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG)) {
        this.producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
    } else {
        LOG.warn("Overwriting the '{}' is not recommended", ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG);
    }

    // eagerly ensure that bootstrap servers are set.
    if (!this.producerConfig.containsKey(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG)) {
        throw new IllegalArgumentException(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG + " must be supplied in the producer config properties.");
    }

    if (!producerConfig.containsKey(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG)) {
        long timeout = DEFAULT_KAFKA_TRANSACTION_TIMEOUT.toMilliseconds();
        checkState(timeout < Integer.MAX_VALUE && timeout > 0, "timeout does not fit into 32 bit integer");
        this.producerConfig.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, (int) timeout);
        LOG.warn("Property [{}] not specified. Setting it to {}", ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, DEFAULT_KAFKA_TRANSACTION_TIMEOUT);
    }

    // Enable transactionTimeoutWarnings to avoid silent data loss
    // See KAFKA-6119 (affects versions 0.11.0.0 and 0.11.0.1):
    // The KafkaProducer may not throw an exception if the transaction failed to commit
    if (semantic == Semantic.EXACTLY_ONCE) {
        final Object object = this.producerConfig.get(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG);
        final long transactionTimeout;
        if (object instanceof String && StringUtils.isNumeric((String) object)) {
            transactionTimeout = Long.parseLong((String) object);
        } else if (object instanceof Number) {
            transactionTimeout = ((Number) object).longValue();
        } else {
            throw new IllegalArgumentException(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG + " must be numeric, was " + object);
        }
        super.setTransactionTimeout(transactionTimeout);
        super.enableTransactionTimeoutWarnings(0.8);
    }

    this.topicPartitionsMap = new HashMap<>();
}
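Because the constructor above falls back to DEFAULT_KAFKA_TRANSACTION_TIMEOUT when transaction.timeout.ms is absent, exactly-once users typically set the timeout explicitly. Below is a hedged usage sketch; the broker address, topic, timeout value, and wrapped SimpleStringSchema are assumptions, and the broker-side transaction.max.timeout.ms must be at least as large as the value chosen here.

import java.util.Optional;
import java.util.Properties;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011;
import org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper;

public class ExactlyOnceSketch {

    static FlinkKafkaProducer011<String> createProducer() {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092");
        props.setProperty("transaction.timeout.ms", "900000"); // otherwise DEFAULT_KAFKA_TRANSACTION_TIMEOUT is applied

        return new FlinkKafkaProducer011<String>(
            "my-topic",
            new KeyedSerializationSchemaWrapper<String>(new SimpleStringSchema()),
            props,
            Optional.empty(), // no custom partitioner: partition by key, round-robin if keys are null
            FlinkKafkaProducer011.Semantic.EXACTLY_ONCE,
            FlinkKafkaProducer011.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE);
    }
}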
Example #21
Source File: KafkaConsumerTestBase.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
protected String writeSequence(
        String baseTopicName,
        final int numElements,
        final int parallelism,
        final int replicationFactor) throws Exception {
    LOG.info("\n===================================\n" +
        "== Writing sequence of " + numElements + " into " + baseTopicName + " with p=" + parallelism + "\n" +
        "===================================");

    final TypeInformation<Tuple2<Integer, Integer>> resultType =
        TypeInformation.of(new TypeHint<Tuple2<Integer, Integer>>() {});

    final KeyedSerializationSchema<Tuple2<Integer, Integer>> serSchema =
        new KeyedSerializationSchemaWrapper<>(
            new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));

    final KafkaDeserializationSchema<Tuple2<Integer, Integer>> deserSchema =
        new KafkaDeserializationSchemaWrapper<>(
            new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));

    final int maxNumAttempts = 10;

    for (int attempt = 1; attempt <= maxNumAttempts; attempt++) {

        final String topicName = baseTopicName + '-' + attempt;

        LOG.info("Writing attempt #" + attempt);

        // -------- Write the Sequence --------

        createTestTopic(topicName, parallelism, replicationFactor);

        StreamExecutionEnvironment writeEnv = StreamExecutionEnvironment.getExecutionEnvironment();
        writeEnv.getConfig().setRestartStrategy(RestartStrategies.noRestart());
        writeEnv.getConfig().disableSysoutLogging();

        DataStream<Tuple2<Integer, Integer>> stream = writeEnv.addSource(new RichParallelSourceFunction<Tuple2<Integer, Integer>>() {

            private boolean running = true;

            @Override
            public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
                int cnt = 0;
                int partition = getRuntimeContext().getIndexOfThisSubtask();

                while (running && cnt < numElements) {
                    ctx.collect(new Tuple2<>(partition, cnt));
                    cnt++;
                }
            }

            @Override
            public void cancel() {
                running = false;
            }
        }).setParallelism(parallelism);

        // the producer must not produce duplicates
        Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
        producerProperties.setProperty("retries", "0");
        producerProperties.putAll(secureProps);

        kafkaServer.produceIntoKafka(stream, topicName, serSchema, producerProperties, new Tuple2FlinkPartitioner(parallelism))
            .setParallelism(parallelism);

        try {
            writeEnv.execute("Write sequence");
        }
        catch (Exception e) {
            LOG.error("Write attempt failed, trying again", e);
            deleteTestTopic(topicName);
            waitUntilNoJobIsRunning(client);
            continue;
        }

        LOG.info("Finished writing sequence");

        // -------- Validate the Sequence --------

        // we need to validate the sequence, because kafka's producers are not exactly once
        LOG.info("Validating sequence");

        waitUntilNoJobIsRunning(client);

        if (validateSequence(topicName, parallelism, deserSchema, numElements)) {
            // everything is good!
            return topicName;
        }
        else {
            deleteTestTopic(topicName);
            // fall through the loop
        }
    }

    throw new Exception("Could not write a valid sequence to Kafka after " + maxNumAttempts + " attempts");
}
Example #22
Source File: KafkaTestEnvironmentImpl.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props) {
    throw new UnsupportedOperationException();
}
Example #23
Source File: KafkaTestEnvironmentImpl.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
@Override
public <T> DataStreamSink<T> writeToKafkaWithTimestamps(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props) {
    throw new UnsupportedOperationException();
}
Example #24
Source File: KafkaTestEnvironment.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
public abstract <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner);
Example #25
Source File: KafkaTestEnvironment.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
public abstract <T> StreamSink<T> getProducerSink(String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner);
Example #26
Source File: FlinkKafkaProducer.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * @deprecated Use {@link FlinkKafkaProducer08#FlinkKafkaProducer08(String, String, KeyedSerializationSchema)}
 */
@Deprecated
public FlinkKafkaProducer(String brokerList, String topicId, KeyedSerializationSchema<IN> serializationSchema) {
    super(topicId, serializationSchema, getPropertiesFromBrokerList(brokerList), (FlinkKafkaPartitioner<IN>) null);
}
Example #27
Source File: FlinkKafkaProducer.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * @deprecated Use {@link FlinkKafkaProducer08#FlinkKafkaProducer08(String, KeyedSerializationSchema, Properties)}
 */
@Deprecated
public FlinkKafkaProducer(String topicId, KeyedSerializationSchema<IN> serializationSchema, Properties producerConfig) {
    super(topicId, serializationSchema, producerConfig, (FlinkKafkaPartitioner<IN>) null);
}
Example #28
Source File: KafkaTestEnvironmentImpl.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
@Override
public <T> DataStreamSink<T> produceIntoKafka(DataStream<T> stream, String topic, KeyedSerializationSchema<T> serSchema, Properties props, FlinkKafkaPartitioner<T> partitioner) {
    FlinkKafkaProducer010<T> prod = new FlinkKafkaProducer010<>(topic, serSchema, props, partitioner);
    prod.setFlushOnCheckpoint(true);
    return stream.addSink(prod);
}
Example #29
Source File: FlinkKafkaProducer.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
/**
 * @deprecated Use {@link FlinkKafkaProducer08#FlinkKafkaProducer08(String, KeyedSerializationSchema, Properties, KafkaPartitioner)}
 */
@Deprecated
public FlinkKafkaProducer(String topicId, KeyedSerializationSchema<IN> serializationSchema, Properties producerConfig, KafkaPartitioner customPartitioner) {
    super(topicId, serializationSchema, producerConfig, customPartitioner);
}
Example #30
Source File: KafkaConsumerTestBase.java From Flink-CEPplus with Apache License 2.0 | 4 votes |
protected void writeAppendSequence(
        String topicName,
        final int originalNumElements,
        final int numElementsToAppend,
        final int parallelism) throws Exception {

    LOG.info("\n===================================\n" +
        "== Appending sequence of " + numElementsToAppend + " into " + topicName +
        "===================================");

    final TypeInformation<Tuple2<Integer, Integer>> resultType =
        TypeInformation.of(new TypeHint<Tuple2<Integer, Integer>>() {});

    final KeyedSerializationSchema<Tuple2<Integer, Integer>> serSchema =
        new KeyedSerializationSchemaWrapper<>(
            new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));

    final KafkaDeserializationSchema<Tuple2<Integer, Integer>> deserSchema =
        new KafkaDeserializationSchemaWrapper<>(
            new TypeInformationSerializationSchema<>(resultType, new ExecutionConfig()));

    // -------- Write the append sequence --------

    StreamExecutionEnvironment writeEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    writeEnv.getConfig().setRestartStrategy(RestartStrategies.noRestart());
    writeEnv.getConfig().disableSysoutLogging();

    DataStream<Tuple2<Integer, Integer>> stream = writeEnv.addSource(new RichParallelSourceFunction<Tuple2<Integer, Integer>>() {

        private boolean running = true;

        @Override
        public void run(SourceContext<Tuple2<Integer, Integer>> ctx) throws Exception {
            int cnt = originalNumElements;
            int partition = getRuntimeContext().getIndexOfThisSubtask();

            while (running && cnt < numElementsToAppend + originalNumElements) {
                ctx.collect(new Tuple2<>(partition, cnt));
                cnt++;
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    }).setParallelism(parallelism);

    // the producer must not produce duplicates
    Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
    producerProperties.setProperty("retries", "0");
    producerProperties.putAll(secureProps);

    kafkaServer.produceIntoKafka(stream, topicName, serSchema, producerProperties, new Tuple2FlinkPartitioner(parallelism))
        .setParallelism(parallelism);

    try {
        writeEnv.execute("Write sequence");
    }
    catch (Exception e) {
        throw new Exception("Failed to append sequence to Kafka; append job failed.", e);
    }

    LOG.info("Finished writing append sequence");

    // we need to validate the sequence, because kafka's producers are not exactly once
    LOG.info("Validating sequence");
    while (!getRunningJobs(client).isEmpty()) {
        Thread.sleep(50);
    }

    if (!validateSequence(topicName, parallelism, deserSchema, originalNumElements + numElementsToAppend)) {
        throw new Exception("Could not append a valid sequence to Kafka.");
    }
}