Java Code Examples for org.apache.kafka.clients.producer.KafkaProducer#send()

The following examples show how to use org.apache.kafka.clients.producer.KafkaProducer#send(). You can go to the original project or source file by following the link above each example.
Example 1
Source File: WebKafkaConsumerTest.java    From kafka-webview with MIT License
public void publishDummyDataNumbers() {
    final String topic = "NumbersTopic";

    // Create publisher
    final Map<String, Object> config = new HashMap<>();
    config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class);
    config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

    final KafkaProducer<Integer, Integer> producer = new KafkaProducer<>(config);
    for (int value = 0; value < 10000; value++) {
        producer.send(new ProducerRecord<>(topic, value, value));
    }
    producer.flush();
    producer.close();
}
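Example 1 sends fire-and-forget: the Future returned by send() is never inspected, so individual delivery failures surface only indirectly at flush()/close(). A minimal sketch of the two common alternatives for the same producer, one synchronous and one callback-based (record values are illustrative):

    // Synchronous: block until the broker acknowledges, or get() throws.
    RecordMetadata meta = producer.send(new ProducerRecord<>(topic, 1, 1)).get();
    System.out.printf("wrote to %s-%d at offset %d%n", meta.topic(), meta.partition(), meta.offset());

    // Asynchronous: handle the outcome without blocking the send loop.
    producer.send(new ProducerRecord<>(topic, 2, 2), (metadata, exception) -> {
        if (exception != null) {
            exception.printStackTrace(); // delivery failed after retries were exhausted
        }
    });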
 
Example 2
Source File: KafkaUtil.java    From flink-learning with Apache License 2.0
public static void writeToKafka() throws InterruptedException {
    Properties props = new Properties();
    props.put("bootstrap.servers", broker_list);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    KafkaProducer<String, String> producer = new KafkaProducer<>(props);

    for (int i = 1; i <= 100; i++) {
        Student student = new Student(i, "zhisheng" + i, "password" + i, 18 + i);
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, null, null, GsonUtil.toJson(student));
        producer.send(record);
        System.out.println("Sending data: " + GsonUtil.toJson(student));
        Thread.sleep(10 * 1000); // sleep 10s after each record, i.e. 6 records per minute
    }
    producer.flush();
}
 
Example 3
Source File: IPLogProducer.java    From Building-Data-Streaming-Applications-with-Apache-Kafka with MIT License
public static void main(final String[] args) {
    IPLogProducer ipLogProducer = new IPLogProducer();
    Properties producerProps = new Properties();

    //replace broker ip with your kafka broker ip
    producerProps.put("bootstrap.servers", "localhost:9092");
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("auto.create.topics.enable","true");

    KafkaProducer<String, String> ipProducer = new KafkaProducer<String, String>(producerProps);

    try (Scanner scanner = new Scanner(ipLogProducer.readfile())) {
        while (scanner.hasNextLine()) {
            String line = scanner.nextLine();
            ProducerRecord<String, String> ipData = new ProducerRecord<>("iplog", line);
            Future<RecordMetadata> recordMetadata = ipProducer.send(ipData);
        }

    } catch (IOException e) {
        e.printStackTrace();
    }
    ipProducer.close();
}
 
Example 4
Source File: BuildLogEventDataUtil.java    From flink-learning with Apache License 2.0
public static void writeDataToKafka() {
    Properties props = new Properties();
    props.put("bootstrap.servers", BROKER_LIST);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    KafkaProducer<String, String> producer = new KafkaProducer<>(props);

    for (int i = 0; i < 100; i++) {
        LogEvent logEvent = LogEvent.builder()
                .type("app")
                .timestamp(System.currentTimeMillis())
                .level(logLevel())
                .message(message(i + 1))
                .tags(mapData())
                .build();
        ProducerRecord<String, String> record = new ProducerRecord<>(LOG_TOPIC, null, null, GsonUtil.toJson(logEvent));
        producer.send(record);
    }
    producer.flush();
}
 
Example 5
Source File: kafkaProducer.java    From wingcloud with Apache License 2.0
public static void main(String[] args) throws Exception{
    Properties prop = new Properties();
    // Kafka broker address
    prop.put("bootstrap.servers", "192.168.43.201:9092");
    // key/value serializers
    prop.put("key.serializer", StringSerializer.class.getName());
    prop.put("value.serializer", StringSerializer.class.getName());

    // topic name
    String topic = "testsource";

    // create the producer
    KafkaProducer<String, String> producer = new KafkaProducer<>(prop);

    // produce messages
    while(true){
        String message = "{\"userid\":\""+getUserId()+"\",\"usergender\":\""+getUserGender()+"\",\"userage\":\""+getUserAge()+"\",\"userarea\":\""+getUserArea()+"\",\"shopid\":\""+getShopId()+"\",\"shoptype\":\""+shoptype+"\",\"shoptime\":\""+getShopTime()+"\"}";
        System.out.println(message);
        producer.send(new ProducerRecord<>(topic, message));
        Thread.sleep(2000);
    }
    // close the producer (unreachable while the loop above runs forever)
    //producer.close();
}
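Because the loop above never exits, producer.close() is unreachable and records still buffered in the producer can be lost when the process is killed. A minimal sketch of one way to close cleanly, registering a JVM shutdown hook before entering the loop (variable names follow the example above):

    // Runs on normal JVM shutdown (e.g. Ctrl+C); close() flushes buffered records first.
    Runtime.getRuntime().addShutdownHook(new Thread(producer::close));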
 
Example 6
Source File: Kafka10DataLoader.java    From kylin with Apache License 2.0
public void loadIntoKafka(List<String> messages) {

    KafkaClusterConfig clusterConfig = kafkaClusterConfigs.get(0);
    String brokerList = StringUtils.join(Collections2.transform(clusterConfig.getBrokerConfigs(), new Function<BrokerConfig, String>() {
        @Nullable
        @Override
        public String apply(BrokerConfig brokerConfig) {
            return brokerConfig.getHost() + ":" + brokerConfig.getPort();
        }
    }), ",");

    KafkaProducer<String, String> producer = getKafkaProducer(brokerList, null);

    for (int i = 0; i < messages.size(); i++) {
        ProducerRecord<String, String> keyedMessage = new ProducerRecord<>(clusterConfig.getTopic(), String.valueOf(i), messages.get(i));
        producer.send(keyedMessage);
    }
    logger.info("sent " + messages.size() + " messages to " + this.toString());
    producer.close();
}
 
Example 7
Source File: UnionListStateUtil.java    From flink-learning with Apache License 2.0
public static void writeToKafka() throws InterruptedException {
    Properties props = new Properties();
    props.put("bootstrap.servers", broker_list);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    KafkaProducer<String, String> producer = new KafkaProducer<>(props);

    // generate a random digit in 0~9 as the appId
    for (int i = 0; i < 5; i++) {
        String value = "" + new Random().nextInt(10);
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, null, null, value);
        producer.send(record);
        System.out.println("Sending data: " + value);
    }

    producer.flush();
}
 
Example 8
Source File: BaseService.java    From whirlpool with Apache License 2.0
@Override
public String call() throws Exception {
    // set up the producer
    KafkaProducer<String, String> producer;
    try (InputStream props = Resources.getResource("producer.props").openStream()) {
        Properties properties = new Properties();
        properties.load(props);
        producer = new KafkaProducer<>(properties);
    }

    String message;

    try {
        while (keepRunning.get()) {
            while ((message = responseQueue.poll()) != null) {
                logger.debug(String.format("Sending message: '%s' to topic: '%s'", message, topic));

            producer.send(new ProducerRecord<>(topic, message),
                    (metadata, e) -> {
                        if (e != null) {
                            logger.error(e.getMessage(), e);
                        } else {
                            logger.trace(String.format("The offset of the record we just sent is: %d", metadata.offset()));
                        }
                    });
            }

            producer.flush();

            // Don't busy wait
            Thread.sleep(20L);
        }
    } catch (Throwable throwable) {
        logger.error(throwable.getMessage(), throwable);
    } finally {
        producer.close();
    }

    return "done";
}
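The no-argument close() in the finally block waits indefinitely for in-flight sends to complete, which can hang shutdown if the brokers are unreachable. A hedged variant with a bounded wait (the java.time.Duration overload exists since Kafka 2.0):

    // Wait at most 10 seconds for outstanding sends, then force-close.
    producer.close(Duration.ofSeconds(10));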
 
Example 9
Source File: TestUtils.java    From uReplicator with Apache License 2.0
public static void produceMessages(String bootstrapServer, String topicname, int messageCount) {
  KafkaProducer<byte[], byte[]> producer = createProducer(bootstrapServer);
  for (int i = 0; i < messageCount; i++) {
    ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(topicname, null,
        String.format("Test Value - %d", i).getBytes());
    producer.send(record);
  }
  producer.flush();
  producer.close();
}
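createProducer() is a helper defined elsewhere in TestUtils. A minimal sketch of what such a factory might look like, assuming byte-array serializers (the real uReplicator configuration may differ):

  public static KafkaProducer<byte[], byte[]> createProducer(String bootstrapServer) {
    Properties props = new Properties();
    props.put("bootstrap.servers", bootstrapServer);
    props.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    return new KafkaProducer<>(props);
  }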
 
Example 10
Source File: TransactionOnlySend.java    From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    Properties properties = new Properties();
    properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
            StringSerializer.class.getName());
    properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
            StringSerializer.class.getName());
    properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
    properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionId);

    KafkaProducer<String, String> producer = new KafkaProducer<>(properties);

    producer.initTransactions();
    producer.beginTransaction();

    try {
        // perform business logic and build the ProducerRecords
        ProducerRecord<String, String> record1 = new ProducerRecord<>(topic, "msg1");
        producer.send(record1);
        ProducerRecord<String, String> record2 = new ProducerRecord<>(topic, "msg2");
        producer.send(record2);
        ProducerRecord<String, String> record3 = new ProducerRecord<>(topic, "msg3");
        producer.send(record3);
        // perform any remaining business logic
        producer.commitTransaction();
    } catch (ProducerFencedException e) {
        producer.abortTransaction();
    }
    producer.close();
}
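Catching only ProducerFencedException is the bare minimum. The KafkaProducer javadoc recommends treating fencing-class errors as fatal (close, do not abort) and aborting on any other KafkaException; a hedged replacement for the catch block above (exception classes are from org.apache.kafka.common.errors):

    } catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
        // Fatal: another producer took over this transactional.id; close without aborting.
        producer.close();
    } catch (KafkaException e) {
        // Abortable: roll back the transaction and retry if appropriate.
        producer.abortTransaction();
    }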
 
Example 11
Source File: UvExampleUtil.java    From flink-learning with Apache License 2.0
public static void writeToKafka() throws InterruptedException {
    Properties props = new Properties();
    props.put("bootstrap.servers", broker_list);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    KafkaProducer<String, String> producer = new KafkaProducer<>(props);

    // generate 10 random page-view events
    for (int i = 0; i < 10; i++) {
        String yyyyMMdd = new DateTime(System.currentTimeMillis()).toString("yyyyMMdd");
        int pageId = random.nextInt(10);    // random page id
        int userId = random.nextInt(100);   // random user id

        UserVisitWebEvent userVisitWebEvent = UserVisitWebEvent.builder()
                .id(UUID.randomUUID().toString())   // unique id for this log entry
                .date(yyyyMMdd)                     // date
                .pageId(pageId)                     // page id
                .userId(Integer.toString(userId))   // user id
                .url("url/" + pageId)               // page url
                .build();
        // serialize the event to JSON and send it to Kafka
        ProducerRecord<String, String> record = new ProducerRecord<>(topic,
                null, null, GsonUtil.toJson(userVisitWebEvent));
        producer.send(record);
        System.out.println("Sending data: " + GsonUtil.toJson(userVisitWebEvent));
    }
    producer.flush();
}
 
Example 12
Source File: FastenKafkaPlugin.java    From fasten with Apache License 2.0
/**
 * Send message to Kafka topic.
 *
 * @param producer Kafka producer
 * @param topic    topic to send to
 * @param msg      message
 */
private void emitMessage(KafkaProducer<String, String> producer, String topic, String msg) {
    ProducerRecord<String, String> record = new ProducerRecord<>(topic, msg);

    producer.send(record, (recordMetadata, e) -> {
        if (recordMetadata != null) {
            logger.debug("Sent: {} to {}", msg, topic);
        } else {
            logger.error("Failed to send: {} to {}", msg, topic, e);
        }
    });

    producer.flush();
}
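Calling flush() after every send() blocks until that single record is acknowledged, which defeats the producer's batching. If per-message durability is not required, a hedged alternative is to let the batching settings do the work and flush once at shutdown (property values are illustrative):

    // In the producer configuration:
    props.put("linger.ms", "5");        // wait up to 5 ms to fill a batch
    props.put("batch.size", "65536");   // up to 64 KiB per partition batch

    // ...many emitMessage() calls without per-record flush()...
    producer.flush();   // once, before closing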
 
Example 13
Source File: DBusRouterKafkaWriteBolt.java    From DBus with Apache License 2.0
private void sendKafka(String key, String data, String url, String topic, String ns, Tuple input, long offset) {
    if (StringUtils.isBlank(topic)) {
        logger.warn("namespace: {}, not obtain topic. ums: {}", ns, data);
        logger.info("kafka write bolt fail {}, topic {}", offset, topic);
        collector.fail(input);
        return;
    }
    // KafkaProducer<String, byte[]> producer = producerMap.get(url);
    Pair<String, KafkaProducer> pair = kafkaProducerManager.getKafkaClient(url);
    KafkaProducer<String, byte[]> producer = pair.getValue();
    if (producer != null) {
        producer.send(new ProducerRecord<String, byte[]>(topic, key, data.getBytes()), new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception == null) {
                    logger.info("kafka write bolt ack {}, topic {}, offset {}", offset, metadata.topic(), metadata.offset());
                    collector.ack(input);
                } else {
                    // metadata is null when the send fails, so log the target topic instead
                    logger.error("kafka write bolt fail {}, topic {}", offset, topic);
                    logger.error("kafka write bolt fail {}", exception.getMessage());
                    collector.fail(input);
                }
            }
        });
    } else {
        collector.fail(input);
        logger.warn("namespace: {}, not obtain producer. sink:{} ums: {}", ns, url, data);
    }
}
 
Example 14
Source File: TestUtils.java    From uReplicator with Apache License 2.0
public static void produceMessages(String bootstrapServer, String topicname, int messageCount,
    int numOfPartitions) {
  KafkaProducer<byte[], byte[]> producer = createProducer(bootstrapServer);
  for (int i = 0; i < messageCount; i++) {
    ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(topicname, i % numOfPartitions,
        null,
        String.format("Test Value - %d", i).getBytes());
    producer.send(record);
  }
  producer.flush();
  producer.close();
}
 
Example 15
Source File: KafkaUtil.java    From flink-learning with Apache License 2.0
public static void writeToKafka() throws InterruptedException {
    Properties props = new Properties();
    props.put("bootstrap.servers", broker_list);
    props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); //key 序列化
    props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); //value 序列化
    KafkaProducer producer = new KafkaProducer<String, String>(props);

    MetricEvent metric = new MetricEvent();
    metric.setTimestamp(System.currentTimeMillis());
    metric.setName("mem");
    Map<String, String> tags = new HashMap<>();
    Map<String, Object> fields = new HashMap<>();

    tags.put("cluster", "zhisheng");
    tags.put("host_ip", "101.147.022.106");

    fields.put("used_percent", 90d);
    fields.put("max", 27244873d);
    fields.put("used", 17244873d);
    fields.put("init", 27244873d);

    metric.setTags(tags);
    metric.setFields(fields);

    ProducerRecord<String, String> record = new ProducerRecord<>(topic, null, null, GsonUtil.toJson(metric));
    producer.send(record);
    System.out.println("Sending data: " + GsonUtil.toJson(metric));

    producer.flush();
}
 
Example 16
Source File: TumblingWindowKafkaStream.java    From kafka-streams-ex with MIT License
/** Runs the streams program, writing to the "long-counts-all" topic.
 *
 * @param args Not used.
 */
public static void main(String[] args) throws Exception {
    
    Properties config = new Properties();

    config.put(StreamsConfig.APPLICATION_ID_CONFIG,
        "tumbling-window-kafka-streams");
    config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG,
        "localhost:9092");
    config.put(StreamsConfig.ZOOKEEPER_CONNECT_CONFIG,
        "localhost:2181");
    config.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG,
        Serdes.ByteArray().getClass().getName());
    config.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG,
        Serdes.Long().getClass().getName());

    KStreamBuilder builder = new KStreamBuilder();

    KStream<byte[], Long> longs = builder.stream(
        Serdes.ByteArray(), Serdes.Long(), "longs");

    // The tumbling windows will clear every ten seconds.
    KTable<Windowed<byte[]>, Long> longCounts =
        longs.groupByKey()
             .count(TimeWindows.of(10000L)
                               .until(10000L),
                    "long-counts");

    // Write to topics.
    longCounts.toStream((k,v) -> k.key())
              .to(Serdes.ByteArray(),
                  Serdes.Long(),
                  "long-counts-all");

    KafkaStreams streams = new KafkaStreams(builder, config);
    streams.start();

    // Now generate the data and write to the topic.
    Properties producerConfig = new Properties();
    producerConfig.put("bootstrap.servers", "localhost:9092");
    producerConfig.put("key.serializer",
                       "org.apache.kafka.common" +
                       ".serialization.ByteArraySerializer");
    producerConfig.put("value.serializer",
                       "org.apache.kafka.common" +
                       ".serialization.LongSerializer");

    KafkaProducer<byte[], Long> producer =
        new KafkaProducer<>(producerConfig);

    Random rng = new Random(12345L);

    while(true) { 
        producer.send(new ProducerRecord<byte[], Long>(
            "longs", "A".getBytes(), rng.nextLong()%10));
        Thread.sleep(500L);
    } // Close infinite data generating loop.
}
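Note that this example targets the pre-1.0 Kafka Streams API: KStreamBuilder, ZOOKEEPER_CONNECT_CONFIG, and the KEY_SERDE/VALUE_SERDE configs were all removed in later releases. A hedged sketch of the same topology against the current API (grace-period semantics simplified):

    StreamsBuilder builder = new StreamsBuilder();
    KStream<byte[], Long> longs = builder.stream("longs",
        Consumed.with(Serdes.ByteArray(), Serdes.Long()));

    // Tumbling 10-second windows, counting values per key.
    KTable<Windowed<byte[]>, Long> longCounts = longs
        .groupByKey(Grouped.with(Serdes.ByteArray(), Serdes.Long()))
        .windowedBy(TimeWindows.of(Duration.ofSeconds(10)))
        .count();

    longCounts.toStream((k, v) -> k.key())
        .to("long-counts-all", Produced.with(Serdes.ByteArray(), Serdes.Long()));

    KafkaStreams streams = new KafkaStreams(builder.build(), config);
    streams.start();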
 
Example 17
Source File: TransactionConsumeTransformProduce.java    From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    // initialize the producer and the consumer
    KafkaConsumer<String, String> consumer =
            new KafkaConsumer<>(getConsumerProperties());
    consumer.subscribe(Collections.singletonList("topic-source"));
    KafkaProducer<String, String> producer =
            new KafkaProducer<>(getProducerProperties());
    // initialize transactions
    producer.initTransactions();
    while (true) {
        ConsumerRecords<String, String> records =
                consumer.poll(Duration.ofMillis(1000));
        if (!records.isEmpty()) {
            Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
            // begin the transaction
            producer.beginTransaction();
            try {
                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionRecords
                            = records.records(partition);
                    for (ConsumerRecord<String, String> record :
                            partitionRecords) {
                        //do some logical processing.
                        ProducerRecord<String, String> producerRecord =
                                new ProducerRecord<>("topic-sink", record.key(),
                                        record.value());
                        // consume-transform-produce pattern
                        producer.send(producerRecord);
                    }
                    long lastConsumedOffset = partitionRecords.
                            get(partitionRecords.size() - 1).offset();
                    offsets.put(partition,
                            new OffsetAndMetadata(lastConsumedOffset + 1));
                }
                // commit the consumed offsets within the transaction
                producer.sendOffsetsToTransaction(offsets, "groupId");
                // commit the transaction
                producer.commitTransaction();
            } catch (ProducerFencedException e) {
                // log the exception
                // abort the transaction
                producer.abortTransaction();
            }
        }
    }
}
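The hard-coded "groupId" passed to sendOffsetsToTransaction() must match the group.id the consumer is actually configured with, or the offsets are committed under the wrong group. Since Kafka 2.5 (KIP-447) a hedged, safer variant reads the metadata from the consumer itself, which also enables stronger fencing of zombie producers:

    // commit offsets under the consumer's real group, with group-metadata fencing
    producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata());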
 
Example 18
Source File: NativeKafkaWithStringDecoderTest.java    From hermes with Apache License 2.0
@Test
public void testNative() throws IOException, InterruptedException, ExecutionException {
	String topic = "kafka.SimpleTextTopic";
	int msgNum = 200;
	final CountDownLatch countDown = new CountDownLatch(msgNum);

	Properties producerProps = new Properties();
	// Producer
	producerProps.put("bootstrap.servers", "");
	producerProps.put("value.serializer", StringSerializer.class.getCanonicalName());
	producerProps.put("key.serializer", StringSerializer.class.getCanonicalName());
	// Consumer
	Properties consumerProps = new Properties();
	consumerProps.put("zookeeper.connect", "");
	consumerProps.put("group.id", "GROUP_" + topic);

	final List<String> actualResult = new ArrayList<String>();
	final List<String> expectedResult = new ArrayList<String>();

	ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps));
	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(topic, 1);
	final List<KafkaStream<String, String>> streams = consumerConnector.createMessageStreams(topicCountMap,
	      new StringDecoder(null), new StringDecoder(null)).get(topic);
	for (final KafkaStream<String, String> stream : streams) {
		new Thread() {
			public void run() {
				for (MessageAndMetadata<String, String> msgAndMetadata : stream) {
					try {
						System.out.println("received: " + msgAndMetadata.message());
						actualResult.add(msgAndMetadata.message());
						countDown.countDown();
					} catch (Exception e) {
						e.printStackTrace();
					}
				}
			}
		}.start();
	}

	KafkaProducer<String, String> producer = new KafkaProducer<String, String>(producerProps);
	int i = 0;
	while (i < msgNum) {
		ProducerRecord<String, String> data = new ProducerRecord<String, String>(topic, "test-message" + i++);
		Future<RecordMetadata> send = producer.send(data);
		send.get(); // block until the broker acknowledges; the future is then necessarily done
		System.out.println("sending: " + data.value());
		expectedResult.add(data.value());
	}

	countDown.await();

	Assert.assertArrayEquals(expectedResult.toArray(), actualResult.toArray());

	consumerConnector.shutdown();
	producer.close();
}
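This test drives the receiving side with the old Scala high-level consumer (kafka.consumer.ConsumerConnector), which was removed in Kafka 2.0. A hedged sketch of the same receive loop with the Java KafkaConsumer instead (reusing topic, msgNum, actualResult, and countDown from the test above; the broker address is illustrative):

	Properties newConsumerProps = new Properties();
	newConsumerProps.put("bootstrap.servers", "localhost:9092");
	newConsumerProps.put("group.id", "GROUP_" + topic);
	newConsumerProps.put("key.deserializer", StringDeserializer.class.getCanonicalName());
	newConsumerProps.put("value.deserializer", StringDeserializer.class.getCanonicalName());
	newConsumerProps.put("auto.offset.reset", "earliest");

	try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(newConsumerProps)) {
		consumer.subscribe(Collections.singletonList(topic));
		while (actualResult.size() < msgNum) {
			for (ConsumerRecord<String, String> rec : consumer.poll(Duration.ofMillis(100))) {
				actualResult.add(rec.value());
				countDown.countDown();
			}
		}
	}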
 
Example 19
Source File: KafkaExportITBase.java    From rya with Apache License 2.0
/**
 * Test kafka without rya code to make sure kafka works in this environment.
 * If this test fails then its a testing environment issue, not with Rya.
 * Source: https://github.com/asmaier/mini-kafka
 */
@Test
public void embeddedKafkaTest() throws Exception {
    // create topic
    final String topic = "testTopic";
    AdminUtils.createTopic(zkUtils, topic, 1, 1, new Properties(), RackAwareMode.Disabled$.MODULE$);

    // setup producer
    final Properties producerProps = new Properties();
    producerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
    producerProps.setProperty("key.serializer", "org.apache.kafka.common.serialization.IntegerSerializer");
    producerProps.setProperty("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");
    final KafkaProducer<Integer, byte[]> producer = new KafkaProducer<>(producerProps);

    // setup consumer
    final Properties consumerProps = new Properties();
    consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
    consumerProps.setProperty("group.id", "group0");
    consumerProps.setProperty("client.id", "consumer0");
    consumerProps.setProperty("key.deserializer", "org.apache.kafka.common.serialization.IntegerDeserializer");
    consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");

    // to make sure the consumer starts from the beginning of the topic
    consumerProps.put("auto.offset.reset", "earliest");

    final KafkaConsumer<Integer, byte[]> consumer = new KafkaConsumer<>(consumerProps);
    consumer.subscribe(Arrays.asList(topic));

    // send message
    final ProducerRecord<Integer, byte[]> data = new ProducerRecord<>(topic, 42, "test-message".getBytes(StandardCharsets.UTF_8));
    producer.send(data);
    producer.close();

    // starting consumer
    final ConsumerRecords<Integer, byte[]> records = consumer.poll(3000);
    assertEquals(1, records.count());
    final Iterator<ConsumerRecord<Integer, byte[]>> recordIterator = records.iterator();
    final ConsumerRecord<Integer, byte[]> record = recordIterator.next();
    assertEquals(42, (int) record.key());
    assertEquals("test-message", new String(record.value(), StandardCharsets.UTF_8));
    consumer.close();
}