Java Code Examples for org.apache.kafka.clients.consumer.KafkaConsumer#poll()

The following examples show how to use org.apache.kafka.clients.consumer.KafkaConsumer#poll(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
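
Most of the examples below follow the same basic pattern: configure a consumer, subscribe to one or more topics, and call poll() in a loop. Several of them still use the poll(long) overload, which was deprecated in Kafka 2.0.0 in favor of poll(Duration). For reference, here is a minimal sketch of that loop against the Duration-based API; the broker address, group id, and topic name are placeholders rather than values taken from any of the projects below.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class MinimalPollLoop {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("my-topic"));        // placeholder topic
            while (true) {
                // poll blocks for up to the given timeout waiting for records
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n",
                            record.offset(), record.key(), record.value());
                }
            }
        }
    }
}
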
Example 1
Source File: OffsetCommitSyncPartition.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();

    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                for (ConsumerRecord<String, String> record : partitionRecords) {
                    //do some logical processing.
                }
                long lastConsumedOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                consumer.commitSync(Collections.singletonMap(partition,
                        new OffsetAndMetadata(lastConsumedOffset + 1)));
            }
        }
    } finally {
        consumer.close();
    }
}
 
Example 2
Source File: AtlasNotificationServerEmulator.java    From nifi with Apache License 2.0
public void consume(Consumer<HookNotification> c) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("group.id", "test");
    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");
    props.put("session.timeout.ms", "30000");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList("ATLAS_HOOK"));

    isStopped = false;
    while (!isStopped) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            final MessageDeserializer deserializer = NotificationInterface.NotificationType.HOOK.getDeserializer();
            final HookNotification m
                    = (HookNotification) deserializer.deserialize(record.value());
            c.accept(m);
        }
    }

    consumer.close();
}
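
The isStopped flag above is only checked between polls. If the loop needs to be stopped while poll() is blocking, the documented pattern is to call consumer.wakeup() from another thread, which causes the blocked poll() to throw a WakeupException (org.apache.kafka.common.errors.WakeupException). A sketch of that shutdown pattern, not part of the original emulator, reusing the consumer built above:

    // in the consuming thread
    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            // ... process records ...
        }
    } catch (WakeupException e) {
        // expected during shutdown, nothing to do
    } finally {
        consumer.close();
    }

    // from another thread (or a shutdown hook), to stop the loop
    consumer.wakeup();
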
 
Example 3
Source File: SeekDemo.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();
    consumer.poll(Duration.ofMillis(2000));
    Set<TopicPartition> assignment = consumer.assignment();
    System.out.println(assignment);
    for (TopicPartition tp : assignment) {
        consumer.seek(tp, 10);
    }
    // consumer.seek(new TopicPartition(ConsumerFactory.topic,0),10);
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        //consume the record.
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.offset() + ":" + record.value());
        }
    }
}
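
The single consumer.poll(Duration.ofMillis(2000)) above assumes the group rebalance finishes within two seconds; if it does not, assignment() returns an empty set and the seek() calls are silently skipped. A more defensive variant (a sketch, not part of the original SeekDemo, reusing the same consumer and assuming java.util.Set and java.util.HashSet are imported) keeps polling until partitions have actually been assigned:

    Set<TopicPartition> assignment = new HashSet<>();
    while (assignment.isEmpty()) {
        consumer.poll(Duration.ofMillis(100)); // drives the group join/rebalance
        assignment = consumer.assignment();    // empty until partitions are assigned
    }
    for (TopicPartition tp : assignment) {
        consumer.seek(tp, 10);
    }
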
 
Example 4
Source File: TestStreamProcessor.java    From samza with Apache License 2.0
/**
 * Consumes data from the topic until there are no new messages for a while
 * and asserts that the number of consumed messages is as expected.
 */
@SuppressWarnings("unchecked")
private void verifyNumMessages(KafkaConsumer consumer, String topic, int expectedNumMessages) {
  consumer.subscribe(Collections.singletonList(topic));

  int count = 0;
  int emptyPollCount = 0;

  while (count < expectedNumMessages && emptyPollCount < 5) {
    ConsumerRecords records = consumer.poll(5000);
    if (!records.isEmpty()) {
      for (ConsumerRecord record : (Iterable<ConsumerRecord>) records) {
        Assert.assertEquals(new String((byte[]) record.value()), String.valueOf(count));
        count++;
      }
    } else {
      emptyPollCount++;
    }
  }

  Assert.assertEquals(count, expectedNumMessages);
}
 
Example 5
Source File: FlowLineCheckService.java    From DBus with Apache License 2.0
private boolean thirdStep(KafkaConsumer<String, byte[]> consumerThird, long time, Map<String, Boolean> retMap, long offset) {
    boolean isOk = false;
    try {
        long start = System.currentTimeMillis();
        while ((System.currentTimeMillis() - start < 1000 * 20) && !isOk) {
            ConsumerRecords<String, byte[]> records = consumerThird.poll(1000);
            for (ConsumerRecord<String, byte[]> record : records) {
                //if (StringUtils.contains(record.key(), String.valueOf(time))) {
                if (record.offset() >= offset) {
                    isOk = true;
                    break;
                }
            }
        }
    } catch (Exception e) {
        retMap.put("status", false);
        logger.error("auto check table third step error.", e);
    }
    return isOk;
}
 
Example 6
Source File: OffsetCommitSyncBatch.java    From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    Properties props = initConfig();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    final int minBatchSize = 200;
    List<ConsumerRecord> buffer = new ArrayList<>();
    while (running.get()) {
        ConsumerRecords<String, String> records = consumer.poll(1000);
        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
        }
        if (buffer.size() >= minBatchSize) {
            //do some logical processing with buffer.
            consumer.commitSync();
            buffer.clear();
        }
    }
}
 
Example 7
Source File: EarliestNativeTest.java    From vertx-kafka-client with Apache License 2.0
public static void main(String[] args) {

    Map<String, String> config = new HashMap<>();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    config.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
    config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

    KafkaConsumer consumer = new KafkaConsumer(config);
    consumer.subscribe(Collections.singleton("my-topic"));

    while (true) {
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
      for (ConsumerRecord<String, String> record: records) {
        System.out.println(record);
      }
    }
  }
 
Example 8
Source File: KafkaConsumerExample.java    From client-examples with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumerConfig config = KafkaConsumerConfig.fromEnv();
    Properties props = KafkaConsumerConfig.createProperties(config);
    int receivedMsgs = 0;

    if (System.getenv("JAEGER_SERVICE_NAME") != null)   {
        Tracer tracer = Configuration.fromEnv().getTracer();
        GlobalTracer.registerIfAbsent(tracer);

        props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, TracingConsumerInterceptor.class.getName());
    }

    boolean commit = !Boolean.parseBoolean(config.getEnableAutoCommit());
    KafkaConsumer consumer = new KafkaConsumer(props);
    consumer.subscribe(Collections.singletonList(config.getTopic()));

    while (receivedMsgs < config.getMessageCount()) {
        ConsumerRecords<String, String> records = consumer.poll(Long.MAX_VALUE);
        for (ConsumerRecord<String, String> record : records) {
            log.info("Received message:");
            log.info("\tpartition: {}", record.partition());
            log.info("\toffset: {}", record.offset());
            log.info("\tvalue: {}", record.value());
            receivedMsgs++;
        }
        if (commit) {
            consumer.commitSync();
        }
    }
}
 
Example 9
Source File: TrafficControlIntegrationTest.java    From data-highway with Apache License 2.0
private <T> List<T> fetchMessages(KafkaConsumer<?, T> consumer, int messagesCount) {
  List<T> result = new ArrayList<>();
  while (result.size() < messagesCount) {
    ConsumerRecords<?, T> consumerRecords = consumer.poll(100L);
    for (ConsumerRecord<?, T> record : consumerRecords) {
      result.add(record.value());
    }
  }
  return result;
}
 
Example 10
Source File: CaseController.java    From skywalking with Apache License 2.0
@Override
public void run() {
    Properties consumerProperties = new Properties();
    consumerProperties.put("bootstrap.servers", bootstrapServers);
    consumerProperties.put("group.id", "testGroup");
    consumerProperties.put("enable.auto.commit", "true");
    consumerProperties.put("auto.commit.interval.ms", "1000");
    consumerProperties.put("auto.offset.reset", "earliest");
    consumerProperties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProperties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties);
    consumer.subscribe(Arrays.asList(topicName));
    int i = 0;
    while (i++ <= 10) {
        try {
            Thread.sleep(1 * 1000);
        } catch (InterruptedException e) {
        }

        ConsumerRecords<String, String> records = consumer.poll(100);

        if (!records.isEmpty()) {
            for (ConsumerRecord<String, String> record : records) {
                logger.info("header: {}", new String(record.headers()
                                                           .headers("TEST")
                                                           .iterator()
                                                           .next()
                                                           .value()));
                logger.info("offset = {}, key = {}, value = {}", record.offset(), record.key(), record.value());
            }
            break;
        }
    }

    consumer.close();
}
 
Example 11
Source File: TestConsumer.java    From java-study with Apache License 2.0
public static void main(String[] args) {
    Properties props = new Properties();

    props.put("bootstrap.servers", "192.169.0.23:9092");
    System.out.println("this is the group part test 1");
    // consumer group id (either GroupA or GroupB)
    props.put("group.id", "GroupA");

    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");

    // session timeout: how long the broker waits before considering this consumer dead
    props.put("session.timeout.ms", "30000");
    // limit on the number of records returned by a single poll
    //props.put("max.poll.records", "100");

    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
    // subscribe to the list of topics
    consumer.subscribe(Arrays.asList("foo"));
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records)
            // in production, hand records off to a thread pool rather than processing them here
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
    }
}
 
Example 12
Source File: TestKafkaReplication.java    From hbase-connect-kafka with Apache License 2.0
@Test
public void testCustomReplicationEndpoint() throws Exception {
    try {
        Map<TableName, List<String>> tableCfs = new HashMap<>();
        List<String> cfs = new ArrayList<>();
        cfs.add(COLUMN_FAMILY);
        tableCfs.put(TABLE_NAME, cfs);

        createTestTable();
        addPeer(utility.getConfiguration(), PEER_NAME, tableCfs);
        int numberOfRecords = 10;
        addData(numberOfRecords);

        final KafkaConsumer kafkaConsumer = createAndGetKafkaConsumer();
        final AtomicInteger totalRecords = new AtomicInteger(0);
        kafkaConsumer.subscribe(Collections.singletonList(TABLE_NAME.getNameAsString()));
        while (totalRecords.get() < numberOfRecords) {
            ConsumerRecords<byte[], HRow> consumerRecords = kafkaConsumer.poll(1000);
            if (consumerRecords != null && !consumerRecords.isEmpty()) {
                consumerRecords.forEach(record -> {
                    final String expectedRowkey = String.format(ROWKEY, totalRecords.getAndAdd(1));
                    Assert.assertEquals(expectedRowkey, Bytes.toString(record.value().getRowKey()));
                });
            }
        }
        kafkaConsumer.close();
    } finally {
        removePeer();
    }
}
 
Example 13
Source File: PepperBoxLoadGenTest.java    From pepper-box with Apache License 2.0
@Test
public void consoleLoadGenTest() throws IOException {
    File schemaFile = File.createTempFile("json", ".schema");
    schemaFile.deleteOnExit();
    FileWriter schemaWriter = new FileWriter(schemaFile);
    schemaWriter.write(TestInputUtils.testSchema);
    schemaWriter.close();

    File producerFile = File.createTempFile("producer", ".properties");
    producerFile.deleteOnExit();
    FileWriter producerPropsWriter = new FileWriter(producerFile);
    producerPropsWriter.write(String.format(TestInputUtils.producerProps, BROKERHOST, BROKERPORT, ZKHOST, zkServer.port()));
    producerPropsWriter.close();

    String vargs []  = new String[]{"--schema-file", schemaFile.getAbsolutePath(), "--producer-config-file", producerFile.getAbsolutePath(), "--throughput-per-producer", "10", "--test-duration", "1", "--num-producers", "1"};
    PepperBoxLoadGenerator.main(vargs);

    Properties consumerProps = new Properties();
    consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
    consumerProps.setProperty("group.id", "group");
    consumerProps.setProperty("key.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.put("auto.offset.reset", "earliest");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps);
    consumer.subscribe(Arrays.asList(TOPIC));
    ConsumerRecords<String, String> records = consumer.poll(30000);
    Assert.assertTrue("PepperBoxLoadGenerator validation failed", records.count() > 0);

}
 
Example 14
Source File: DemoConsumerManualCommit.java    From KafkaExample with Apache License 2.0
public static void main(String[] args) throws Exception {
    args = new String[] { "kafka0:19092", "gender-amount", "group4", "consumer2" };
    if (args == null || args.length != 4) {
        System.err.println(
                "Usage:\n\tjava -jar kafka_consumer.jar ${bootstrap_server} ${topic_name} ${group_name} ${client_id}");
        System.exit(1);
    }
    String bootstrap = args[0];
    String topic = args[1];
    String groupid = args[2];
    String clientid = args[3];

    Properties props = new Properties();
    props.put("bootstrap.servers", bootstrap);
    props.put("group.id", groupid);
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", DoubleDeserializer.class.getName());
    props.put("max.poll.interval.ms", "300000");
    props.put("max.poll.records", "500");
    props.put("auto.offset.reset", "earliest");
    // values are doubles, matching the DoubleDeserializer configured above
    KafkaConsumer<String, Double> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));
    AtomicLong atomicLong = new AtomicLong();
    while (true) {
        ConsumerRecords<String, Double> records = consumer.poll(100);
        records.forEach(record -> {
            System.out.printf("client : %s , topic: %s , partition: %d , offset = %d, key = %s, value = %s%n",
                    clientid, record.topic(), record.partition(), record.offset(), record.key(), record.value());
            if (atomicLong.get() % 10 == 0) {
                // consumer.commitSync();
            }
        });
    }
}
 
Example 15
Source File: AtLeastOnceConsumer.java    From javabase with Apache License 2.0
private static void processRecords(KafkaConsumer<String, String> consumer) throws InterruptedException {
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        long lastOffset = 0;
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("\n\roffset = %d, key = %s, value = %s", record.offset(), record.key(), record.value());
            lastOffset = record.offset();
        }
        System.out.println("lastOffset read: " + lastOffset);
        process();

        // If the following line is commented out, offsets are never committed and the same messages will be consumed again.
        consumer.commitSync();
    }
}
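
Because commitSync() with no arguments commits the positions after the last poll, a crash in the middle of process() replays the whole batch on restart. If a smaller redelivery window is worth the extra round trips, one variant (a sketch, not part of the original class; process(record) is a hypothetical per-record handler, and java.util.Collections, org.apache.kafka.common.TopicPartition and org.apache.kafka.clients.consumer.OffsetAndMetadata are assumed to be imported) commits an explicit offset after each record:

    for (ConsumerRecord<String, String> record : records) {
        process(record); // hypothetical per-record handler
        consumer.commitSync(Collections.singletonMap(
                new TopicPartition(record.topic(), record.partition()),
                new OffsetAndMetadata(record.offset() + 1)));
    }
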
 
Example 16
Source File: KafkaReadFunction.java    From spliceengine with GNU Affero General Public License v3.0
@Override
public Iterator<ExecRow> call(Integer partition) throws Exception {
    Properties props = new Properties();

    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);

    String consumer_id = "spark-consumer-dss-krf-"+UUID.randomUUID();
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumer_id);
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, consumer_id);

    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ExternalizableDeserializer.class.getName());
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

    KafkaConsumer<Integer, Externalizable> consumer = new KafkaConsumer<Integer, Externalizable>(props);
    consumer.assign(Arrays.asList(new TopicPartition(topicName, partition)));

    return new Iterator<ExecRow>() {
        Iterator<ConsumerRecord<Integer, Externalizable>> it = null;

        @Override
        public boolean hasNext() {
            if (it == null) {
                ConsumerRecords<Integer, Externalizable> records = null;
                while (records == null || records.isEmpty()) {
                    records = consumer.poll( java.time.Duration.ofMillis(1000) );
                    if (TaskContext.get().isInterrupted()) {
                        consumer.close();
                        throw new TaskKilledException();
                    }
                }
                it = records.iterator();
            }
            if (it.hasNext()) {
                return true;
            }
            else {
                consumer.close();
                return false;
            }
        }

        @Override
        public ExecRow next() {
            return (ExecRow)it.next().value();
        }
    };
}
 
Example 17
Source File: AvroKafkaConsumer.java    From blog with MIT License
public static void main(String[] args) {

    /** Parse the schema definition with Avro */
    Schema.Parser parser = new Schema.Parser();
    Schema schema = parser.parse(AvroKafkaProducer.USER_SCHEMA);

    /** Use Bijection to convert between GenericRecord objects and byte arrays */
    Injection<GenericRecord, byte[]> recordInjection = GenericAvroCodecs.toBinary(schema);

    /** Consumer properties */
    Properties properties = new Properties();
    /** Kafka bootstrap servers */
    properties.put(
        ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node-160:9092,node-161:9092,node-162:9092");
    /** Key deserializer class */
    properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    /** Value deserializer class */
    properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class);
    /** Consumer group */
    properties.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_avro");

    /** Create the consumer */
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(properties);

    /** Subscribe to topics matching the pattern */
    consumer.subscribe(Pattern.compile("^topic01*$"));
    try {
      while (true) {
        /** Poll, waiting up to one second for new records */
        ConsumerRecords<String, byte[]> records = consumer.poll(Duration.ofSeconds(1));
        records.forEach(
            record -> {
              /** Deserialize the byte array back into a GenericRecord */
              GenericRecord genericRecord = recordInjection.invert(record.value()).get();
              System.out.printf(
                  "partition = %d, offset = %d, name = %s, age = %d %n",
                  record.partition(),
                  record.offset(),
                  genericRecord.get("name"),
                  genericRecord.get("age"));
            });
      }
    } finally {
      /** Close the consumer */
      consumer.close();
    }
  }
 
Example 18
Source File: KryoConsumerExample.java    From kafka-examples with Apache License 2.0
public static void main(String[] args) {
    ArgumentParser parser = argParser();

    try {
        Namespace res = parser.parseArgs(args);

        /* parse args */
        String brokerList = res.getString("bootstrap.servers");
        String topic = res.getString("topic");


        Properties consumerConfig = new Properties();
        consumerConfig.put("group.id", "my-group");
        consumerConfig.put("bootstrap.servers", brokerList);
        consumerConfig.put("auto.offset.reset", "earliest");
        consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "kafka.examples.kryo.serde.KryoDeserializer");
        consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "kafka.examples.kryo.serde.KryoDeserializer");

        KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(consumerConfig);
        consumer.subscribe(Collections.singletonList(topic));

        while (true) {
            ConsumerRecords<String, Object> records = consumer.poll(1000);
            for (ConsumerRecord<String, Object> record : records) {
                System.out.printf("Received Message topic =%s, partition =%s, offset = %d, key = %s, value = %s\n", record.topic(), record.partition(), record.offset(), record.key(), record.value());
            }

            consumer.commitSync();
        }


    } catch (ArgumentParserException e) {
        if (args.length == 0) {
            parser.printHelp();
            System.exit(0);
        } else {
            parser.handleError(e);
            System.exit(1);
        }
    }

}
 
Example 19
Source File: PepperBoxSamplerTest.java    From pepper-box with Apache License 2.0
@Test
public void plainTextKeyedMessageSamplerTest() throws IOException {

    PepperBoxKafkaSampler sampler = new PepperBoxKafkaSampler();
    Arguments arguments = sampler.getDefaultParameters();
    arguments.removeArgument(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG);
    arguments.removeArgument(ProducerKeys.KAFKA_TOPIC_CONFIG);
    arguments.removeArgument(ProducerKeys.ZOOKEEPER_SERVERS);
    arguments.removeArgument(PropsKeys.KEYED_MESSAGE_KEY);
    arguments.addArgument(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BROKERHOST + ":" + BROKERPORT);
    arguments.addArgument(ProducerKeys.ZOOKEEPER_SERVERS, ZKHOST + ":" + zkServer.port());
    arguments.addArgument(ProducerKeys.KAFKA_TOPIC_CONFIG, TOPIC);
    arguments.addArgument(PropsKeys.KEYED_MESSAGE_KEY,"YES");

    jmcx = new JavaSamplerContext(arguments);
    sampler.setupTest(jmcx);

    PlainTextConfigElement keyConfigElement = new PlainTextConfigElement();
    keyConfigElement.setJsonSchema(TestInputUtils.testKeySchema);
    keyConfigElement.setPlaceHolder(PropsKeys.MSG_KEY_PLACEHOLDER);
    keyConfigElement.iterationStart(null);

    PlainTextConfigElement valueConfigElement = new PlainTextConfigElement();
    valueConfigElement.setJsonSchema(TestInputUtils.testSchema);
    valueConfigElement.setPlaceHolder(PropsKeys.MSG_PLACEHOLDER);
    valueConfigElement.iterationStart(null);

    Object keySent = JMeterContextService.getContext().getVariables().getObject(PropsKeys.MSG_KEY_PLACEHOLDER);
    Object valueSent = JMeterContextService.getContext().getVariables().getObject(PropsKeys.MSG_PLACEHOLDER);
    sampler.runTest(jmcx);

    Properties consumerProps = new Properties();
    consumerProps.setProperty("bootstrap.servers", BROKERHOST + ":" + BROKERPORT);
    consumerProps.setProperty("group.id", "group0");
    consumerProps.setProperty("client.id", "consumer0");
    consumerProps.setProperty("key.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.put("auto.offset.reset", "earliest");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps);
    consumer.subscribe(Arrays.asList(TOPIC));
    ConsumerRecords<String, String> records = consumer.poll(30000);
    Assert.assertEquals(1, records.count());
    for (ConsumerRecord<String, String> record : records){
        Assert.assertEquals("Failed to validate key of produced message", keySent.toString(), record.key());
        Assert.assertEquals("Failed to validate value of produced message", valueSent.toString(), record.value());
    }

    sampler.teardownTest(jmcx);
}
 
Example 20
Source File: TransactionalWordCount.java    From tutorials with MIT License
public static void main(String[] args) {

        KafkaConsumer<String, String> consumer = createKafkaConsumer();
        KafkaProducer<String, String> producer = createKafkaProducer();

        producer.initTransactions();

        try {

            while (true) {

                ConsumerRecords<String, String> records = consumer.poll(ofSeconds(60));

                Map<String, Integer> wordCountMap = records.records(new TopicPartition(INPUT_TOPIC, 0))
                        .stream()
                        .flatMap(record -> Stream.of(record.value().split(" ")))
                        .map(word -> Tuple.of(word, 1))
                        .collect(Collectors.toMap(tuple -> tuple.getKey(), t1 -> t1.getValue(), (v1, v2) -> v1 + v2));

                producer.beginTransaction();

                wordCountMap.forEach((key, value) -> producer.send(new ProducerRecord<String, String>(OUTPUT_TOPIC, key, value.toString())));

                Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>();

                for (TopicPartition partition : records.partitions()) {
                    List<ConsumerRecord<String, String>> partitionedRecords = records.records(partition);
                    long offset = partitionedRecords.get(partitionedRecords.size() - 1).offset();

                    offsetsToCommit.put(partition, new OffsetAndMetadata(offset + 1));
                }

                producer.sendOffsetsToTransaction(offsetsToCommit, CONSUMER_GROUP_ID);
                producer.commitTransaction();

            }

        } catch (KafkaException e) {

            producer.abortTransaction();

        }


    }