Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecords#count()

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecords#count(). Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
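Before the project examples, here is a minimal, self-contained sketch of the call in context. count() returns the total number of records, across all topic-partitions, fetched by a single poll() call, which makes it a convenient emptiness check. The broker address, group id, and topic name ("example-topic") below are illustrative assumptions, not taken from any of the projects that follow.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class CountUsageSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");   // assumed broker address
        props.put("group.id", "count-usage-sketch");         // illustrative group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // assumed topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            // count() totals the records across all partitions in this poll result
            if (records.count() > 0) {
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.offset() + ": " + record.value());
                }
            }
        }
    }
}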
Example 1
Source File: KafkaQueue.java    From NetDiscovery with Apache License 2.0
@Override
public Request poll(String spiderName) {
    // max.poll.records=1 forces consuming a single record per poll
    ConsumerRecords<String, Request> records = consumer.poll(timeout);
    if (records != null && records.count() > 0) {

        consumer.commitAsync();
        ConsumerRecord<String, Request> record = records.iterator().next();
        log.info("kafka consumer result count: {}, data: {}", records.count(), record);

        this.currentOffset = record.offset();

        return record.value();
    }
    return null;
}
 
Example 2
Source File: AbstractInventoryCountTests.java    From spring-cloud-stream-samples with Apache License 2.0
/**
 * Consume the actual events from the output topic.
 * This implementation uses a {@link Consumer}, assuming an (embedded) Kafka broker, but may be overridden.
 * @param expectedCount the expected number of messages; knowing this avoids a timeout delay if all is well.
 *
 * @return the consumed data.
 */
protected Map<ProductKey, InventoryCountEvent> consumeActualInventoryCountEvents(int expectedCount) {
    Map<ProductKey, InventoryCountEvent> inventoryCountEvents = new LinkedHashMap<>();
    int receivedCount = 0;
    while (receivedCount < expectedCount) {
        ConsumerRecords<ProductKey, InventoryCountEvent> records = KafkaTestUtils.getRecords(consumer, 1000);
        if (records.isEmpty()) {
            logger.error("No more records received. Expected {} received {}.", expectedCount, receivedCount);
            break;
        }
        receivedCount += records.count();
        for (Iterator<ConsumerRecord<ProductKey, InventoryCountEvent>> it = records.iterator(); it.hasNext(); ) {
            ConsumerRecord<ProductKey, InventoryCountEvent> consumerRecord = it.next();
            logger.debug("consumed " + consumerRecord.key().getProductCode() + " = " + consumerRecord.value().getCount());
            inventoryCountEvents.put(consumerRecord.key(), consumerRecord.value());
        }
    }
    return inventoryCountEvents;
}
 
Example 3
Source File: ConsumerLease.java    From nifi with Apache License 2.0
/**
 * Executes a poll on the underlying Kafka Consumer and creates any new
 * flowfiles necessary or appends to existing ones if in demarcation mode.
 */
void poll() {
    /**
     * Implementation note:
     * Even if ConsumeKafka is not scheduled to poll for longer than session.timeout.ms (defaults to 10 sec),
     * e.g. because downstream connection back-pressure is engaged, the Kafka consumer keeps sending heartbeats
     * from a background thread. If this situation lasts longer than max.poll.interval.ms (defaults to 5 min),
     * the Kafka consumer sends a Leave Group request to the Group Coordinator. When the ConsumeKafka processor
     * is scheduled again, the Kafka client checks whether this client instance is still part of the consumer
     * group. If not, it rejoins before polling messages. This behavior comes from Kafka KIP-62 and is available
     * from Kafka client 0.10.1.0 onwards.
     */
    try {
        final ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(10);
        lastPollEmpty = records.count() == 0;
        processRecords(records);
    } catch (final ProcessException pe) {
        throw pe;
    } catch (final Throwable t) {
        this.poison();
        throw t;
    }
}
 
Example 4
Source File: KafkaLatencyBenchmark.java    From rya with Apache License 2.0
private void handle(final ConsumerRecords<String, ? extends BindingSet> records) {
    if(records.count() > 0) {
        logger.debug("Received {} records", records.count());
    }
    for(final ConsumerRecord<String, ? extends BindingSet> record: records){
        final BindingSet result = record.value();
        logger.debug("Received BindingSet: {}", result);

        final String type = result.getBinding("type").getValue().stringValue();
        final long total = Long.parseLong(result.getBinding("total").getValue().stringValue());

        final Stat stat = typeToStatMap.get(type);
        if(stat == null) {
            logger.warn("Not expecting to receive type: {}", type);
        } else {
            stat.fluoTotal.set(total);
        }
    }
}
 
Example 5
Source File: ConsumerLease.java    From nifi with Apache License 2.0
/**
 * Executes a poll on the underlying Kafka Consumer and creates any new
 * flowfiles necessary or appends to existing ones if in demarcation mode.
 */
void poll() {
    /**
     * Implementation note:
     * Even if ConsumeKafka is not scheduled to poll for longer than session.timeout.ms (defaults to 10 sec),
     * e.g. because downstream connection back-pressure is engaged, the Kafka consumer keeps sending heartbeats
     * from a background thread. If this situation lasts longer than max.poll.interval.ms (defaults to 5 min),
     * the Kafka consumer sends a Leave Group request to the Group Coordinator. When the ConsumeKafka processor
     * is scheduled again, the Kafka client checks whether this client instance is still part of the consumer
     * group. If not, it rejoins before polling messages. This behavior comes from Kafka KIP-62 and is available
     * from Kafka client 0.10.1.0 onwards.
     */
    try {
        final ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(10);
        lastPollEmpty = records.count() == 0;
        processRecords(records);
    } catch (final ProcessException pe) {
        throw pe;
    } catch (final Throwable t) {
        this.poison();
        throw t;
    }
}
 
Example 6
Source File: KafkaReciverTest.java    From netty-pubsub with MIT License
public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "127.0.0.1:9092");
        properties.put("group.id", "group-3");
        properties.put("enable.auto.commit", "true");
        properties.put("auto.commit.interval.ms", "1000");
        properties.put("auto.offset.reset", "earliest");
        properties.put("session.timeout.ms", "30000");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        //properties.put("value.deserializer", "client.KafkaDeSerialization");
        KafkaConsumer<String, TestMsg> kafkaConsumer = new KafkaConsumer<>(properties);
        kafkaConsumer.subscribe(Arrays.asList("HelloWorld"));
        while (true) {
            ConsumerRecords<String, TestMsg> records = kafkaConsumer.poll(100);
            if (records.count() > 0) {
                for (ConsumerRecord<String, TestMsg> record : records) {
                    System.out.printf("offset = %d, value = %s", record.offset(), record.value());
                    System.out.println();
                }
            }
        }
}
 
Example 7
Source File: SinkerKafkaSource.java    From DBus with Apache License 2.0
public List<String> poll() {
    /* Fetch quickly; return immediately if there is nothing to read */
    ConsumerRecords<String, String> records = consumer.poll(1000);
    if (records.count() == 0) {
        count++;
        if (count % 60 == 0) {
            count = 0;
            LOG.info("[sinker] SinkerKafkaSource running on {} (offset={}).......", topic, consumer.position(topicPartition));
        }
        return null;
    }

    List<String> list = new ArrayList<>();
    long maxOffset = 0L;
    for (ConsumerRecord<String, String> record : records) {
        if (record.offset() > maxOffset) maxOffset = record.offset();
        list.add(record.key());
    }

    LOG.info("[sinker] SinkerKafkaSource got {} records, max offset: {}", records.count(), maxOffset);
    return list;
}
 
Example 8
Source File: ConsumerLease.java    From localization_nifi with Apache License 2.0
/**
 * Executes a poll on the underlying Kafka Consumer and creates any new
 * flowfiles necessary or appends to existing ones if in demarcation mode.
 */
void poll() {
    /**
     * Implementation note: If we take too long (30 secs?) between Kafka
     * poll calls and our own record processing before any subsequent poll
     * call or the commit, we can run into a situation where the commit will
     * succeed to the session but fail on committing offsets. This is
     * apparently different from the Kafka scenario of electing to rebalance
     * for other reasons, but in this case it is due to a session timeout. It
     * appears Kafka KIP-62 aims to offer more control over the meaning of
     * various timeouts. If we do run into this case it could result in
     * duplicates.
     */
    try {
        final ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(10);
        lastPollEmpty = records.count() == 0;
        processRecords(records);
    } catch (final Throwable t) {
        this.poison();
        throw t;
    }
}
 
Example 9
Source File: ConsumerLease.java    From localization_nifi with Apache License 2.0
/**
 * Executes a poll on the underlying Kafka Consumer and creates any new
 * flowfiles necessary or appends to existing ones if in demarcation mode.
 */
void poll() {
    /**
     * Implementation note: If we take too long (30 secs?) between Kafka
     * poll calls and our own record processing before any subsequent poll
     * call or the commit, we can run into a situation where the commit will
     * succeed to the session but fail on committing offsets. This is
     * apparently different from the Kafka scenario of electing to rebalance
     * for other reasons, but in this case it is due to a session timeout. It
     * appears Kafka KIP-62 aims to offer more control over the meaning of
     * various timeouts. If we do run into this case it could result in
     * duplicates.
     */
    try {
        final ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(10);
        lastPollEmpty = records.count() == 0;
        processRecords(records);
    } catch (final Throwable t) {
        this.poison();
        throw t;
    }
}
 
Example 10
Source File: NakadiKafkaConsumer.java    From nakadi with MIT License
@Override
public List<ConsumedEvent> readEvents() {
    final ConsumerRecords<byte[], byte[]> records = kafkaConsumer.poll(pollTimeout);
    if (records.isEmpty()) {
        return Collections.emptyList();
    }
    final ArrayList<ConsumedEvent> result = new ArrayList<>(records.count());
    for (final ConsumerRecord<byte[], byte[]> record : records) {
        final KafkaCursor cursor = new KafkaCursor(record.topic(), record.partition(), record.offset());
        final Timeline timeline = timelineMap.get(new TopicPartition(record.topic(), record.partition()));

        result.add(new ConsumedEvent(
                record.value(),
                cursor.toNakadiCursor(timeline),
                record.timestamp(),
                EventOwnerHeader.deserialize(record)));
    }
    return result;
}
 
Example 11
Source File: KafkaReader.java    From DBus with Apache License 2.0
/**
 * Runs the reader and checks the performance of pulling a whole table.
 *
 * @throws Exception
 */
public void run() throws Exception {
    int readCount = 0;
    try {
        this.consumer = createConsumer();
        // Fetch data from the consumer
        while (running) {
            // Wait up to 1000 ms for records
            ConsumerRecords<String, String> records = consumer.poll(1000);
            if (records.count() == 0) {
                System.out.print(".");
                continue;
            }
            for (ConsumerRecord<String, String> record : records) {
                if (readCount >= maxLength) {
                    running = false;
                    break;
                }
                readCount++;

                System.out.println("");
                System.out.println("offset: " + record.offset() + ", key:" + record.key());
                System.out.println(record.value());
            }
        }

    } catch (Exception e) {
        logger.error("Exception was caught when read kafka", e);
        throw e;
    } finally {
        System.out.println("");

        consumer.close();
        logger.info("Finished read kafka");
    }
}
 
Example 12
Source File: EventMessagingConsumer.java    From cqrs-es-kafka with MIT License
@Override
public void run() {
    ConsumerRecords<String, String> events;
    EventHandler handler;
    Long pollTimeout = this.timeout;
    KafkaConsumer<String, String> consumer = getKafkaEventConsumer();

    log.info("Initializing event handler registry. Found the following event handlers: " + this.printRegistry(this.handlerRegistry));

    while (true) {
        events = consumer.poll(this.timeout);
        if (events == null || events.count() == 0) {
            pollTimeout++;
        } else {
            log.info("Polled {} domain events for query part update after {} timeout", events.count(), pollTimeout);
            pollTimeout = this.timeout;

            for (ConsumerRecord<String, String> event : events) {
                handler = this.handlerRegistry.get(event.key());
                if (handler != null) {
                    handler.handleMessage(gson.fromJson(event.value(), new TypeToken<Map<String, Object>>() {
                    }.getType()));
                } else {
                    log.warn("Couldn't find an event handler for event type: {" + event.key() + "}");
                }
            }
        }
    }
}
 
Example 13
Source File: KafkaRangerAuthorizerSASLSSLTest.java    From ranger with Apache License 2.0
@Test
public void testAuthorizedRead() throws Exception {
    // Create the Producer
    Properties producerProps = new Properties();
    producerProps.put("bootstrap.servers", "localhost:" + port);
    producerProps.put("acks", "all");
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
    producerProps.put("sasl.mechanism", "PLAIN");
    
    producerProps.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS");
    producerProps.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, serviceKeystorePath);
    producerProps.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "sspass");
    producerProps.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "skpass");
    producerProps.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, truststorePath);
    producerProps.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "security");
    
    final Producer<String, String> producer = new KafkaProducer<>(producerProps);
    
    // Create the Consumer
    Properties consumerProps = new Properties();
    consumerProps.put("bootstrap.servers", "localhost:" + port);
    consumerProps.put("group.id", "test");
    consumerProps.put("enable.auto.commit", "true");
    consumerProps.put("auto.offset.reset", "earliest");
    consumerProps.put("auto.commit.interval.ms", "1000");
    consumerProps.put("session.timeout.ms", "30000");
    consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
    consumerProps.put("sasl.mechanism", "PLAIN");
    
    consumerProps.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS");
    consumerProps.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, clientKeystorePath);
    consumerProps.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "cspass");
    consumerProps.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "ckpass");
    consumerProps.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, truststorePath);
    consumerProps.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "security");
    
    final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps);
    consumer.subscribe(Arrays.asList("test"));
    
    // Send a message
    producer.send(new ProducerRecord<String, String>("test", "somekey", "somevalue"));
    producer.flush();
    
    // Poll until we consume it
    
    ConsumerRecord<String, String> record = null;
    for (int i = 0; i < 1000; i++) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        if (records.count() > 0) {
            record = records.iterator().next();
            break;
        }
        Thread.sleep(1000);
    }

    Assert.assertNotNull(record);
    Assert.assertEquals("somevalue", record.value());

    producer.close();
    consumer.close();
}
 
Example 14
Source File: KafkaRangerAuthorizerTest.java    From ranger with Apache License 2.0
@Test
public void testAuthorizedReadUsingTagPolicy() throws Exception {
    // Create the Producer
    Properties producerProps = new Properties();
    producerProps.put("bootstrap.servers", "localhost:" + port);
    producerProps.put("acks", "all");
    producerProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    producerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
    producerProps.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS");
    producerProps.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, serviceKeystorePath);
    producerProps.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "sspass");
    producerProps.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "skpass");
    producerProps.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, truststorePath);
    producerProps.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "security");

    final Producer<String, String> producer = new KafkaProducer<>(producerProps);

    // Create the Consumer
    Properties consumerProps = new Properties();
    consumerProps.put("bootstrap.servers", "localhost:" + port);
    consumerProps.put("group.id", "test");
    consumerProps.put("enable.auto.commit", "true");
    consumerProps.put("auto.offset.reset", "earliest");
    consumerProps.put("auto.commit.interval.ms", "1000");
    consumerProps.put("session.timeout.ms", "30000");
    consumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    consumerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SSL");
    consumerProps.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "JKS");
    consumerProps.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, clientKeystorePath);
    consumerProps.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, "cspass");
    consumerProps.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, "ckpass");
    consumerProps.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, truststorePath);
    consumerProps.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "security");

    final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProps);
    consumer.subscribe(Arrays.asList("messages"));

    // Send a message
    producer.send(new ProducerRecord<String, String>("messages", "somekey", "somevalue"));
    producer.flush();

    // Poll until we consume it

    ConsumerRecord<String, String> record = null;
    for (int i = 0; i < 1000; i++) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        if (records.count() > 0) {
            record = records.iterator().next();
            break;
        }
        Thread.sleep(1000);
    }

    Assert.assertNotNull(record);
    Assert.assertEquals("somevalue", record.value());

    producer.close();
    consumer.close();
}
 
Example 15
Source File: KafkaMessageLogReceiverEndpoint.java    From synapse with Apache License 2.0
private ChannelPosition processMessages(final ChannelPosition startFrom,
                                        final Predicate<ShardResponse> stopCondition,
                                        final ConsumerRebalanceHandler rebalanceHandler,
                                        final KafkaRecordsConsumer recordsConsumer) {
    final long firstMessageLogTime = System.currentTimeMillis();
    final AtomicLong shardMessagesCounter = new AtomicLong(0);
    final AtomicLong previousMessageLogTime = new AtomicLong(System.currentTimeMillis());
    final AtomicLong previousLoggedMessageCounterMod = new AtomicLong(0), previousLoggedMessageCounter = new AtomicLong(0);
    final AtomicBoolean stopConditionMet = new AtomicBoolean(false);
    ChannelPosition channelPosition = startFrom;

    try {
        do {
            final ConsumerRecords<String, String> records = kafkaConsumer.poll(ofMillis(KAFKA_CONSUMER_POLLING_DURATION));
            if (rebalanceHandler.shardsAssignedAndPositioned()) {
                final ChannelResponse channelResponse = recordsConsumer.apply(records);
                channelPosition = channelResponse.getChannelPosition();
                stopConditionMet.set(channelResponse.getShardResponses().stream().allMatch(stopCondition));
                kafkaConsumer.commitAsync();

                int responseMessagesCounter = records.count();
                long totalMessagesCounter = shardMessagesCounter.addAndGet(responseMessagesCounter);

                if ((totalMessagesCounter > 0 && totalMessagesCounter > previousLoggedMessageCounterMod.get() + LOG_MESSAGE_COUNTER_EVERY_NTH_MESSAGE) || stopConditionMet.get()) {
                    double messagesPerSecond = LogHelper.calculateMessagesPerSecond(previousMessageLogTime.getAndSet(System.currentTimeMillis()), totalMessagesCounter - previousLoggedMessageCounter.get());

                    LOG.info("Read {} messages ({} per sec) from '{}', durationBehind={}, totalMessages={}", responseMessagesCounter, String.format("%.2f", messagesPerSecond), getChannelName(), channelResponse.getChannelDurationBehind(), totalMessagesCounter);
                    if (stopConditionMet.get() || stopSignal.get()) {
                        LOG.info("Stop reading of channel={}, stopCondition={}, stopSignal={}, durationBehind={}", getChannelName(), stopConditionMet, stopSignal.get(), channelResponse.getChannelDurationBehind());
                    }

                    previousLoggedMessageCounterMod.set(totalMessagesCounter - (totalMessagesCounter % LOG_MESSAGE_COUNTER_EVERY_NTH_MESSAGE));
                    previousLoggedMessageCounter.set(totalMessagesCounter);
                }

            }
        } while (!stopConditionMet.get() && !stopSignal.get());

    } catch (final WakeupException e) {
        // ignore for shutdown
        LOG.info("Shutting down Kafka consumer");
    }
    final double totalMessagesPerSecond = LogHelper.calculateMessagesPerSecond(firstMessageLogTime, shardMessagesCounter.get());
    LOG.info("Read a total of {} messages from '{}', totalMessagesPerSecond={}", shardMessagesCounter.get(), getChannelName(), String.format("%.2f", totalMessagesPerSecond));
    return channelPosition;
}
 
Example 16
Source File: Consumer.java    From kafka-sample-programs with Apache License 2.0
public static void main(String[] args) throws IOException {
    // set up house-keeping
    ObjectMapper mapper = new ObjectMapper();
    Histogram stats = new Histogram(1, 10000000, 2);
    Histogram global = new Histogram(1, 10000000, 2);

    // and the consumer
    KafkaConsumer<String, String> consumer;
    try (InputStream props = Resources.getResource("consumer.props").openStream()) {
        Properties properties = new Properties();
        properties.load(props);
        if (properties.getProperty("group.id") == null) {
            properties.setProperty("group.id", "group-" + new Random().nextInt(100000));
        }
        consumer = new KafkaConsumer<>(properties);
    }
    consumer.subscribe(Arrays.asList("fast-messages", "summary-markers"));
    int timeouts = 0;
    //noinspection InfiniteLoopStatement
    while (true) {
        // read records with a short timeout. If we time out, we don't really care.
        ConsumerRecords<String, String> records = consumer.poll(200);
        if (records.count() == 0) {
            timeouts++;
        } else {
            System.out.printf("Got %d records after %d timeouts\n", records.count(), timeouts);
            timeouts = 0;
        }
        for (ConsumerRecord<String, String> record : records) {
            switch (record.topic()) {
                case "fast-messages":
                    // the send time is encoded inside the message
                    JsonNode msg = mapper.readTree(record.value());
                    switch (msg.get("type").asText()) {
                        case "test":
                            long latency = (long) ((System.nanoTime() * 1e-9 - msg.get("t").asDouble()) * 1000);
                            stats.recordValue(latency);
                            global.recordValue(latency);
                            break;
                        case "marker":
                            // whenever we get a marker message, we should dump out the stats
                            // note that the number of fast messages won't necessarily be quite constant
                            System.out.printf("%d messages received in period, latency(min, max, avg, 99%%) = %d, %d, %.1f, %d (ms)\n",
                                    stats.getTotalCount(),
                                    stats.getValueAtPercentile(0), stats.getValueAtPercentile(100),
                                    stats.getMean(), stats.getValueAtPercentile(99));
                            System.out.printf("%d messages received overall, latency(min, max, avg, 99%%) = %d, %d, %.1f, %d (ms)\n",
                                    global.getTotalCount(),
                                    global.getValueAtPercentile(0), global.getValueAtPercentile(100),
                                    global.getMean(), global.getValueAtPercentile(99));

                            stats.reset();
                            break;
                        default:
                            throw new IllegalArgumentException("Illegal message type: " + msg.get("type"));
                    }
                    break;
                case "summary-markers":
                    break;
                default:
                    throw new IllegalStateException("Shouldn't be possible to get message on topic " + record.topic());
            }
        }
    }
}
 
Example 17
Source File: Consumer.java    From hdinsight-kafka-java-get-started with MIT License
public static int consume(String brokers, String groupId, String topicName) {
    // Create a consumer
    KafkaConsumer<String, String> consumer;
    // Configure the consumer
    Properties properties = new Properties();
    // Point it to the brokers
    properties.setProperty("bootstrap.servers", brokers);
    // Set the consumer group (all consumers must belong to a group).
    properties.setProperty("group.id", groupId);
    // Set how to deserialize key/value pairs
    properties.setProperty("key.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
    properties.setProperty("value.deserializer","org.apache.kafka.common.serialization.StringDeserializer");
    // When a group is first created, it has no offset stored to start reading from. This tells it to start
    // with the earliest record in the stream.
    properties.setProperty("auto.offset.reset","earliest");

    // specify the protocol for Domain Joined clusters
    //properties.setProperty(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");

    consumer = new KafkaConsumer<>(properties);

    // Subscribe to the requested topic
    consumer.subscribe(Arrays.asList(topicName));

    // Loop until ctrl + c
    int count = 0;
    while(true) {
        // Poll for records
        ConsumerRecords<String, String> records = consumer.poll(200);
        // Did we get any?
        if (records.count() == 0) {
            // timeout/nothing to read
        } else {
            // Yes, loop over records
            for(ConsumerRecord<String, String> record: records) {
                // Display record and count
                count += 1;
                System.out.println( count + ": " + record.value());
            }
        }
    }
}
 
Example 18
Source File: MapRDBCDCSource.java    From datacollector with Apache License 2.0
@Override
public Long call() throws Exception {
  LOG.trace("Starting poll loop in thread {}", Thread.currentThread().getName());
  long messagesProcessed = 0;

  //wait until all threads are spun up before processing
  startProcessingGate.await();

  try {
    consumer.subscribe(topicList);

    while (!getContext().isStopped()) {
      BatchContext batchContext = getContext().startBatch();
      ConsumerRecords<byte[], ChangeDataRecord> messages = consumer.poll(conf.batchWaitTime);

      for (ConsumerRecord<byte[], ChangeDataRecord> message : messages) {
        Map<String, String> attributes = new HashMap<>();
        attributes.put(HeaderAttributeConstants.TOPIC, message.topic());
        attributes.put(HeaderAttributeConstants.PARTITION, String.valueOf(message.partition()));
        attributes.put(HeaderAttributeConstants.OFFSET, String.valueOf(message.offset()));
        attributes.put(MAPR_TABLE_NAME, conf.topicTableList.get(message.topic()));

        iterateNode(message.value(), batchContext.getBatchMaker(), attributes);
      }

      getContext().processBatch(batchContext);
      messagesProcessed += messages.count();
      LOG.info("MapRDBCDC thread {} finished processing {} messages", threadID, messages.count());
    }
  } catch (OnRecordErrorException re) {
    LOG.debug("Encountered record error ");
    errorRecordHandler.onError(re);
  } catch (Exception e) {
    LOG.error("Encountered error in MapRDBCDC thread {} during read {}", threadID, e);
    handleException(MaprDBCDCErrors.MAPRDB_03, e.getMessage(), e);
  } finally {
    consumer.unsubscribe();
    consumer.close();
  }

  LOG.info("MapRDBCDC kafka thread {} consumed {} messages", threadID, messagesProcessed);
  return messagesProcessed;
}
 
Example 19
Source File: LiKafkaConsumerIntegrationTest.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Test
public void testExceptionOnLargeMsgDropped() throws Exception {
  MessageSplitter splitter = new MessageSplitterImpl(MAX_SEGMENT_SIZE,
      new DefaultSegmentSerializer(),
      new UUIDFactory.DefaultUUIDFactory<>());

  String topic = "testExceptionOnLargeMsgDropped";
  createTopic(topic);
  TopicPartition tp = new TopicPartition(topic, 0);
  Collection<TopicPartition> tps = new ArrayList<>(Collections.singletonList(tp));

  //send a gigantic msg

  Producer<byte[], byte[]> producer = createRawProducer();
  // M0, 20 segments
  UUID messageId0 = LiKafkaClientsUtils.randomUUID();
  String message0 = KafkaTestUtils.getRandomString(20 * MAX_SEGMENT_SIZE);
  List<ProducerRecord<byte[], byte[]>> m0Segs = splitter.split(topic, 0, messageId0, message0.getBytes());

  for (ProducerRecord<byte[], byte[]> rec : m0Segs) {
    producer.send(rec).get();
  }

  //consumer has no hope of assembling the msg

  Properties props = new Properties();
  String groupId = "testExceptionOnLargeMsgDropped-" + UUID.randomUUID();
  props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId);
  // Make sure we start to consume from the beginning.
  props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
  // Only fetch one record at a time.
  props.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1");
  // No auto commit
  props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
  // Not enough memory to assemble anything
  props.setProperty(LiKafkaConsumerConfig.MESSAGE_ASSEMBLER_BUFFER_CAPACITY_CONFIG, "" + (MAX_SEGMENT_SIZE + 1));
  props.setProperty(LiKafkaConsumerConfig.EXCEPTION_ON_MESSAGE_DROPPED_CONFIG, "true");

  LiKafkaConsumer<String, String> tempConsumer = createConsumer(props);
  tempConsumer.assign(tps);

  int topicSize = m0Segs.size();
  long timeout = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(120);
  int msgsDelivered = 0;
  while (true) {
    ConsumerRecords<String, String> records;
    try {
      records = tempConsumer.poll(1000);
    } catch (ConsumerRecordsProcessingException expected) {
      Assert.assertEquals(msgsDelivered, 0);
      break;
    }
    msgsDelivered += records.count();
    long position = tempConsumer.position(tp);
    if (System.currentTimeMillis() > timeout) {
      throw new IllegalStateException("unable to consume to  the end of the topic within timeout."
          + " position=" + position + ". end=" + topicSize);
    }
  }
}
 
Example 20
Source File: KafkaReceiver.java    From zerocode with Apache License 2.0
public String receive(String kafkaServers, String topicName, String requestJsonWithConfig) throws IOException {

        ConsumerLocalConfigs consumerLocalConfigs = readConsumerLocalTestProperties(requestJsonWithConfig);

        ConsumerLocalConfigs effectiveLocal = deriveEffectiveConfigs(consumerLocalConfigs, consumerCommonConfigs);

        LOGGER.info("\n### Kafka Consumer Effective configs:{}\n", effectiveLocal);

        Consumer consumer = createConsumer(kafkaServers, consumerPropertyFile, topicName);

        final List<ConsumerRecord> rawRecords = new ArrayList<>();
        final List<ConsumerJsonRecord> jsonRecords = new ArrayList<>();

        int noOfTimeOuts = 0;

        handleSeekOffset(effectiveLocal, consumer);

        while (true) {
            LOGGER.info("polling records  - noOfTimeOuts reached : " + noOfTimeOuts);

            final ConsumerRecords records = consumer.poll(ofMillis(getPollTime(effectiveLocal)));

            if (records.count() == 0) {
                noOfTimeOuts++;
                if (noOfTimeOuts > getMaxTimeOuts(effectiveLocal)) {
                    break;
                } else {
                    continue;
                }
            } else {
                LOGGER.info("Got {} records after {} timeouts\n", records.count(), noOfTimeOuts);
                // -----------------------------------
                // reset after it fetched some records
                // -----------------------------------
                noOfTimeOuts = 0;
            }

            if (records != null) {
                Iterator recordIterator = records.iterator();

                LOGGER.info("Consumer chosen recordType: " + effectiveLocal.getRecordType());

                switch (effectiveLocal.getRecordType()) {
                    case RAW:
                        readRaw(rawRecords, recordIterator);
                        break;

                    case JSON:
                        readJson(jsonRecords, recordIterator);
                        break;

                    default:
                        throw new RuntimeException("Unsupported record type - '" + effectiveLocal.getRecordType()
                                + "'. Supported values are 'JSON','RAW'");
                }

            }

            handleCommitSyncAsync(consumer, consumerCommonConfigs, effectiveLocal);
        }

        consumer.close();

        handleRecordsDump(effectiveLocal, rawRecords, jsonRecords);

        return prepareResult(effectiveLocal, jsonRecords, rawRecords);

    }