kafka.consumer.ConsumerTimeoutException Java Examples

The following examples show how to use kafka.consumer.ConsumerTimeoutException, the exception that Kafka's old high-level consumer throws when consumer.timeout.ms elapses without a new message. The examples are drawn from several open-source projects; each one lists its source file, originating project, and license.
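Before looking at the project examples, here is a minimal, self-contained sketch of the pattern they all rely on: setting consumer.timeout.ms on the old high-level consumer so that hasNext()/next() throw ConsumerTimeoutException instead of blocking forever on an idle topic. The ZooKeeper address, group id, topic name, timeout value, and class name are illustrative placeholders, not taken from any of the projects below.

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class TimeoutAwareConsumerSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder address
        props.put("group.id", "example-group");           // placeholder group id
        // Without this property hasNext()/next() block indefinitely; with it, a
        // ConsumerTimeoutException is thrown after 5 seconds without a message.
        props.put("consumer.timeout.ms", "5000");

        ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                connector.createMessageStreams(Collections.singletonMap("example-topic", 1));
        ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();

        try {
            while (it.hasNext()) {
                MessageAndMetadata<byte[], byte[]> record = it.next();
                System.out.println("offset " + record.offset() + ": "
                        + new String(record.message(), StandardCharsets.UTF_8));
            }
        } catch (ConsumerTimeoutException e) {
            // No message arrived within consumer.timeout.ms; treat this as "caught up".
        } finally {
            connector.shutdown();
        }
    }
}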
Example #1
Source File: KafkaPublisherTest.java    From localization_nifi with Apache License 2.0
@Test
public void validateSuccessfulSendAsWhole() throws Exception {
    InputStream contentStream = new ByteArrayInputStream("Hello Kafka".getBytes(StandardCharsets.UTF_8));
    String topicName = "validateSuccessfulSendAsWhole";

    Properties kafkaProperties = this.buildProducerProperties();
    KafkaPublisher publisher = new KafkaPublisher(kafkaProperties, mock(ComponentLog.class));

    PublishingContext publishingContext = new PublishingContext(contentStream, topicName);
    KafkaPublisherResult result = publisher.publish(publishingContext);

    assertEquals(0, result.getLastMessageAcked());
    assertEquals(1, result.getMessagesSent());
    contentStream.close();
    publisher.close();

    ConsumerIterator<byte[], byte[]> iter = this.buildConsumer(topicName);
    assertNotNull(iter.next());
    try {
        iter.next();
    } catch (ConsumerTimeoutException e) {
        // that's OK since this is the Kafka mechanism to unblock
    }
}
 
Example #2
Source File: KafkaConsumer.java    From sqoop-on-spark with Apache License 2.0
public MessageAndMetadata getNextMessage(String topic) {
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
  // it has only a single stream, because there is only one consumer
  KafkaStream stream = streams.get(0);
  final ConsumerIterator<byte[], byte[]> it = stream.iterator();
  int counter = 0;
  try {
    if (it.hasNext()) {
      return it.next();
    } else {
      return null;
    }
  } catch (ConsumerTimeoutException e) {
    logger.error("0 messages available to fetch for the topic " + topic);
    return null;
  }
}
 
Example #3
Source File: KafkaConsumer08.java    From datacollector with Apache License 2.0
@Override
public MessageAndOffset read() throws StageException {
  try {
    //hasNext() blocks indefinitely if consumer.timeout.ms is set to -1.
    //But if consumer.timeout.ms is set to a value, like 6000, a ConsumerTimeoutException is thrown
    //if no message is written to the Kafka topic within that time.
    if(consumerIterator.hasNext()) {
      MessageAndMetadata<byte[], byte[]> messageAndMetadata = consumerIterator.next();
      byte[] message = messageAndMetadata.message();
      long offset = messageAndMetadata.offset();
      int partition = messageAndMetadata.partition();
      return new MessageAndOffset(messageAndMetadata.key(), message, offset, partition);
    }
    return null;
  } catch (ConsumerTimeoutException e) {
    /* For the high-level consumer, the fetching logic is handled by a background
      fetcher thread and is hidden from the user. Whether
      1) the broker is down or
      2) no message is available,
      the fetcher thread keeps retrying while the user thread waits for it to put
      data into the buffer, until the timeout expires. In short, the high-level
      consumer is designed so that users need not worry about connect/reconnect issues. */
    return null;
  }
}
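The behaviour described in the comments above is controlled by a single property. Below is a small illustrative sketch of the two modes, assuming the supplied base Properties already contain zookeeper.connect and group.id (which ConsumerConfig requires); the 6000 ms value mirrors the figure in the comment and is otherwise arbitrary.

import java.util.Properties;

import kafka.consumer.ConsumerConfig;

/** Illustrative sketch only: the two consumer.timeout.ms modes mentioned above. */
public class TimeoutConfigSketch {

    /** hasNext() blocks indefinitely while the topic is idle. */
    static ConsumerConfig blockingConfig(Properties base) {
        Properties p = (Properties) base.clone();
        p.put("consumer.timeout.ms", "-1");
        return new ConsumerConfig(p);
    }

    /** hasNext() throws ConsumerTimeoutException after 6 seconds with no messages. */
    static ConsumerConfig timeoutConfig(Properties base) {
        Properties p = (Properties) base.clone();
        p.put("consumer.timeout.ms", "6000");
        return new ConsumerConfig(p);
    }
}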
 
Example #4
Source File: LegacyKafkaMessageIterator.java    From secor with Apache License 2.0
@Override
public Message next() {
    MessageAndMetadata<byte[], byte[]> kafkaMessage;
    try {
        kafkaMessage = mIterator.next();
    } catch (ConsumerTimeoutException e) {
        throw new LegacyConsumerTimeoutException(e);
    }

    long timestamp = 0L;
    if (mConfig.useKafkaTimestamp()) {
        timestamp = mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(kafkaMessage);
    }

    return new Message(kafkaMessage.topic(), kafkaMessage.partition(),
            kafkaMessage.offset(), kafkaMessage.key(),
            kafkaMessage.message(), timestamp, null);
}
 
Example #5
Source File: KafkaPublisherTest.java    From nifi with Apache License 2.0
@Test
public void validateSuccessfulSendAsWhole() throws Exception {
    InputStream contentStream = new ByteArrayInputStream("Hello Kafka".getBytes(StandardCharsets.UTF_8));
    String topicName = "validateSuccessfulSendAsWhole";

    Properties kafkaProperties = this.buildProducerProperties();
    KafkaPublisher publisher = new KafkaPublisher(kafkaProperties, mock(ComponentLog.class));

    PublishingContext publishingContext = new PublishingContext(contentStream, topicName);
    KafkaPublisherResult result = publisher.publish(publishingContext);

    assertEquals(0, result.getLastMessageAcked());
    assertEquals(1, result.getMessagesSent());
    contentStream.close();
    publisher.close();

    ConsumerIterator<byte[], byte[]> iter = this.buildConsumer(topicName);
    assertNotNull(iter.next());
    try {
        iter.next();
    } catch (ConsumerTimeoutException e) {
        // that's OK since this is the Kafka mechanism to unblock
    }
}
 
Example #6
Source File: KafkaWorker.java    From elasticsearch-river-kafka with Apache License 2.0
/**
 * Consumes the messages from the partition via the specified stream.
 */
private void consumeMessagesAndAddToBulkProcessor(final KafkaStream stream) {

    try {
        // by default the iterator waits forever for a message, but a consumer timeout is configured here
        final ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();

        // Consume all the messages of the stream (partition)
        while (consumerIterator.hasNext() && consume) {

            final MessageAndMetadata messageAndMetadata = consumerIterator.next();
            logMessage(messageAndMetadata);

            elasticsearchProducer.addMessagesToBulkProcessor(messageAndMetadata);

            // StatsD reporting
            stats.messagesReceived.incrementAndGet();
            stats.lastCommitOffsetByPartitionId.put(messageAndMetadata.partition(), messageAndMetadata.offset());
        }
    } catch (ConsumerTimeoutException ex) {
        logger.debug("Nothing to be consumed for now. Consume flag is: {}", consume);
    }
}
 
Example #7
Source File: KafkaPublisherTest.java    From localization_nifi with Apache License 2.0
@Test
public void validateSuccessfulSendAsDelimited() throws Exception {
    InputStream contentStream = new ByteArrayInputStream(
            "Hello Kafka\nHello Kafka\nHello Kafka\nHello Kafka\n".getBytes(StandardCharsets.UTF_8));
    String topicName = "validateSuccessfulSendAsDelimited";

    Properties kafkaProperties = this.buildProducerProperties();
    KafkaPublisher publisher = new KafkaPublisher(kafkaProperties, mock(ComponentLog.class));

    PublishingContext publishingContext = new PublishingContext(contentStream, topicName);
    publishingContext.setDelimiterBytes("\n".getBytes(StandardCharsets.UTF_8));
    KafkaPublisherResult result = publisher.publish(publishingContext);

    assertEquals(3, result.getLastMessageAcked());
    assertEquals(4, result.getMessagesSent());
    contentStream.close();
    publisher.close();

    ConsumerIterator<byte[], byte[]> iter = this.buildConsumer(topicName);
    assertNotNull(iter.next());
    assertNotNull(iter.next());
    assertNotNull(iter.next());
    assertNotNull(iter.next());
    try {
        iter.next();
        fail();
    } catch (ConsumerTimeoutException e) {
        // that's OK since this is the Kafka mechanism to unblock
    }
}
 
Example #8
Source File: KafkaSource.java    From flume-ng-extends-source with MIT License
/**
 * Check if there are messages waiting in Kafka, waiting up to the configured
 * timeout (10 ms by default) for a message to arrive, and catching the timeout
 * exception to return a boolean instead of blocking.
 */
boolean hasNext() {
  try {
    it.hasNext();
    return true;
  } catch (ConsumerTimeoutException e) {
    return false;
  }
}
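A hypothetical caller-side sketch of how a hasNext() written this way is typically used: keep reading while messages are buffered and stop once the timeout signals that nothing more is currently available. The DrainingReader class, the drainOnce() method, and the omitted message handling are illustrative and not part of the Flume source.

import kafka.consumer.ConsumerIterator;
import kafka.consumer.ConsumerTimeoutException;
import kafka.message.MessageAndMetadata;

/** Hypothetical wrapper showing how the boolean hasNext() idiom is consumed. */
public class DrainingReader {

    private final ConsumerIterator<byte[], byte[]> it;

    public DrainingReader(ConsumerIterator<byte[], byte[]> it) {
        this.it = it;
    }

    /** Mirrors the example above: a timeout means "nothing available right now". */
    private boolean hasNext() {
        try {
            it.hasNext();
            return true;
        } catch (ConsumerTimeoutException e) {
            return false;
        }
    }

    /** Reads everything currently available and returns the number of messages seen. */
    public int drainOnce() {
        int drained = 0;
        while (hasNext()) {
            MessageAndMetadata<byte[], byte[]> message = it.next();
            // handle message here (omitted in this sketch)
            drained++;
        }
        return drained;
    }
}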
 
Example #9
Source File: FastKafkaSource.java    From fraud-detection-tutorial with Apache License 2.0
boolean hasNext() {
  try {
    this.it.hasNext();
    return true;
  } catch (ConsumerTimeoutException var2) {
    return false;
  }
}
 
Example #10
Source File: LegacyKafkaMessageIterator.java    From secor with Apache License 2.0
@Override
public boolean hasNext() {
    try {
        return mIterator.hasNext();
    } catch (ConsumerTimeoutException e) {
        throw new LegacyConsumerTimeoutException(e);
    }
}
 
Example #11
Source File: MessageResource.java    From dropwizard-kafka-http with Apache License 2.0
@GET
@Timed
public Response consume(
        @QueryParam("topic") String topic,
        @QueryParam("timeout") Integer timeout
) {
    if (Strings.isNullOrEmpty(topic))
        return Response.status(400)
                .entity(new String[]{"Undefined topic"})
                .build();

    Properties props = (Properties) consumerCfg.clone();
    if (timeout != null) props.put("consumer.timeout.ms", "" + timeout);

    ConsumerConfig config = new ConsumerConfig(props);
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

    Map<String, Integer> streamCounts = Collections.singletonMap(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(streamCounts);
    KafkaStream<byte[], byte[]> stream = streams.get(topic).get(0);

    List<Message> messages = new ArrayList<>();
    try {
        for (MessageAndMetadata<byte[], byte[]> messageAndMetadata : stream)
            messages.add(new Message(messageAndMetadata));
    } catch (ConsumerTimeoutException ignore) {
    } finally {
        connector.commitOffsets();
        connector.shutdown();
    }

    return Response.ok(messages).build();
}
 
Example #12
Source File: KafkaPublisherTest.java    From nifi with Apache License 2.0
@Test
public void validateSuccessfulSendAsDelimited() throws Exception {
    InputStream contentStream = new ByteArrayInputStream(
            "Hello Kafka\nHello Kafka\nHello Kafka\nHello Kafka\n".getBytes(StandardCharsets.UTF_8));
    String topicName = "validateSuccessfulSendAsDelimited";

    Properties kafkaProperties = this.buildProducerProperties();
    KafkaPublisher publisher = new KafkaPublisher(kafkaProperties, mock(ComponentLog.class));

    PublishingContext publishingContext = new PublishingContext(contentStream, topicName);
    publishingContext.setDelimiterBytes("\n".getBytes(StandardCharsets.UTF_8));
    KafkaPublisherResult result = publisher.publish(publishingContext);

    assertEquals(3, result.getLastMessageAcked());
    assertEquals(4, result.getMessagesSent());
    contentStream.close();
    publisher.close();

    ConsumerIterator<byte[], byte[]> iter = this.buildConsumer(topicName);
    assertNotNull(iter.next());
    assertNotNull(iter.next());
    assertNotNull(iter.next());
    assertNotNull(iter.next());
    try {
        iter.next();
        fail();
    } catch (ConsumerTimeoutException e) {
        // that's OK since this is the Kafka mechanism to unblock
    }
}
 
Example #13
Source File: KafkaConsumerCallable.java    From pentaho-kafka-consumer with Apache License 2.0
public Object call() throws KettleException {
    try {
        long limit;
        String strData = meta.getLimit();

        limit = getLimit(strData);
        if (limit > 0) {
            step.logDebug("Collecting up to " + limit + " messages");
        } else {
            step.logDebug("Collecting unlimited messages");
        }
        while (data.streamIterator.hasNext() && !data.canceled && (limit <= 0 || data.processed < limit)) {
            MessageAndMetadata<byte[], byte[]> messageAndMetadata = data.streamIterator.next();
            messageReceived(messageAndMetadata.key(), messageAndMetadata.message());
            ++data.processed;
        }
    } catch (ConsumerTimeoutException cte) {
        step.logDebug("Received a consumer timeout after " + data.processed + " messages");
        if (!meta.isStopOnEmptyTopic()) {
            // Because we're not set to stop on empty, this is an abnormal
            // timeout
            throw new KettleException("Unexpected consumer timeout!", cte);
        }
    }
    // Notify that all messages were read successfully
    data.consumer.commitOffsets();
    step.setOutputDone();
    return null;
}
 
Example #14
Source File: TestKafkaSink.java    From suro with Apache License 2.0
@Test
public void testConfigBackwardCompatible() throws IOException {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
            "        \"%s\": \"key\"\n" +
            "    }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
            "    \"type\": \"Kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"ack\": 1,\n" +
            "     \"compression.type\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";
    String description2 = "{\n" +
            "    \"type\": \"Kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1,\n" +
            "     \"compression.codec\": \"snappy\",\n" +
            keyTopicMap + "\n" +
            "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "Kafka"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sink1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSink sink2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sink1.open();
    sink2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sink1);
    sinks.add(sink2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
            consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages, using the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long( rand.nextLong() ) )
                .put("value", "message:" + i).build();

        // send message to both sinks
        for( Sink sink : sinks ){
            sink.writeTo(new DefaultMessageContainer(
                    new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                    jsonMapper));
        }

        // read two copies of the message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
        System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String( msgAndMeta1.message() );
        String msg2Str = new String( msgAndMeta2.message() );
        System.out.println( "iteration: "+i+" message1: "+msg1Str );
        System.out.println( "iteration: "+i+" message2: "+msg2Str );
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sink1.close();
    sink2.close();
    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}
 
Example #15
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
@Test
public void testMultithread() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_MULTITHREAD,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1,\n" +
            "    \"batchSize\": 10,\n" +
            "    \"jobQueueSize\": 3\n" +
            "}";

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    int msgCount = 10000;
    for (int i = 0; i < msgCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", Integer.toString(i))
                .put("value", "message:" + i).build();
        sink.writeTo(new DefaultMessageContainer(
                new Message(TOPIC_NAME_MULTITHREAD, jsonMapper.writeValueAsBytes(msgMap)),
                jsonMapper));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    System.out.println(sink.getStat());
    assertEquals(sink.getNumOfPendingMessages(), 0);

    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
            createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid_multhread"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_MULTITHREAD, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_MULTITHREAD).get(0);
    for (int i = 0; i < msgCount; ++i) {
        stream.iterator().next();
    }

    try {
        stream.iterator().next();
        fail();
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}
 
Example #16
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
/** Tests backward compatibility with the old Kafka sink. */
@Test
public void testBackwardCompatability() throws Exception {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
            "        \"%s\": \"key\"\n" +
            "    }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
        "    \"type\": \"kafkaV1\",\n" +
        "    \"client.id\": \"kafkasink\",\n" +
        "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
        "    \"ack\": 1,\n" +
        keyTopicMap + "\n" +
        "}";
    String description2 = "{\n" +
        "    \"type\": \"kafkaV2\",\n" +
        "    \"client.id\": \"kafkasink\",\n" +
        "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
        "    \"request.required.acks\": 1,\n" +
        keyTopicMap + "\n" +
        "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "kafkaV1"));
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafkaV2"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sinkV1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSinkV2 sinkV2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sinkV1.open();
    sinkV2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sinkV1);
    sinks.add(sinkV2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
        createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = 
                                            consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages, using the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long( rand.nextLong() ) )
                .put("value", "message:" + i).build();

        // send message to both sinks
        for( Sink sink : sinks ){
          sink.writeTo(new DefaultMessageContainer(
                new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                jsonMapper));
        }

        // read two copies of the message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
        System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String( msgAndMeta1.message() );
        String msg2Str = new String( msgAndMeta2.message() );
        System.out.println( "iteration: "+i+" message1: "+msg1Str );
        System.out.println( "iteration: "+i+" message2: "+msg2Str );
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sinkV1.close();
    sinkV2.close();
    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}