kafka.api.FetchRequestBuilder Java Examples

The following examples show how to use kafka.api.FetchRequestBuilder. They are taken from open source projects; the source file, originating project, and license are noted above each example.
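Before the individual examples, here is a minimal, self-contained sketch of the pattern they all share: build a kafka.api.FetchRequest with FetchRequestBuilder, issue it through a kafka.javaapi.consumer.SimpleConsumer, check for errors, and iterate the returned message set. The broker address, topic name, partition, offset, and fetch sizes below are placeholder values for illustration, not taken from any of the projects shown.

import java.nio.ByteBuffer;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class FetchRequestBuilderSketch {
    public static void main(String[] args) throws Exception {
        // Placeholder connection settings: host, port, socket timeout (ms), buffer size, client id.
        SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "sketch-client");
        try {
            // Ask for up to 100 KB from partition 0 of "my-topic", starting at offset 0.
            FetchRequest request = new FetchRequestBuilder()
                    .clientId("sketch-client")
                    .addFetch("my-topic", 0, 0L, 100000)
                    .maxWait(1000)   // block at most 1 second on the broker
                    .minBytes(1)     // respond as soon as any data is available
                    .build();
            FetchResponse response = consumer.fetch(request);
            if (response.hasError()) {
                // Real code should inspect the error code and recover; see the examples below.
                throw new RuntimeException("Fetch failed, error code: " + response.errorCode("my-topic", 0));
            }
            for (MessageAndOffset messageAndOffset : response.messageSet("my-topic", 0)) {
                ByteBuffer payload = messageAndOffset.message().payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                System.out.println("Offset: " + messageAndOffset.offset() + ", Payload: " + new String(bytes, "UTF-8"));
            }
        } finally {
            consumer.close();
        }
    }
}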
Example #1
Source File: KafkaLeaderReader.java    From arcusplatform with Apache License 2.0
private int readNext() {
   FetchRequestBuilder requestBuilder =
         new FetchRequestBuilder()
            .clientId(clientId);
   for(Map.Entry<TopicAndPartition, Long> offset: offsets.entrySet()) {
      if(offset.getValue() == null) {
         logger.warn("Invalid offset for topic: [{}] partition: [{}]", offset.getKey().topic(), offset.getKey().partition());
      }
      else {
         requestBuilder.addFetch(offset.getKey().topic(), offset.getKey().partition(), offset.getValue(), fetchSize);
      }
   }
   FetchRequest request = requestBuilder.build();
   FetchResponse response = getKafkaConsumer().fetch(request);
   // FIXME handle errors / leader rebalances here
   return dispatch(response);
}
 
Example #2
Source File: KafkaPartitionReader.java    From Scribengin with GNU Affero General Public License v3.0
void nextMessageSet() throws Exception {
  FetchRequest req = 
      new FetchRequestBuilder().
      clientId(name).
      addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
      minBytes(1).
      maxWait(1000).
      build();
  
  FetchResponse fetchResponse = consumer.fetch(req);
  if(fetchResponse.hasError()) {
    throw new Exception("TODO: handle the error, reset the consumer....");
  }
  
  currentMessageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  currentMessageSetIterator = currentMessageSet.iterator();
}
 
Example #3
Source File: DemoLowLevelConsumer.java    From KafkaExample with Apache License 2.0
public static void main(String[] args) throws Exception {
	final String topic = "topic1";
	String clientID = "DemoLowLevelConsumer1";
	SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
	FetchRequest req = new FetchRequestBuilder().clientId(clientID)
			.addFetch(topic, 0, 0L, 50).addFetch(topic, 1, 0L, 5000).addFetch(topic, 2, 0L, 1000000).build();
	FetchResponse fetchResponse = simpleConsumer.fetch(req);
	ByteBufferMessageSet messageSet = (ByteBufferMessageSet) fetchResponse.messageSet(topic, 0);
	for (MessageAndOffset messageAndOffset : messageSet) {
		ByteBuffer payload = messageAndOffset.message().payload();
		long offset = messageAndOffset.offset();
		byte[] bytes = new byte[payload.limit()];
		payload.get(bytes);
		System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
	}
}
 
Example #4
Source File: KafkaComponent.java    From metron with Apache License 2.0
public List<byte[]> readMessages(String topic) {
  SimpleConsumer consumer = new SimpleConsumer("localhost", 6667, 100000, 64 * 1024, "consumer");
  FetchRequest req = new FetchRequestBuilder()
          .clientId("consumer")
          .addFetch(topic, 0, 0, 100000)
          .build();
  FetchResponse fetchResponse = consumer.fetch(req);
  Iterator<MessageAndOffset> results = fetchResponse.messageSet(topic, 0).iterator();
  List<byte[]> messages = new ArrayList<>();
  while(results.hasNext()) {
    ByteBuffer payload = results.next().message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    messages.add(bytes);
  }
  consumer.close();
  return messages;
}
 
Example #5
Source File: KafkaLowLevelConsumer09.java    From datacollector with Apache License 2.0
private FetchRequest buildFetchRequest(long offset) {
  //1. maxWaitTime is the maximum amount of time in milliseconds to block waiting if insufficient data is
  //   available at the time the request is issued.

  //2. minFetchSize is the minimum number of bytes of messages that must be available to give a response. If the
  //   client sets this to 0 the server will always respond immediately, however if there is no new data since their
  //   last request they will just get back empty message sets. If this is set to 1, the server will respond as soon
  //   as at least one partition has at least 1 byte of data or the specified timeout occurs. By setting higher
  //   values in combination with the timeout the consumer can tune for throughput and trade a little additional
  //   latency for reading only large chunks of data (e.g. setting MaxWaitTime to 100 ms and setting MinBytes to 64k
  //   would allow the server to wait up to 100ms to try to accumulate 64k of data before responding).

  //3. maxFetchSize is the maximum bytes to include in the message set for this partition.
  //   This helps bound the size of the response.
  LOG.info("Building fetch request with clientId {}, minBytes {}, maxWait {}, topic {}, partition {}, offset {}, " +
    "max fetch size {}.", clientName, minFetchSize, maxWaitTime, topic, partition, offset, maxFetchSize);
  return new FetchRequestBuilder()
    .clientId(clientName)
    .minBytes(minFetchSize)
    .maxWait(maxWaitTime)
    .addFetch(topic, partition, offset, maxFetchSize)
    .build();
}
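As a concrete illustration of the trade-off described in the comments above, the following sketch (with a placeholder client id, topic, partition, and offset) sets minBytes to 64 KB and maxWait to 100 ms, so the broker may wait up to 100 ms to accumulate 64 KB of data before responding:

// Sketch only: illustrates the maxWait/minBytes tuning described in the comment above.
// "tuning-sketch", "my-topic", partition 0, and offset 0L are placeholder values.
FetchRequest throughputTunedRequest = new FetchRequestBuilder()
    .clientId("tuning-sketch")
    .minBytes(64 * 1024)                       // wait for at least 64 KB of data ...
    .maxWait(100)                              // ... but no longer than 100 ms
    .addFetch("my-topic", 0, 0L, 1024 * 1024)  // and cap this partition's reply at 1 MB
    .build();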
 
Example #6
Source File: KafkaSimpleConsumer.java    From attic-apex-malhar with Apache License 2.0
@Override
  public void run()
  {
    long offset = 0;
    while (isAlive) {
      // create a fetch request for topic "topic1", partition 1, the current offset, and a fetch size of 1MB
      FetchRequest fetchRequest = new FetchRequestBuilder().clientId("default_client").addFetch("topic1", 1, offset, 1000000).build();

//      FetchRequest fetchRequest = new FetchRequest("topic1", 0, offset, 1000000);

      // get the message set from the consumer and print them out
      ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
      Iterator<MessageAndOffset> itr = messages.iterator();

      while (itr.hasNext() && isAlive) {
        MessageAndOffset msg = itr.next();
        // advance the offset after consuming each message
        offset = msg.offset();
        logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()).toString(), offset);
        receiveCount++;
      }
    }
  }
 
Example #7
Source File: KafkaLowLevelConsumer08.java    From datacollector with Apache License 2.0
private FetchRequest buildFetchRequest(long offset) {
  //1. maxWaitTime is the maximum amount of time in milliseconds to block waiting if insufficient data is
  //   available at the time the request is issued.

  //2. minFetchSize is the minimum number of bytes of messages that must be available to give a response. If the
  //   client sets this to 0 the server will always respond immediately, however if there is no new data since their
  //   last request they will just get back empty message sets. If this is set to 1, the server will respond as soon
  //   as at least one partition has at least 1 byte of data or the specified timeout occurs. By setting higher
  //   values in combination with the timeout the consumer can tune for throughput and trade a little additional
  //   latency for reading only large chunks of data (e.g. setting MaxWaitTime to 100 ms and setting MinBytes to 64k
  //   would allow the server to wait up to 100ms to try to accumulate 64k of data before responding).

  //3. maxFetchSize is the maximum bytes to include in the message set for this partition.
  //   This helps bound the size of the response.
  LOG.info("Building fetch request with clientId {}, minBytes {}, maxWait {}, topic {}, partition {}, offset {}, " +
    "max fetch size {}.", clientName, minFetchSize, maxWaitTime, topic, partition, offset, maxFetchSize);
  return new FetchRequestBuilder()
    .clientId(clientName)
    .minBytes(minFetchSize)
    .maxWait(maxWaitTime)
    .addFetch(topic, partition, offset, maxFetchSize)
    .build();
}
 
Example #8
Source File: KafkaUtils.java    From storm-kafka-0.8-plus with Apache License 2.0
public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset) {
    ByteBufferMessageSet msgs = null;
    String topic = config.topic;
    int partitionId = partition.partition;
    for (int errors = 0; errors < 2 && msgs == null; errors++) {
        FetchRequestBuilder builder = new FetchRequestBuilder();
        FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes).
                clientId(config.clientId).build();
        FetchResponse fetchResponse;
        try {
            fetchResponse = consumer.fetch(fetchRequest);
        } catch (Exception e) {
            if (e instanceof ConnectException) {
                throw new FailedFetchException(e);
            } else {
                throw new RuntimeException(e);
            }
        }
        if (fetchResponse.hasError()) {
            KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
            if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange && errors == 0) {
                long startOffset = getOffset(consumer, topic, partitionId, config.startOffsetTime);
                LOG.warn("Got fetch request with offset out of range: [" + offset + "]; " +
                        "retrying with default start offset time from configuration. " +
                        "configured start offset time: [" + config.startOffsetTime + "] offset: [" + startOffset + "]");
                offset = startOffset;
            } else {
                String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
                LOG.error(message);
                throw new FailedFetchException(message);
            }
        } else {
            msgs = fetchResponse.messageSet(topic, partitionId);
        }
    }
    return msgs;
}
 
Example #9
Source File: KafkaPartitionReader.java    From Scribengin with GNU Affero General Public License v3.0
public List<byte[]> execute() throws Exception {
  FetchRequest req = 
      new FetchRequestBuilder().
      clientId(name).
      addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
      minBytes(1).
      maxWait(maxWait).
      build();
  
  FetchResponse fetchResponse = consumer.fetch(req);
  if(fetchResponse.hasError()) {
    short errorCode = fetchResponse.errorCode(topic, partitionMetadata.partitionId());
    String msg = "Kafka error code = " + errorCode + ", Partition  " + partitionMetadata.partitionId() ;
    throw new Exception(msg);
  }
  List<byte[]> holder = new ArrayList<byte[]>();
  ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  int count = 0;
  for(MessageAndOffset messageAndOffset : messageSet) {
    if (messageAndOffset.offset() < currentOffset) continue; //old offset, ignore
    ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    holder.add(bytes);
    currentOffset = messageAndOffset.nextOffset();
    count++;
    if(count == maxRead) break;
  }
  return holder ;
}
 
Example #10
Source File: KafkaPartitionLevelConsumer.java    From incubator-pinot with Apache License 2.0
/**
 * Fetch messages and the per-partition high watermark from Kafka between the specified offsets.
 *
 * @param startOffset The offset of the first message desired, inclusive
 * @param endOffset The offset of the last message desired, exclusive, or {@link Long#MAX_VALUE} for no end offset.
 * @param timeoutMillis Timeout in milliseconds
 * @throws java.util.concurrent.TimeoutException If the operation could not be completed within {@code timeoutMillis}
 * milliseconds
 * @return An iterable containing messages fetched from Kafka and their offsets, as well as the high watermark for
 * this partition.
 */
public synchronized MessageBatch fetchMessages(long startOffset, long endOffset, int timeoutMillis)
    throws java.util.concurrent.TimeoutException {
  // TODO Improve error handling

  final long connectEndTime = System.currentTimeMillis() + _connectTimeoutMillis;
  while (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
      && System.currentTimeMillis() < connectEndTime) {
    _currentState.process();
  }
  if (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
      && connectEndTime <= System.currentTimeMillis()) {
    throw new java.util.concurrent.TimeoutException();
  }

  FetchResponse fetchResponse = _simpleConsumer.fetch(
      new FetchRequestBuilder().minBytes(_fetchRequestMinBytes).maxWait(timeoutMillis)
          .addFetch(_topic, _partition, startOffset, _fetchRequestSizeBytes).build());

  if (!fetchResponse.hasError()) {
    final Iterable<MessageAndOffset> messageAndOffsetIterable =
        buildOffsetFilteringIterable(fetchResponse.messageSet(_topic, _partition), startOffset, endOffset);

    // TODO: Instantiate with factory
    return new SimpleConsumerMessageBatch(messageAndOffsetIterable);
  } else {
    throw exceptionForKafkaErrorCode(fetchResponse.errorCode(_topic, _partition));
  }
}
 
Example #11
Source File: AbstractExactlyOnceKafkaOutputOperator.java    From attic-apex-malhar with Apache License 2.0
private void initializeLastProcessingOffset()
{
  // read last received kafka message
  TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());

  if (tm == null) {
    throw new RuntimeException("Failed to retrieve topic metadata");
  }

  partitionNum = tm.partitionsMetadata().size();

  lastMsgs = new HashMap<Integer, Pair<byte[],byte[]>>(partitionNum);

  for (PartitionMetadata pm : tm.partitionsMetadata()) {

    String leadBroker = pm.leader().host();
    int port = pm.leader().port();
    String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
    SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);

    long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);

    FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();

    FetchResponse fetchResponse = consumer.fetch(req);
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {

      Message m = messageAndOffset.message();

      ByteBuffer payload = m.payload();
      ByteBuffer key = m.key();
      byte[] valueBytes = new byte[payload.limit()];
      byte[] keyBytes = new byte[key.limit()];
      payload.get(valueBytes);
      key.get(keyBytes);
      lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
    }
  }
}
 
Example #12
Source File: KafkaMessageReceiverImpl.java    From message-queue-client-framework with Apache License 2.0
/**
 * Check the leader.
 *
 * @param a_topic       topic name
 * @param a_partition   partition number
 * @param a_beginOffset begin offset
 * @return boolean
 */
private boolean checkLeader(String a_topic, int a_partition,
                            long a_beginOffset) {

    if (checkConsumer(a_topic, a_partition)) {

        FetchRequest req = new FetchRequestBuilder()
                .clientId(pool.getClientId())
                .addFetch(a_topic, a_partition, a_beginOffset,
                        KafkaConstants.FETCH_SIZE).build();
        fetchResponse = consumer.get().fetch(req);
        String leadHost = metadata.leader().host();

        if (fetchResponse.hasError()) {

            // Something went wrong!
            short code = fetchResponse.errorCode(a_topic, a_partition);
            logger.error("Error fetching data from the Broker:" + leadHost
                    + " Reason: " + code);

            if (code == ErrorMapping.OffsetOutOfRangeCode()) {
                // We asked for an invalid offset. For simple case ask for
                // the last element to reset
                a_beginOffset = getLatestOffset(a_topic, a_partition);
            }
            consumer.get().close();
            consumer.set(null);

            try {
                metadata = findNewLeader(leadHost, a_topic, a_partition);
            } catch (MQException e) {
                logger.error("Find new leader failed.", e);
            }
            return false;
        }

        return true;
    }
    return false;
}
 
Example #13
Source File: SimpleKafkaConsumer.java    From twill with Apache License 2.0
/**
 * Makes a call to kafka to fetch messages.
 */
private FetchResponse fetchMessages(SimpleConsumer consumer, long offset) {
  FetchRequest request = new FetchRequestBuilder()
    .clientId(consumer.clientId())
    .addFetch(topicPart.getTopic(), topicPart.getPartition(), offset, FETCH_SIZE)
    .maxWait(MAX_WAIT)
    .build();
  return consumer.fetch(request);
}
 
Example #14
Source File: SimpleConsumerDemo.java    From javabase with Apache License 2.0
public static void main(String[] args) throws Exception {
    generateData();

    SimpleConsumer simpleConsumer = new SimpleConsumer(KafkaProperties.KAFKA_SERVER_URL,
        KafkaProperties.KAFKA_SERVER_PORT,
        KafkaProperties.CONNECTION_TIMEOUT,
        KafkaProperties.KAFKA_PRODUCER_BUFFER_SIZE,
        KafkaProperties.CLIENT_ID);

    System.out.println("Testing single fetch");
    FetchRequest req = new FetchRequestBuilder()
        .clientId(KafkaProperties.CLIENT_ID)
        .addFetch(KafkaProperties.TOPIC2, 0, 0L, 100)
        .build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    printMessages(fetchResponse.messageSet(KafkaProperties.TOPIC2, 0));

    System.out.println("Testing single multi-fetch");
    Map<String, List<Integer>> topicMap = new HashMap<String, List<Integer>>();
    topicMap.put(KafkaProperties.TOPIC2, Collections.singletonList(0));
    topicMap.put(KafkaProperties.TOPIC3, Collections.singletonList(0));
    req = new FetchRequestBuilder()
        .clientId(KafkaProperties.CLIENT_ID)
        .addFetch(KafkaProperties.TOPIC2, 0, 0L, 100)
        .addFetch(KafkaProperties.TOPIC3, 0, 0L, 100)
        .build();
    fetchResponse = simpleConsumer.fetch(req);
    int fetchReq = 0;
    for (Map.Entry<String, List<Integer>> entry : topicMap.entrySet()) {
        String topic = entry.getKey();
        for (Integer partition : entry.getValue()) {
            System.out.println("Response from fetch request no: " + ++fetchReq);
            printMessages(fetchResponse.messageSet(topic, partition));
        }
    }
}
 
Example #15
Source File: KafkaSimpleConsumer.java    From Pistachio with Apache License 2.0
public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
    List<BytesMessageWithOffset> newOffsetMsg = new ArrayList<BytesMessageWithOffset>();
    FetchResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        ensureConsumer(previousLeader);

        if (offset == Long.MAX_VALUE) {
            offset = getOffset(false);
            logger.info("offset max long, fetch from latest in kafka {}", offset);
        }

        FetchRequest request = new FetchRequestBuilder()
                .clientId(clientId)
                .addFetch(topic, partitionId, offset, 100000000)
                .maxWait(timeoutMs)
                .minBytes(1)
                .build();

        //logger.debug("fetch offset {}", offset);

        try {
            response = consumer.fetch(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("catch exception of {} with interrupted in fetch for {} - {} with offset {}",
                        e.getClass().getName(), topic, partitionId, offset);

                throw new InterruptedException();
            }
            logger.warn("caughte exception in fetch {} - {}", topic, partitionId, e);
            response = null;
        }

        if (response == null || response.hasError()) {
            short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();
            logger.warn("fetch {} - {} with offset {} encounters error: {}", topic, partitionId, offset, errorCode);

            boolean needNewLeader = false;
            if (errorCode == ErrorMapping.RequestTimedOutCode()) {
                //TODO: leave it here
            } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
                //TODO: fetch the earliest offset or latest offset ?
                // seems no obvious correct way to handle it
                long earliestOffset = getOffset(true);
                logger.debug("get earilset offset {} for {} - {}", earliestOffset, topic, partitionId);
                if (earliestOffset < 0) {
                    needNewLeader = true;
                } else {
                    newOffsetMsg.add(new BytesMessageWithOffset(null, earliestOffset));
                    offset = earliestOffset;
                    continue;
                }
            } else {
                needNewLeader = true;
            }

            if (needNewLeader) {
                stopConsumer();
                previousLeader = leaderBroker;
                leaderBroker = null;
                continue;
            }
        } else {
            break;
        }
    }

    return response != null ? filterAndDecode(response.messageSet(topic, partitionId), offset) :
        (newOffsetMsg.size() > 0 ? newOffsetMsg : EMPTY_MSGS);
}
 
Example #16
Source File: LegacyKafkaClient.java    From secor with Apache License 2.0
private Message getMessage(TopicPartition topicPartition, long offset,
                           SimpleConsumer consumer) {
    LOG.debug("fetching message topic {} partition {} offset {}",
            topicPartition.getTopic(), topicPartition.getPartition(), offset);
    final int MAX_MESSAGE_SIZE_BYTES = mConfig.getMaxMessageSizeBytes();
    final String clientName = getClientName(topicPartition);
    kafka.api.FetchRequest request = new FetchRequestBuilder().clientId(clientName)
            .addFetch(topicPartition.getTopic(), topicPartition.getPartition(), offset,
                      MAX_MESSAGE_SIZE_BYTES)
            .build();
    FetchResponse response = consumer.fetch(request);
    if (response.hasError()) {
        consumer.close();
        int errorCode = response.errorCode(topicPartition.getTopic(), topicPartition.getPartition());

        if (errorCode == Errors.OFFSET_OUT_OF_RANGE.code()) {
          throw new MessageDoesNotExistException();
        } else {
          throw new RuntimeException("Error fetching offset data. Reason: " + errorCode);
        }
    }
    MessageAndOffset messageAndOffset = response.messageSet(
            topicPartition.getTopic(), topicPartition.getPartition()).iterator().next();
    byte[] keyBytes = null;
    if (messageAndOffset.message().hasKey()) {
        ByteBuffer key = messageAndOffset.message().key();
        keyBytes = new byte[key.limit()];
        key.get(keyBytes);
    }
    byte[] payloadBytes = null;
    if (!messageAndOffset.message().isNull()) {
        ByteBuffer payload = messageAndOffset.message().payload();
        payloadBytes = new byte[payload.limit()];
        payload.get(payloadBytes);
    }
    long timestamp = (mConfig.useKafkaTimestamp())
            ? mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(messageAndOffset)
            : 0L;

    return new Message(topicPartition.getTopic(), topicPartition.getPartition(),
            messageAndOffset.offset(), keyBytes, payloadBytes, timestamp, null);
}
 
Example #17
Source File: LowLevelConsumerExample.java    From pulsar with Apache License 2.0
private static void consumeMessage(Arguments arguments) {

        Properties properties = new Properties();
        properties.put(SimpleConsumer.HTTP_SERVICE_URL, arguments.httpServiceUrl);
        SimpleConsumer consumer = new SimpleConsumer(arguments.serviceUrl, 0, 0, 0, "clientId", properties);

        long readOffset = kafka.api.OffsetRequest.EarliestTime();
        kafka.api.FetchRequest fReq = new FetchRequestBuilder().clientId("c1")
                .addFetch(arguments.topicName, arguments.partitionIndex, readOffset, 100000).build();
        FetchResponse fetchResponse = consumer.fetch(fReq);

        TestDecoder decoder = new TestDecoder();
        int count = 0;
        while (count < arguments.totalMessages || arguments.totalMessages == -1) {
            // 1. Read from topic without subscription/consumer-group name.
            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(arguments.topicName,
                    arguments.partitionIndex)) {
                MessageId msgIdOffset = (messageAndOffset instanceof PulsarMsgAndOffset)
                        ? ((PulsarMsgAndOffset) messageAndOffset).getFullOffset()
                        : null;
                long currentOffset = messageAndOffset.offset();
                if (currentOffset < readOffset) {
                    continue;
                }

                ByteBuffer payload = messageAndOffset.message().payload();

                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                Tweet tweet = decoder.fromBytes(bytes);
                log.info("Received tweet: {}-{}", tweet.userName, tweet.message);
                count++;

                TopicAndPartition topicPartition = new TopicAndPartition(arguments.topicName, arguments.partitionIndex);
                OffsetMetadataAndError offsetError = new OffsetMetadataAndError(msgIdOffset, null, (short) 0);
                Map<TopicAndPartition, OffsetMetadataAndError> requestInfo = Collections.singletonMap(topicPartition,
                        offsetError);
                // 2. Commit offset for a given topic and subscription-name/consumer-name.
                OffsetCommitRequest offsetReq = new OffsetCommitRequest(arguments.groupName, requestInfo, (short) -1, 0,
                        "c1");
                consumer.commitOffsets(offsetReq);
            }
        }

        consumer.close();
    }
 
Example #18
Source File: TestKafkaSink.java    From suro with Apache License 2.0
@Test
public void testDefaultParameters() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"acks\": 1\n" +
            "}";


    KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
    while (msgIterator.hasNext()) {
        sink.writeTo(new StringMessage(msgIterator.next()));
    }
    assertTrue(sink.getNumOfPendingMessages() > 0);
    sink.close();
    assertEquals(sink.getNumOfPendingMessages(), 0);
    System.out.println(sink.getStat());

    // get the leader
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
    assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
    int leader = (Integer) leaderOpt.get();

    KafkaConfig config;
    if (leader == kafkaServer.getServer(0).config().brokerId()) {
        config = kafkaServer.getServer(0).config();
    } else {
        config = kafkaServer.getServer(1).config();
    }
    SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
    FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

    List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
    assertEquals("Should have fetched 2 messages", 2, messageSet.size());

    assertEquals(new String(extractMessage(messageSet, 0)), "testMessage" + 0);
    assertEquals(new String(extractMessage(messageSet, 1)), "testMessage" + 1);
}
 
Example #19
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
@Test
public void testDefaultParameters() throws IOException {
    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
                    "--replication-factor", "2", "--partitions", "1"}));
    String description = "{\n" +
            "    \"type\": \"kafka\",\n" +
            "    \"client.id\": \"kafkasink\",\n" +
            "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
            "    \"request.required.acks\": 1\n" +
            "}";

    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
    KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
    sink.open();
    // create send test messages to Kafka
    Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
    HashSet<String> sentPayloads = new HashSet<String>(); // track sent messages for comparison later
    while (msgIterator.hasNext()) {
        StringMessage next = new StringMessage(msgIterator.next());
        sink.writeTo(next); // send
        sentPayloads.add( new String( next.getMessage().getPayload() ) ); // record
    }
    sink.close();
    assertEquals(sink.getNumOfPendingMessages(), 0);
    System.out.println(sink.getStat());

    // get the leader
    Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
    assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
    int leader = (Integer) leaderOpt.get();

    KafkaConfig config;
    if (leader == kafkaServer.getServer(0).config().brokerId()) {
        config = kafkaServer.getServer(0).config();
    } else {
        config = kafkaServer.getServer(1).config();
    }
    // get data back from Kafka
    SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
    FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

    List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
    assertEquals("Should have fetched 2 messages", 2, messageSet.size());

    for( int i=0; i<messageSet.size(); i++ ){
        // ensure that received message was one that was sent
        String receivedPayload = new String(extractMessage(messageSet, i));
        System.out.println( "Got message: " + new String( receivedPayload ) );
        assert( sentPayloads.remove( receivedPayload ) );
    }
    assertEquals(sentPayloads.size(), 0); // all sent messages should have been received
}
 
Example #20
Source File: KafkaConsumer.java    From jstorm with Apache License 2.0
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {

        String topic = config.topic;
        FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes)
                .maxWait(config.fetchWaitMaxMs).build();
        FetchResponse fetchResponse = null;
        SimpleConsumer simpleConsumer = null;
        try {
            simpleConsumer = findLeaderConsumer(partition);
            if (simpleConsumer == null) {
                // LOG.error(message);
                return null;
            }
            fetchResponse = simpleConsumer.fetch(req);
        } catch (Exception e) {
            if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException
                    || e instanceof UnresolvedAddressException) {
                LOG.warn("Network error when fetching messages:", e);
                if (simpleConsumer != null) {
                    String host = simpleConsumer.host();
                    int port = simpleConsumer.port();
                    simpleConsumer = null;
                    throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e);
                }

            } else {
                throw new RuntimeException(e);
            }
        }
        if (fetchResponse.hasError()) {
            short code = fetchResponse.errorCode(topic, partition);
            if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
                long startOffset = getOffset(topic, partition, config.startOffsetTime);
                offset = startOffset;
            }
            if (leaderBroker != null) {
                LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":" + leaderBroker.port() + "] partition["
                    + partition + "] error:" + code);
            }
            return null;
        } else {
            ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition);
            return msgs;
        }
    }
 
Example #21
Source File: MessageService.java    From kafka-monitor with Apache License 2.0
public List<Message> getMesage(String topicName, int partitionID, int offset, int count) {
    Topic topic = kafkaService.getTopic(topicName);
    Partition partition = topic.getPartition(partitionID);
    Broker broker = kafkaService.getBrokerById(partition.getLeader().getId());

    SimpleConsumer consumer = new SimpleConsumer(broker.getHost(), broker.getPort(), 10000, 10000, "");
    FetchRequestBuilder requestBuilder = new FetchRequestBuilder()
            .clientId("kafkaMonitor")
            .maxWait(5000)
            .minBytes(1);
    List<Message> messageList = new ArrayList<>(count);
    long currentOffset = offset;
    while (messageList.size() < count) {
        kafka.api.FetchRequest request = requestBuilder.addFetch(topicName, partitionID, currentOffset, 1024 * 1024).build();

        kafka.javaapi.FetchResponse response = consumer.fetch(request);
        ByteBufferMessageSet messageSet = response.messageSet(topicName, partitionID);
        if (messageSet.validBytes() <= 0) break;

        int oldSize = messageList.size();
        StreamSupport.stream(messageSet.spliterator(), false)
                .limit(count - messageList.size())
                .map(MessageAndOffset::message)
                .map((msg) -> {
                    Message mmsg = new Message();
                    if (msg.hasKey()) {
                        mmsg.setKey(readString(msg.key()));
                    }
                    if (!msg.isNull()) {
                        mmsg.setMessage(readString(msg.payload()));
                    }
                    mmsg.setValid(msg.isValid());
                    mmsg.setCompressionCodec(msg.compressionCodec().name());
                    mmsg.setChecksum(msg.checksum());
                    return mmsg;
                }).forEach(messageList::add);
        currentOffset += messageList.size() - oldSize;

    }
    consumer.close();
    return messageList;
}