Java Code Examples for kafka.javaapi.FetchResponse#messageSet()

The following examples show how to use kafka.javaapi.FetchResponse#messageSet(). Each example is extracted from an open source project; the source file and license are noted above it.
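
All of the examples share the same basic pattern: build a FetchRequest with FetchRequestBuilder, issue it through a kafka.javaapi.consumer.SimpleConsumer, check hasError(), and then read the per-partition ByteBufferMessageSet via messageSet(topic, partition). The minimal sketch below condenses that pattern into one runnable class; the broker address, topic name, partition number, and fetch size are placeholder values, not settings taken from any of the projects listed here.

import java.nio.ByteBuffer;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;

public class FetchResponseMessageSetSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder connection settings; adjust for your broker.
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "sketch-client");
    try {
      FetchRequest req = new FetchRequestBuilder()
          .clientId("sketch-client")
          .addFetch("my-topic", 0, 0L, 100000) // topic, partition, start offset, max bytes
          .build();

      FetchResponse fetchResponse = consumer.fetch(req);
      if (fetchResponse.hasError()) {
        // errorCode(topic, partition) returns the kafka.common.ErrorMapping code for that partition
        throw new RuntimeException("Fetch failed, error code: " + fetchResponse.errorCode("my-topic", 0));
      }

      // messageSet(topic, partition) returns the ByteBufferMessageSet for one requested partition.
      ByteBufferMessageSet messageSet = fetchResponse.messageSet("my-topic", 0);
      for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println(messageAndOffset.offset() + ": " + new String(bytes, "UTF-8"));
      }
    } finally {
      consumer.close();
    }
  }
}

Note that this is the legacy low-level ("simple") consumer API from the Kafka 0.8/0.9 era, which later Kafka releases removed; all of the examples below target clients from that era.
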
Example 1
Source File: DemoLowLevelConsumer.java    From KafkaExample with Apache License 2.0
public static void main(String[] args) throws Exception {
	final String topic = "topic1";
	String clientID = "DemoLowLevelConsumer1";
	SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
	// Fetch from three partitions of the same topic, each with its own max fetch size in bytes
	FetchRequest req = new FetchRequestBuilder().clientId(clientID)
			.addFetch(topic, 0, 0L, 50).addFetch(topic, 1, 0L, 5000).addFetch(topic, 2, 0L, 1000000).build();
	FetchResponse fetchResponse = simpleConsumer.fetch(req);
	ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, 0);
	for (MessageAndOffset messageAndOffset : messageSet) {
		ByteBuffer payload = messageAndOffset.message().payload();
		long offset = messageAndOffset.offset();
		byte[] bytes = new byte[payload.limit()];
		payload.get(bytes);
		System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
	}
}
 
Example 2
Source File: Kafka08ConsumerClient.java    From incubator-gobblin with Apache License 2.0
private Iterator<KafkaConsumerRecord> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
  try {
    ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
    return Iterators.transform(messageBuffer.iterator(),
        new Function<kafka.message.MessageAndOffset, KafkaConsumerRecord>() {
          @Override
          public KafkaConsumerRecord apply(kafka.message.MessageAndOffset input) {
            return new Kafka08ConsumerRecord(input, partition.getTopicName(), partition.getId());
          }
        });
  } catch (Exception e) {
    log.warn(String.format("Failed to retrieve next message buffer for partition %s: %s. "
        + "The remainder of this partition will be skipped.", partition, e));
    return null;
  }
}
 
Example 3
Source File: KafkaPartitionReader.java    From Scribengin with GNU Affero General Public License v3.0
void nextMessageSet() throws Exception {
  FetchRequest req = new FetchRequestBuilder()
      .clientId(name)
      .addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize)
      .minBytes(1)
      .maxWait(1000)
      .build();
  
  FetchResponse fetchResponse = consumer.fetch(req);
  if(fetchResponse.hasError()) {
    throw new Exception("TODO: handle the error, reset the consumer....");
  }
  
  currentMessageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  currentMessageSetIterator = currentMessageSet.iterator();
}
 
Example 4
Source File: KafkaLeaderReader.java    From arcusplatform with Apache License 2.0
private int dispatch(FetchResponse response) {
   int numDispatched = 0;
   for(TopicAndPartition tap: new ArrayList<>(offsets.keySet())) {
      short errorCode = response.errorCode(tap.topic(), tap.partition());
      if(errorCode != 0) {
         logger.warn("Error reading from topic: [{}] partition: [{}]", tap.topic(), tap.partition(), ErrorMapping.exceptionFor(errorCode));
         continue;
      }

      ByteBufferMessageSet messages = response.messageSet(tap.topic(), tap.partition());
      for(MessageAndOffset mao: messages) {
         Long offset = offsets.get(tap);
         if(offset != null && offset > mao.offset()) {
            // skip older offsets
            continue;
         }
         KafkaConsumer handler = handlers.computeIfAbsent(tap, handlerFactory);
         if(handler == null) {
            logger.debug("No handler for topic: [{}] partition: [{}], this partition won't be processed", tap.topic(), tap.partition());
            offsets.remove(tap);
            handlers.remove(tap);
            break;
         }
         if(handler.apply(tap, mao.message())) {
            numDispatched++;
            offsets.put(tap, mao.nextOffset());
         }
         else {
            logger.debug("Done processing topic: [{}] partition: [{}]", tap.topic(), tap.partition());
            offsets.remove(tap);
            handlers.remove(tap);
            break;
         }
      }
   }

   return numDispatched;
}
 
Example 5
Source File: AbstractExactlyOnceKafkaOutputOperator.java    From attic-apex-malhar with Apache License 2.0
private void initializeLastProcessingOffset()
{
  // read last received kafka message
  TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());

  if (tm == null) {
    throw new RuntimeException("Failed to retrieve topic metadata");
  }

  partitionNum = tm.partitionsMetadata().size();

  lastMsgs = new HashMap<Integer, Pair<byte[],byte[]>>(partitionNum);

  for (PartitionMetadata pm : tm.partitionsMetadata()) {

    String leadBroker = pm.leader().host();
    int port = pm.leader().port();
    String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
    SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);

    long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);

    FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();

    FetchResponse fetchResponse = consumer.fetch(req);
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {

      Message m = messageAndOffset.message();

      ByteBuffer payload = m.payload();
      ByteBuffer key = m.key();
      byte[] valueBytes = new byte[payload.limit()];
      byte[] keyBytes = new byte[key.limit()];
      payload.get(valueBytes);
      key.get(keyBytes);
      lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
    }
  }
}
 
Example 6
Source File: KafkaWrapper.java    From incubator-gobblin with Apache License 2.0
private Iterator<MessageAndOffset> getIteratorFromFetchResponse(FetchResponse fetchResponse,
    KafkaPartition partition) {
  try {
    ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
    return messageBuffer.iterator();
  } catch (Exception e) {
    LOG.warn(String.format("Failed to retrieve next message buffer for partition %s: %s. "
        + "The remainder of this partition will be skipped.", partition, e));
    return null;
  }
}
 
Example 7
Source File: KafkaPartitionReader.java    From Scribengin with GNU Affero General Public License v3.0
public List<byte[]> execute() throws Exception {
  FetchRequest req = new FetchRequestBuilder()
      .clientId(name)
      .addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize)
      .minBytes(1)
      .maxWait(maxWait)
      .build();
  
  FetchResponse fetchResponse = consumer.fetch(req);
  if(fetchResponse.hasError()) {
    short errorCode = fetchResponse.errorCode(topic, partitionMetadata.partitionId());
    String msg = "Kafka error code = " + errorCode + ", Partition  " + partitionMetadata.partitionId() ;
    throw new Exception(msg);
  }
  List<byte[]> holder = new ArrayList<byte[]>();
  ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  int count = 0;
  for(MessageAndOffset messageAndOffset : messageSet) {
    if (messageAndOffset.offset() < currentOffset) continue; //old offset, ignore
    ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    holder.add(bytes);
    currentOffset = messageAndOffset.nextOffset();
    count++;
    if(count == maxRead) break;
  }
  return holder;
}
 
Example 8
Source File: KafkaUtils.java    From storm-kafka-0.8-plus with Apache License 2.0
public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset) {
    ByteBufferMessageSet msgs = null;
    String topic = config.topic;
    int partitionId = partition.partition;
    for (int errors = 0; errors < 2 && msgs == null; errors++) {
        FetchRequestBuilder builder = new FetchRequestBuilder();
        FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes).
                clientId(config.clientId).build();
        FetchResponse fetchResponse;
        try {
            fetchResponse = consumer.fetch(fetchRequest);
        } catch (Exception e) {
            if (e instanceof ConnectException) {
                throw new FailedFetchException(e);
            } else {
                throw new RuntimeException(e);
            }
        }
        if (fetchResponse.hasError()) {
            KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
            if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange && errors == 0) {
                long startOffset = getOffset(consumer, topic, partitionId, config.startOffsetTime);
                LOG.warn("Got fetch request with offset out of range: [" + offset + "]; " +
                        "retrying with default start offset time from configuration. " +
                        "configured start offset time: [" + config.startOffsetTime + "] offset: [" + startOffset + "]");
                offset = startOffset;
            } else {
                String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
                LOG.error(message);
                throw new FailedFetchException(message);
            }
        } else {
            msgs = fetchResponse.messageSet(topic, partitionId);
        }
    }
    return msgs;
}
 
Example 9
Source File: LowLevelConsumerExample.java    From pulsar with Apache License 2.0
private static void consumeMessage(Arguments arguments) {

        Properties properties = new Properties();
        properties.put(SimpleConsumer.HTTP_SERVICE_URL, arguments.httpServiceUrl);
        SimpleConsumer consumer = new SimpleConsumer(arguments.serviceUrl, 0, 0, 0, "clientId", properties);

        long readOffset = kafka.api.OffsetRequest.EarliestTime();
        kafka.api.FetchRequest fReq = new FetchRequestBuilder().clientId("c1")
                .addFetch(arguments.topicName, arguments.partitionIndex, readOffset, 100000).build();
        FetchResponse fetchResponse = consumer.fetch(fReq);

        TestDecoder decoder = new TestDecoder();
        int count = 0;
        while (count < arguments.totalMessages || arguments.totalMessages == -1) {
            // 1. Read from topic without subscription/consumer-group name.
            for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(arguments.topicName,
                    arguments.partitionIndex)) {
                MessageId msgIdOffset = (messageAndOffset instanceof PulsarMsgAndOffset)
                        ? ((PulsarMsgAndOffset) messageAndOffset).getFullOffset()
                        : null;
                long currentOffset = messageAndOffset.offset();
                if (currentOffset < readOffset) {
                    continue;
                }

                ByteBuffer payload = messageAndOffset.message().payload();

                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                Tweet tweet = decoder.fromBytes(bytes);
                log.info("Received tweet: {}-{}", tweet.userName, tweet.message);
                count++;

                TopicAndPartition topicPartition = new TopicAndPartition(arguments.topicName, arguments.partitionIndex);
                OffsetMetadataAndError offsetError = new OffsetMetadataAndError(msgIdOffset, null, (short) 0);
                Map<TopicAndPartition, OffsetMetadataAndError> requestInfo = Collections.singletonMap(topicPartition,
                        offsetError);
                // 2. Commit offset for a given topic and subscription-name/consumer-name.
                OffsetCommitRequest offsetReq = new OffsetCommitRequest(arguments.groupName, requestInfo, (short) -1, 0,
                        "c1");
                consumer.commitOffsets(offsetReq);
            }
        }

        consumer.close();
}
 
Example 10
Source File: KafkaLowLevelConsumer08.java    From datacollector with Apache License 2.0
@Override
public List<MessageAndOffset> read(long offset) throws StageException {

  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if(e instanceof SocketTimeoutException) {
      //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      //consumer if no message is available for consumption after the specified timeout value.
      //If this happens exit gracefully
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }

  if(fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if(code == ErrorMapping.OffsetOutOfRangeCode()) {
      //invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      //try re-initializing connection with kafka; the re-fetch below assumes the
      //consumer is re-created for the new leader elsewhere in this class
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
    }

    //re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);

    if(fetchResponse.hasError()) {
      //could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }

  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    final Object key = messageAndOffset.message().key();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(
        key,
        bytes,
        messageAndOffset.nextOffset(),
        partition
    );
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
 
Example 11
Source File: KafkaLowLevelConsumer09.java    From datacollector with Apache License 2.0
@Override
public List<MessageAndOffset> read(long offset) throws StageException {

  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if(e instanceof SocketTimeoutException) {
      //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      //consumer if no message is available for consumption after the specified timeout value.
      //If this happens exit gracefully
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }

  if(fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if(code == ErrorMapping.OffsetOutOfRangeCode()) {
      //invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      //try re-initializing connection with kafka; the re-fetch below assumes the
      //consumer is re-created for the new leader elsewhere in this class
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
    }

    //re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);

    if(fetchResponse.hasError()) {
      //could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }

  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    final Object key = messageAndOffset.message().key();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(
        key,
        bytes,
        messageAndOffset.nextOffset(),
        partition
    );
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
 
Example 12
Source File: KafkaReader.java    From HiveKa with Apache License 2.0
/**
 * Issues a fetch request at the current offset and positions the message iterator,
 * skipping offsets that have already been consumed.
 *
 * @return false if there is nothing left to fetch
 * @throws IOException
 */

public boolean fetch() throws IOException {
  if (currentOffset >= lastOffset) {
    return false;
  }
  long tempTime = System.currentTimeMillis();
  TopicAndPartition topicAndPartition = new TopicAndPartition(
      kafkaRequest.getTopic(), kafkaRequest.getPartition());
  log.debug("\nAsking for offset : " + (currentOffset));
  PartitionFetchInfo partitionFetchInfo = new PartitionFetchInfo(
      currentOffset, fetchBufferSize);

  HashMap<TopicAndPartition, PartitionFetchInfo> fetchInfo = new HashMap<TopicAndPartition, PartitionFetchInfo>();
  fetchInfo.put(topicAndPartition, partitionFetchInfo);

  FetchRequest fetchRequest = new FetchRequest(
      1, // fetch request correlation id
      "hive_kafka_client", // client name
      1000, // fetch request max wait
      1024, // fetch request min bytes
      fetchInfo);

  FetchResponse fetchResponse = null;
  try {
    fetchResponse = simpleConsumer.fetch(fetchRequest);
    if (fetchResponse.hasError()) {
      log.info("Error encountered during a fetch request from Kafka");
      log.info("Error Code generated : "
          + fetchResponse.errorCode(kafkaRequest.getTopic(),
          kafkaRequest.getPartition()));
      return false;
    } else {
      ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(
          kafkaRequest.getTopic(), kafkaRequest.getPartition());
      lastFetchTime = (System.currentTimeMillis() - tempTime);
      log.debug("Time taken to fetch : "
          + (lastFetchTime / 1000) + " seconds");
      log.debug("The size of the ByteBufferMessageSet returned is : " + messageBuffer.sizeInBytes());
      int skipped = 0;
      totalFetchTime += lastFetchTime;
      messageIter = messageBuffer.iterator();
      Iterator<MessageAndOffset> messageIter2 = messageBuffer.iterator();
      MessageAndOffset message = null;
      while (messageIter2.hasNext()) {
        message = messageIter2.next();
        if (message.offset() < currentOffset) {
          skipped++;
        } else {
          log.debug("Skipped offsets till : "
              + message.offset());
          break;
        }
      }
      log.debug("Number of offsets to be skipped: " + skipped);
      while (skipped != 0) {
        MessageAndOffset skippedMessage = messageIter.next();
        log.debug("Skipping offset : " + skippedMessage.offset());
        skipped--;
      }

      if (!messageIter.hasNext()) {
        log.info("No more data left to process. Returning false");
        messageIter = null;
        return false;
      }

      return true;
    }
  } catch (Exception e) {
    log.info("Exception generated during fetch");
    e.printStackTrace();
    return false;
  }

}
 
Example 13
Source File: KafkaConsumer.java    From jstorm with Apache License 2.0
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {

        String topic = config.topic;
        FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes)
                .maxWait(config.fetchWaitMaxMs).build();
        FetchResponse fetchResponse = null;
        SimpleConsumer simpleConsumer = null;
        try {
            simpleConsumer = findLeaderConsumer(partition);
            if (simpleConsumer == null) {
                // no leader consumer available for this partition; nothing to fetch
                return null;
            }
            fetchResponse = simpleConsumer.fetch(req);
        } catch (Exception e) {
            if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException
                    || e instanceof UnresolvedAddressException) {
                LOG.warn("Network error when fetching messages:", e);
                if (simpleConsumer != null) {
                    String host = simpleConsumer.host();
                    int port = simpleConsumer.port();
                    simpleConsumer = null;
                    throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e);
                }

            } else {
                throw new RuntimeException(e);
            }
        }
        if (fetchResponse == null) {
            // fetch failed without a usable response (e.g. leader lookup failed); nothing to return
            return null;
        }
        if (fetchResponse.hasError()) {
            short code = fetchResponse.errorCode(topic, partition);
            if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
                long startOffset = getOffset(topic, partition, config.startOffsetTime);
                offset = startOffset;
            }
            if (leaderBroker != null) {
                LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":" + leaderBroker.port() + "] partition["
                    + partition + "] error:" + code);
            }
            return null;
        } else {
            return fetchResponse.messageSet(topic, partition);
        }
}