Java Code Examples for kafka.message.MessageAndOffset#offset()

The following examples show how to use kafka.message.MessageAndOffset#offset(). Each example comes from an open-source project; the source file and license are noted above it.
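All of the examples share the same basic shape: issue a FetchRequest, iterate the returned message set, use offset() to skip entries older than the requested position, and use nextOffset() to decide where the next fetch should start. A minimal sketch of that pattern (consumer, fetchOffset, and process() are placeholders, not taken from any project below):

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.message.MessageAndOffset;

FetchRequest req = new FetchRequestBuilder()
    .clientId("example-client")
    .addFetch("my-topic", 0, fetchOffset, 64 * 1024)
    .build();
FetchResponse resp = consumer.fetch(req);
for (MessageAndOffset mao : resp.messageSet("my-topic", 0)) {
  if (mao.offset() < fetchOffset) {
    continue; // fetches of compressed sets can replay earlier offsets
  }
  process(mao.message());         // placeholder handler
  fetchOffset = mao.nextOffset(); // resume after this message next time
}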
Example 1
Source File: SimpleKafkaConsumer.java    From twill with Apache License 2.0
/**
 * Creates an Iterator of FetchedMessage based on the given message set. The iterator also updates
 * the offset while iterating.
 */
private Iterator<FetchedMessage> createFetchedMessages(ByteBufferMessageSet messageSet, final AtomicLong offset) {
  final Iterator<MessageAndOffset> messages = messageSet.iterator();
  return new AbstractIterator<FetchedMessage>() {
    @Override
    protected FetchedMessage computeNext() {
      while (messages.hasNext()) {
        MessageAndOffset message = messages.next();
        long msgOffset = message.offset();
        if (msgOffset < offset.get()) {
          LOG.trace("Received old offset {}, expecting {} on {}. Message Ignored.",
                    msgOffset, offset.get(), topicPart);
          continue;
        }

        fetchedMessage.setPayload(message.message().payload());
        fetchedMessage.setOffset(message.offset());
        fetchedMessage.setNextOffset(message.nextOffset());

        return fetchedMessage;
      }
      return endOfData();
    }
  };
}
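A hedged sketch of how such an iterator is typically consumed, assuming FetchedMessage exposes the next offset (as the setNextOffset call above suggests) and handle() is a placeholder:

Iterator<FetchedMessage> messages = createFetchedMessages(messageSet, offset);
while (messages.hasNext()) {
  FetchedMessage msg = messages.next();
  handle(msg);                     // placeholder handler
  offset.set(msg.getNextOffset()); // assumed accessor matching setNextOffset above
}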
 
Example 2
Source File: DemoLowLevelConsumer.java    From KafkaExample with Apache License 2.0
public static void main(String[] args) throws Exception {
	final String topic = "topic1";
	String clientID = "DemoLowLevelConsumer1";
	SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
	FetchRequest req = new FetchRequestBuilder().clientId(clientID)
			.addFetch(topic, 0, 0L, 50).addFetch(topic, 1, 0L, 5000).addFetch(topic, 2, 0L, 1000000).build();
	FetchResponse fetchResponse = simpleConsumer.fetch(req);
	ByteBufferMessageSet messageSet = (ByteBufferMessageSet) fetchResponse.messageSet(topic, 0);
	for (MessageAndOffset messageAndOffset : messageSet) {
		ByteBuffer payload = messageAndOffset.message().payload();
		long offset = messageAndOffset.offset();
		byte[] bytes = new byte[payload.limit()];
		payload.get(bytes);
		System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
	}
}
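This example hardcodes 0L as the starting offset for every partition. In practice the starting offset is usually resolved first with an OffsetRequest, following the classic SimpleConsumer example; a sketch (reusing simpleConsumer, topic, and clientID from above):

import java.util.Collections;
import java.util.Map;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;

static long resolveOffset(SimpleConsumer consumer, String topic, int partition,
                          long whichTime, String clientID) {
    TopicAndPartition tap = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        Collections.singletonMap(tap, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
        requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientID);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
        throw new RuntimeException("Offset lookup failed: " + response.errorCode(topic, partition));
    }
    return response.offsets(topic, partition)[0];
}

// e.g. resolveOffset(simpleConsumer, topic, 0, kafka.api.OffsetRequest.EarliestTime(), clientID)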
 
Example 3
Source File: KafkaSimpleConsumer.java    From attic-apex-malhar with Apache License 2.0
  @Override
  public void run()
  {
    long offset = 0;
    while (isAlive) {
      // create a fetch request for topic "topic1", partition 1, current offset, and fetch size of 1MB
      FetchRequest fetchRequest = new FetchRequestBuilder().clientId("default_client").addFetch("topic1", 1, offset, 1000000).build();

//      FetchRequest fetchRequest = new FetchRequest("topic1", 0, offset, 1000000);

      // get the message set from the consumer and print them out
      ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
      Iterator<MessageAndOffset> itr = messages.iterator();

      while (itr.hasNext() && isAlive) {
        MessageAndOffset msg = itr.next();
        // record the offset of the message just consumed (see the note after this example)
        offset = msg.offset();
        logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()), offset);
        receiveCount++;
      }
    }
  }
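One caveat in this example: the loop stores msg.offset(), the offset of the message just consumed, so the next fetch re-reads that message. The usual fix is a one-line change:

offset = msg.nextOffset(); // i.e. msg.offset() + 1 for uncompressed sets; the next fetch starts after the consumed message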
 
Example 4
Source File: KafkaLeaderReader.java    From arcusplatform with Apache License 2.0
private int dispatch(FetchResponse response) {
   int numDispatched = 0;
   for(TopicAndPartition tap: new ArrayList<>(offsets.keySet())) {
      short errorCode = response.errorCode(tap.topic(), tap.partition());
      if(errorCode != 0) {
         logger.warn("Error reading from topic: [{}] partition: [{}]", tap.topic(), tap.partition(), ErrorMapping.exceptionFor(errorCode));
         continue;
      }

      ByteBufferMessageSet message = response.messageSet(tap.topic(), tap.partition());
      for(MessageAndOffset mao: message) {
         Long offset = offsets.get(tap);
         if(offset != null && offset > mao.offset()) {
            // skip older offsets
            continue;
         }
         KafkaConsumer handler = handlers.computeIfAbsent(tap, handlerFactory);
         if(handler == null) {
            logger.debug("No handler for topic: [{}] partition: [{}], this partition won't be processed", tap.topic(), tap.partition());
            offsets.remove(tap);
            handlers.remove(tap);
            break;
         }
         if(handler.apply(tap, mao.message())) {
            numDispatched++;
            offsets.put(tap, mao.nextOffset());
         }
         else {
            logger.debug("Done processing topic: [{}] partition: [{}]", tap.topic(), tap.partition());
            offsets.remove(tap);
            handlers.remove(tap);
            break;
         }
      }
   }

   return numDispatched;
}
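Note the commit ordering here: offsets.put(tap, mao.nextOffset()) runs only after handler.apply(...) returns true, so after a crash the reader re-reads any message whose handling was never recorded rather than dropping it, giving at-least-once delivery.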
 
Example 5
Source File: KafkaLeaderReader.java    From arcusplatform with Apache License 2.0
public void update(ByteBufferMessageSet bbms) {
   boolean anyMatches = false;
   V value = null;
   for(MessageAndOffset mao: bbms) {
      anyMatches = true;

      value = factory.apply(mao.message());
      logger.trace("Scanning for [{}] in [{}] at [{}]@[{},{},{}]: ", target, tap.partition(), value, startOffset, mao.offset(), endOffset);
      int delta = target.compareTo(value);
      if(delta == 0) {
         logger.debug("Found exact offset for partition: [{}] value: [{}]", tap.partition(), value);
         this.offset = mao.offset();
         return;
      }
      else if(delta > 0) { // not far enough
         this.startOffset = mao.offset();
      }
      else if(delta < 0) { // too far
         this.endOffset = mao.offset();
         break; // don't process the next message or we'll think we're past the end
      }
   }

   if((endOffset - startOffset) < 2) {
      logger.debug("Found offset for partition: [{}] value: [{}]", tap.partition(), value);
      this.offset = this.endOffset; // start with the next message after value
   }
   else if(!anyMatches) {
      logger.debug("Reached the end of partition [{}] using offset [{}]", tap.partition(), endOffset);
      this.offset = this.endOffset;
   }
}
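For context, this update(...) appears to be one probe of a search for the offset of a target value: each call narrows the [startOffset, endOffset] window by decoding messages and comparing them against target via mao.offset(), and the search settles on an offset once the window shrinks below two offsets or a pass sees no messages at all.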
 
Example 6
Source File: KafkaReader.java    From HiveKa with Apache License 2.0
/**
 * Fetches the next Kafka message and stuffs the results into the key and
 * value
 *
 * @param key the key to populate with the message's metadata
 * @param payload the destination for the message payload
 * @param pKey the destination for the message key, if present
 * @return true if a message was read, false if no more messages remain
 * @throws IOException
 */
public boolean getNext(KafkaKey key, BytesWritable payload, BytesWritable pKey) throws IOException {
  if (hasNext()) {

    MessageAndOffset msgAndOffset = messageIter.next();
    Message message = msgAndOffset.message();

    ByteBuffer buf = message.payload();
    int origSize = buf.remaining();
    byte[] bytes = new byte[origSize];
    buf.get(bytes, 0, origSize); // the second argument is an offset into bytes, not into the buffer
    payload.set(bytes, 0, origSize);

    buf = message.key();
    if(buf != null){
      origSize = buf.remaining();
      bytes = new byte[origSize];
      buf.get(bytes, 0, origSize);
      pKey.set(bytes, 0, origSize);
    }

    key.clear();
    key.set(kafkaRequest.getTopic(), kafkaRequest.getLeaderId(),
        kafkaRequest.getPartition(), currentOffset,
        msgAndOffset.offset() + 1, message.checksum());

    key.setMessageSize(msgAndOffset.message().size());

    currentOffset = msgAndOffset.offset() + 1; // increase offset
    currentCount++; // increase count

    return true;
  } else {
    return false;
  }
}
 
Example 7
Source File: KafkaSimpleConsumer.java    From Pistachio with Apache License 2.0
private Iterable<BytesMessageWithOffset> filterAndDecode(Iterable<MessageAndOffset> kafkaMessages, long offset) {
    List<BytesMessageWithOffset> ret = new LinkedList<>();
    for (MessageAndOffset msgAndOffset: kafkaMessages) {
        if (msgAndOffset.offset() >= offset) {
            byte[] payload = decoder.fromMessage(msgAndOffset.message());
            // store nextOffset so the next fetch resumes after this message instead of re-reading it
            ret.add(new BytesMessageWithOffset(payload, msgAndOffset.nextOffset()));
        }
    }
    return ret;
}
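The offset() >= offset filter is needed because, with compressed message sets, a fetch at offset N returns the whole compressed wrapper, so the response can contain messages with offsets below N. Recording nextOffset() rather than offset() means the following fetch resumes after the last delivered message.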
 
Example 8
Source File: KafkaPartitionReader.java    From Scribengin with GNU Affero General Public License v3.0
byte[] getCurrentMessagePayload() {
  while(currentMessageSetIterator.hasNext()) {
    MessageAndOffset messageAndOffset = currentMessageSetIterator.next();
    if (messageAndOffset.offset() < currentOffset) continue; //old offset, ignore
    Message message = messageAndOffset.message();
    ByteBuffer payload = message.payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    currentOffset = messageAndOffset.nextOffset();
    return bytes;
  }
  return null;
}
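Most of these examples drain the payload with the same idiom: allocate byte[payload.limit()] and call payload.get(bytes). That works because Message.payload() returns a freshly sliced buffer positioned at 0, so limit() equals remaining(). A slightly more defensive helper (a sketch; the name is ours):

static byte[] toBytes(java.nio.ByteBuffer payload) {
  byte[] bytes = new byte[payload.remaining()]; // remaining() is also correct for non-zero positions
  payload.get(bytes);
  return bytes;
}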
 
Example 9
Source File: KafkaPartitionReader.java    From Scribengin with GNU Affero General Public License v3.0
public List<byte[]> execute() throws Exception {
  FetchRequest req = new FetchRequestBuilder()
      .clientId(name)
      .addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize)
      .minBytes(1)
      .maxWait(maxWait)
      .build();
  
  FetchResponse fetchResponse = consumer.fetch(req);
  if(fetchResponse.hasError()) {
    short errorCode = fetchResponse.errorCode(topic, partitionMetadata.partitionId());
    String msg = "Kafka error code = " + errorCode + ", Partition  " + partitionMetadata.partitionId() ;
    throw new Exception(msg);
  }
  List<byte[]> holder = new ArrayList<byte[]>();
  ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  int count = 0;
  for(MessageAndOffset messageAndOffset : messageSet) {
    if (messageAndOffset.offset() < currentOffset) continue; //old offset, ignore
    ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    holder.add(bytes);
    currentOffset = messageAndOffset.nextOffset();
    count++;
    if(count == maxRead) break;
  }
  return holder;
}
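The minBytes(1) and maxWait(maxWait) settings make this a long poll: the broker holds the request until at least one byte is available or maxWait milliseconds elapse, so the calling loop does not spin on an empty partition.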
 
Example 10
Source File: PartitionConsumer.java    From jstorm with Apache License 2.0
private void fillMessages() {
    ByteBufferMessageSet msgs;
    try {
        long start = System.currentTimeMillis();
        msgs = consumer.fetchMessages(partition, emittingOffset + 1);

        if (msgs == null) {
            LOG.error("fetch null message from offset {}", emittingOffset);
            return;
        }

        int count = 0;
        for (MessageAndOffset msg : msgs) {
            count += 1;
            emittingMessages.add(msg);
            emittingOffset = msg.offset();
            pendingOffsets.add(emittingOffset);
            LOG.debug("fillmessage fetched a message:{}, offset:{}", msg.message().toString(), msg.offset());
        }
        long end = System.currentTimeMillis();
        LOG.info("fetch message from partition:" + partition + ", offset:" + emittingOffset
                + ", size:" + msgs.sizeInBytes() + ", count:" + count + ", time:" + (end - start));
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
    }
}
 
Example 11
Source File: LowLevelConsumerExample.java    From pulsar with Apache License 2.0
private static void consumeMessage(Arguments arguments) {
    Properties properties = new Properties();
    properties.put(SimpleConsumer.HTTP_SERVICE_URL, arguments.httpServiceUrl);
    SimpleConsumer consumer = new SimpleConsumer(arguments.serviceUrl, 0, 0, 0, "clientId", properties);

    long readOffset = kafka.api.OffsetRequest.EarliestTime();
    kafka.api.FetchRequest fReq = new FetchRequestBuilder().clientId("c1")
            .addFetch(arguments.topicName, arguments.partitionIndex, readOffset, 100000).build();
    FetchResponse fetchResponse = consumer.fetch(fReq);

    TestDecoder decoder = new TestDecoder();
    int count = 0;
    while (count < arguments.totalMessages || arguments.totalMessages == -1) {
        // 1. Read from topic without subscription/consumer-group name.
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(arguments.topicName,
                arguments.partitionIndex)) {
            MessageId msgIdOffset = (messageAndOffset instanceof PulsarMsgAndOffset)
                    ? ((PulsarMsgAndOffset) messageAndOffset).getFullOffset()
                    : null;
            long currentOffset = messageAndOffset.offset();
            if (currentOffset < readOffset) {
                continue;
            }

            ByteBuffer payload = messageAndOffset.message().payload();

            byte[] bytes = new byte[payload.limit()];
            payload.get(bytes);
            Tweet tweet = decoder.fromBytes(bytes);
            log.info("Received tweet: {}-{}", tweet.userName, tweet.message);
            count++;

            TopicAndPartition topicPartition = new TopicAndPartition(arguments.topicName, arguments.partitionIndex);
            OffsetMetadataAndError offsetError = new OffsetMetadataAndError(msgIdOffset, null, (short) 0);
            Map<TopicAndPartition, OffsetMetadataAndError> requestInfo = Collections.singletonMap(topicPartition,
                    offsetError);
            // 2. Commit offset for a given topic and subscription-name/consumer-name.
            OffsetCommitRequest offsetReq = new OffsetCommitRequest(arguments.groupName, requestInfo, (short) -1, 0,
                    "c1");
            consumer.commitOffsets(offsetReq);
        }
    }

    consumer.close();
}
 
Example 12
Source File: KafkaReader.java    From HiveKa with Apache License 2.0
/**
 * Creates and issues a fetch request, positioning the message iterator at the
 * current offset and skipping any older offsets returned by the broker.
 *
 * @return false if there are no more messages to fetch
 * @throws IOException
 */
public boolean fetch() throws IOException {
  if (currentOffset >= lastOffset) {
    return false;
  }
  long tempTime = System.currentTimeMillis();
  TopicAndPartition topicAndPartition = new TopicAndPartition(
      kafkaRequest.getTopic(), kafkaRequest.getPartition());
  log.debug("\nAsking for offset : " + (currentOffset));
  PartitionFetchInfo partitionFetchInfo = new PartitionFetchInfo(
      currentOffset, fetchBufferSize);

  HashMap<TopicAndPartition, PartitionFetchInfo> fetchInfo = new HashMap<TopicAndPartition, PartitionFetchInfo>();
  fetchInfo.put(topicAndPartition, partitionFetchInfo);

  FetchRequest fetchRequest = new FetchRequest(
      1, // fetch request correlation id
      "hive_kafka_client", // client name
      1000, // fetch request max wait
      1024, // fetch request min bytes
      fetchInfo);

  FetchResponse fetchResponse = null;
  try {
    fetchResponse = simpleConsumer.fetch(fetchRequest);
    if (fetchResponse.hasError()) {
      log.info("Error encountered during a fetch request from Kafka");
      log.info("Error Code generated : "
          + fetchResponse.errorCode(kafkaRequest.getTopic(),
          kafkaRequest.getPartition()));
      return false;
    } else {
      ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(
          kafkaRequest.getTopic(), kafkaRequest.getPartition());
      lastFetchTime = (System.currentTimeMillis() - tempTime);
      log.debug("Time taken to fetch : "
          + (lastFetchTime / 1000) + " seconds");
      log.debug("The size of the ByteBufferMessageSet returned is : " + messageBuffer.sizeInBytes());
      int skipped = 0;
      totalFetchTime += lastFetchTime;
      messageIter = messageBuffer.iterator();
      //boolean flag = false;
      Iterator<MessageAndOffset> messageIter2 = messageBuffer
          .iterator();
      MessageAndOffset message = null;
      while (messageIter2.hasNext()) {
        message = messageIter2.next();
        if (message.offset() < currentOffset) {
          //flag = true;
          skipped++;
        } else {
          log.debug("Skipped offsets till : "
              + message.offset());
          break;
        }
      }
      log.debug("Number of offsets to be skipped: " + skipped);
      while (skipped != 0) {
        MessageAndOffset skippedMessage = messageIter.next();
        log.debug("Skipping offset : " + skippedMessage.offset());
        skipped--;
      }

      if (!messageIter.hasNext()) {
        System.out
            .println("No more data left to process. Returning false");
        messageIter = null;
        return false;
      }

      return true;
    }
  } catch (Exception e) {
    log.info("Exception generated during fetch");
    e.printStackTrace();
    return false;
  }

}
 
Example 13
Source File: Kafka08ConsumerClient.java    From incubator-gobblin with Apache License 2.0
public Kafka08ConsumerRecord(MessageAndOffset messageAndOffset, String topic, int partition) {
  super(messageAndOffset.offset(), messageAndOffset.message().size(), topic, partition);
  this.messageAndOffset = messageAndOffset;
}
 
Example 14
Source File: LegacyKafkaClient.java    From secor with Apache License 2.0
private Message getMessage(TopicPartition topicPartition, long offset,
                           SimpleConsumer consumer) {
    LOG.debug("fetching message topic {} partition {} offset {}",
            topicPartition.getTopic(), topicPartition.getPartition(), offset);
    final int MAX_MESSAGE_SIZE_BYTES = mConfig.getMaxMessageSizeBytes();
    final String clientName = getClientName(topicPartition);
    kafka.api.FetchRequest request = new FetchRequestBuilder().clientId(clientName)
            .addFetch(topicPartition.getTopic(), topicPartition.getPartition(), offset,
                      MAX_MESSAGE_SIZE_BYTES)
            .build();
    FetchResponse response = consumer.fetch(request);
    if (response.hasError()) {
        consumer.close();
        int errorCode = response.errorCode(topicPartition.getTopic(), topicPartition.getPartition());

        if (errorCode == Errors.OFFSET_OUT_OF_RANGE.code()) {
          throw new MessageDoesNotExistException();
        } else {
          throw new RuntimeException("Error fetching offset data. Reason: " + errorCode);
        }
    }
    MessageAndOffset messageAndOffset = response.messageSet(
            topicPartition.getTopic(), topicPartition.getPartition()).iterator().next();
    byte[] keyBytes = null;
    if (messageAndOffset.message().hasKey()) {
        ByteBuffer key = messageAndOffset.message().key();
        keyBytes = new byte[key.limit()];
        key.get(keyBytes);
    }
    byte[] payloadBytes = null;
    if (!messageAndOffset.message().isNull()) {
        ByteBuffer payload = messageAndOffset.message().payload();
        payloadBytes = new byte[payload.limit()];
        payload.get(payloadBytes);
    }
    long timestamp = (mConfig.useKafkaTimestamp())
            ? mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(messageAndOffset)
            : 0L;

    return new Message(topicPartition.getTopic(), topicPartition.getPartition(),
            messageAndOffset.offset(), keyBytes, payloadBytes, timestamp, null);
}
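Note the null handling: hasKey() guards key extraction and isNull() guards the payload, so a tombstone message (null payload) comes back with payloadBytes == null rather than throwing.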
 
Example 15
Source File: KafkaMessageReceiverImpl.java    From message-queue-client-framework with Apache License 2.0
@Override
public synchronized List<V> receive(String topic, int partition, long beginOffset,
                                    long readOffset) {
    if (readOffset <= 0) {

        throw new IllegalArgumentException("read offset must be greater than 0");
    }

    List<V> messages = new ArrayList<V>();

    boolean returnFlag = false;

    for (int i = 0; i < 3; i++) {

        if (checkLeader(topic, partition, beginOffset)) {
            returnFlag = true;
            break;
        }
    }

    if (!returnFlag)
        return messages;

    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(
            topic, partition)) {

        long currentOffset = messageAndOffset.offset();

        if (currentOffset > beginOffset + readOffset - 1) {

            break;
        }

        ByteBuffer valload = messageAndOffset.message().payload();

        byte[] vals = new byte[valload.limit()];

        valload.get(vals);

        @SuppressWarnings("unchecked")
        Decoder<V> decoder = (Decoder<V>) RefleTool.newInstance(pool.getValDecoderClass(), props);

        V val = decoder.fromBytes(vals);

        messages.add(val);
    }

    return messages;
}