kafka.javaapi.message.ByteBufferMessageSet Java Examples

The following examples show how to use kafka.javaapi.message.ByteBufferMessageSet. They are extracted from open source projects; the source file, originating project, and license are noted above each example. A minimal end-to-end sketch of the basic fetch-and-iterate pattern follows; the project examples come after it.
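The sketch below shows the core pattern behind most of the examples on this page: build a FetchRequest, fetch through a Kafka 0.8 SimpleConsumer, and iterate the resulting ByteBufferMessageSet. The broker host/port, topic, partition, client id, and size values are placeholder assumptions, not values taken from any example below.

import java.nio.ByteBuffer;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;

public class ByteBufferMessageSetQuickstart {
    public static void main(String[] args) throws Exception {
        // Broker address, topic, partition, and buffer sizes are placeholder assumptions.
        SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "quickstart-client");
        try {
            FetchRequest request = new FetchRequestBuilder()
                    .clientId("quickstart-client")
                    .addFetch("test-topic", 0, 0L, 100000) // topic, partition, start offset, max bytes
                    .build();
            FetchResponse response = consumer.fetch(request);
            if (response.hasError()) {
                throw new RuntimeException("Fetch error code: " + response.errorCode("test-topic", 0));
            }
            ByteBufferMessageSet messageSet = response.messageSet("test-topic", 0);

            // ByteBufferMessageSet is Iterable<MessageAndOffset>; each entry carries the
            // message plus its offset and the next offset to fetch from.
            for (MessageAndOffset messageAndOffset : messageSet) {
                ByteBuffer payload = messageAndOffset.message().payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                System.out.println(messageAndOffset.offset() + ": " + new String(bytes, "UTF-8"));
            }
        } finally {
            consumer.close();
        }
    }
}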
Example #1
Source File: KafkaBoltTest.java    From storm-kafka-0.8-plus with Apache License 2.0
private boolean verifyMessage(String key, String message) {
    long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), lastMessageOffset);
    MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
    Message kafkaMessage = messageAndOffset.message();
    ByteBuffer messageKeyBuffer = kafkaMessage.key();
    String keyString = null;
    String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
    if (messageKeyBuffer != null) {
        keyString = new String(Utils.toByteArray(messageKeyBuffer));
    }
    assertEquals(key, keyString);
    assertEquals(message, messageString);
    return true;
}
 
Example #2
Source File: TridentKafkaEmitter.java    From storm-kafka-0.8-plus with Apache License 2.0
/**
 * Re-emits the batch described by the provided metadata.
 *
 * @param attempt   the transaction attempt being replayed
 * @param collector the collector to emit messages to
 * @param partition the Kafka partition to re-read from
 * @param meta      batch metadata containing instanceId, offset, and nextOffset
 */
private void reEmitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map meta) {
    LOG.info("re-emitting batch, attempt " + attempt);
    String instanceId = (String) meta.get("instanceId");
    if (!_config.forceFromStart || instanceId.equals(_topologyInstanceId)) {
        SimpleConsumer consumer = _connections.register(partition);
        long offset = (Long) meta.get("offset");
        long nextOffset = (Long) meta.get("nextOffset");
        ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
        for (MessageAndOffset msg : msgs) {
            if (offset == nextOffset) {
                break;
            }
            if (offset > nextOffset) {
                throw new RuntimeException("Error when re-emitting batch. overshot the end offset");
            }
            emit(collector, msg.message());
            offset = msg.nextOffset();
        }
    }
}
 
Example #3
Source File: SimpleKafkaConsumer.java    From twill with Apache License 2.0
/**
 * Creates an Iterator of FetchedMessage based on the given message set. The returned iterator
 * also updates the given offset as it iterates.
 */
private Iterator<FetchedMessage> createFetchedMessages(ByteBufferMessageSet messageSet, final AtomicLong offset) {
  final Iterator<MessageAndOffset> messages = messageSet.iterator();
  return new AbstractIterator<FetchedMessage>() {
    @Override
    protected FetchedMessage computeNext() {
      while (messages.hasNext()) {
        MessageAndOffset message = messages.next();
        long msgOffset = message.offset();
        if (msgOffset < offset.get()) {
          LOG.trace("Received old offset {}, expecting {} on {}. Message Ignored.",
                    msgOffset, offset.get(), topicPart);
          continue;
        }

        fetchedMessage.setPayload(message.message().payload());
        fetchedMessage.setOffset(message.offset());
        fetchedMessage.setNextOffset(message.nextOffset());

        return fetchedMessage;
      }
      return endOfData();
    }
  };
}
 
Example #4
Source File: DemoLowLevelConsumer.java    From KafkaExample with Apache License 2.0
public static void main(String[] args) throws Exception {
	final String topic = "topic1";
	String clientID = "DemoLowLevelConsumer1";
	SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
	FetchRequest req = new FetchRequestBuilder().clientId(clientID)
			.addFetch(topic, 0, 0L, 50).addFetch(topic, 1, 0L, 5000).addFetch(topic, 2, 0L, 1000000).build();
	FetchResponse fetchResponse = simpleConsumer.fetch(req);
	ByteBufferMessageSet messageSet = (ByteBufferMessageSet) fetchResponse.messageSet(topic, 0);
	for (MessageAndOffset messageAndOffset : messageSet) {
		ByteBuffer payload = messageAndOffset.message().payload();
		long offset = messageAndOffset.offset();
		byte[] bytes = new byte[payload.limit()];
		payload.get(bytes);
		System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
	}
}
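A caveat worth noting for this and several later examples: Message.payload() returns the message's backing ByteBuffer, and payload.get(bytes) advances that buffer's position, so reading the same payload twice yields nothing the second time (and sizing the array with limit() assumes the position is still 0). When a payload may be consumed more than once, copying from a duplicate is safer. A small helper sketch; the class and method names are assumptions for illustration, not part of the example above:

import java.nio.ByteBuffer;

final class PayloadUtil {
    // Copies a message payload without disturbing the shared buffer's position,
    // so the same Message can be read again later.
    static byte[] payloadToBytes(ByteBuffer payload) {
        ByteBuffer copy = payload.duplicate(); // independent position/limit over the same bytes
        byte[] bytes = new byte[copy.remaining()];
        copy.get(bytes);
        return bytes;
    }
}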
 
Example #5
Source File: KafkaSimpleConsumer.java    From attic-apex-malhar with Apache License 2.0
@Override
public void run()
{
  long offset = 0;
  while (isAlive) {
    // create a fetch request for topic "topic1", partition 1, the current offset, and a fetch size of 1MB
    FetchRequest fetchRequest = new FetchRequestBuilder().clientId("default_client").addFetch("topic1", 1, offset, 1000000).build();

    // get the message set from the consumer and print the messages out
    ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
    Iterator<MessageAndOffset> itr = messages.iterator();

    while (itr.hasNext() && isAlive) {
      MessageAndOffset msg = itr.next();
      logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()), msg.offset());
      // advance past this message so the next fetch does not re-read it
      offset = msg.nextOffset();
      receiveCount++;
    }
  }
}
 
Example #6
Source File: PartitionManager.java    From storm-kafka-0.8-plus with Apache License 2.0
private void fill() {
    long start = System.nanoTime();
    ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(_spoutConfig, _consumer, _partition, _emittedToOffset);
    long end = System.nanoTime();
    long millis = (end - start) / 1000000;
    _fetchAPILatencyMax.update(millis);
    _fetchAPILatencyMean.update(millis);
    _fetchAPICallCount.incr();
    int numMessages = countMessages(msgs);
    _fetchAPIMessageCount.incrBy(numMessages);

    if (numMessages > 0) {
        LOG.info("Fetched " + numMessages + " messages from: " + _partition);
    }
    for (MessageAndOffset msg : msgs) {
        _pending.add(_emittedToOffset);
        _waitingToEmit.add(new MessageAndRealOffset(msg.message(), _emittedToOffset));
        _emittedToOffset = msg.nextOffset();
    }
    if (numMessages > 0) {
        LOG.info("Added " + numMessages + " messages from: " + _partition + " to internal buffers");
    }
}
 
Example #7
Source File: Kafka08ConsumerClient.java    From incubator-gobblin with Apache License 2.0
private Iterator<KafkaConsumerRecord> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
  try {
    ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
    return Iterators.transform(messageBuffer.iterator(),
        new Function<kafka.message.MessageAndOffset, KafkaConsumerRecord>() {
          @Override
          public KafkaConsumerRecord apply(kafka.message.MessageAndOffset input) {
            return new Kafka08ConsumerRecord(input, partition.getTopicName(), partition.getId());
          }
        });
  } catch (Exception e) {
    log.warn(String.format("Failed to retrieve next message buffer for partition %s: %s. "
        + "The remainder of this partition will be skipped.", partition, e));
    return null;
  }
}
 
Example #8
Source File: KafkaPartitionLevelConsumer.java    From incubator-pinot with Apache License 2.0
private Iterable<MessageAndOffset> buildOffsetFilteringIterable(final ByteBufferMessageSet messageAndOffsets,
    final long startOffset, final long endOffset) {
  return Iterables.filter(messageAndOffsets, input -> {
    // Filter out messages that are null or whose offset falls outside [startOffset, endOffset); endOffset == -1 means no upper bound
    if (input == null || input.offset() < startOffset || (endOffset <= input.offset() && endOffset != -1)) {
      return false;
    }

    // Check the message's checksum
    // TODO We might want to have better handling of this situation, maybe try to fetch the message again?
    if (!input.message().isValid()) {
      LOGGER.warn("Discarded message with invalid checksum in partition {} of topic {}", _partition, _topic);
      return false;
    }

    return true;
  });
}
 
Example #9
Source File: PartitionConsumer.java    From jstorm with Apache License 2.0
private void fillMessages() {
    ByteBufferMessageSet msgs;
    try {
        long start = System.currentTimeMillis();
        msgs = consumer.fetchMessages(partition, emittingOffset + 1);

        if (msgs == null) {
            LOG.error("fetched null message set from offset {}", emittingOffset + 1);
            return;
        }

        int count = 0;
        for (MessageAndOffset msg : msgs) {
            count += 1;
            emittingMessages.add(msg);
            emittingOffset = msg.offset();
            pendingOffsets.add(emittingOffset);
            LOG.debug("fillMessages fetched a message: {}, offset: {}", msg.message().toString(), msg.offset());
        }
        long end = System.currentTimeMillis();
        LOG.info("fetched messages from partition: {}, offset: {}, size: {} bytes, count: {}, time: {} ms",
                partition, emittingOffset, msgs.sizeInBytes(), count, end - start);
    } catch (Exception e) {
        LOG.error(e.getMessage(), e);
    }
}
 
Example #10
Source File: KafkaClientTest.java    From elasticsearch-river-kafka with Apache License 2.0
public void testFetch()
{
	expect(mockConsumer.fetch(anyObject(FetchRequest.class))).andReturn(new ByteBufferMessageSet(Collections.<Message>emptyList()));
	replay(mockConsumer, mockCurator);
	client.fetch("my_topic", 0, 1717, 1024);
	verify(mockConsumer, mockCurator);
}
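When a test needs a non-empty result instead, the same javaapi constructor also accepts a list of kafka.message.Message instances, as a minimal sketch; the payload text is arbitrary and the expect/mock context is assumed to be the test above:

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.Message;

// Inside a test like the one above:
ByteBufferMessageSet singleMessageSet = new ByteBufferMessageSet(
        Arrays.asList(new Message("hello".getBytes(StandardCharsets.UTF_8))));
expect(mockConsumer.fetch(anyObject(FetchRequest.class))).andReturn(singleMessageSet);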
 
Example #11
Source File: TridentKafkaEmitter.java    From storm-kafka-0.8-plus with Apache License 2.0
private Map doEmitNewPartitionBatch(SimpleConsumer consumer, Partition partition, TridentCollector collector, Map lastMeta) {
    long offset;
    if (lastMeta != null) {
        String lastInstanceId = null;
        Map lastTopoMeta = (Map) lastMeta.get("topology");
        if (lastTopoMeta != null) {
            lastInstanceId = (String) lastTopoMeta.get("id");
        }
        if (_config.forceFromStart && !_topologyInstanceId.equals(lastInstanceId)) {
            offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config.startOffsetTime);
        } else {
            offset = (Long) lastMeta.get("nextOffset");
        }
    } else {
        offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config);
    }
    ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
    long endoffset = offset;
    for (MessageAndOffset msg : msgs) {
        emit(collector, msg.message());
        endoffset = msg.nextOffset();
    }
    Map newMeta = new HashMap();
    newMeta.put("offset", offset);
    newMeta.put("nextOffset", endoffset);
    newMeta.put("instanceId", _topologyInstanceId);
    newMeta.put("partition", partition.partition);
    newMeta.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
    newMeta.put("topic", _config.topic);
    newMeta.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
    return newMeta;
}
 
Example #12
Source File: TridentKafkaEmitter.java    From storm-kafka-0.8-plus with Apache License 2.0
private ByteBufferMessageSet fetchMessages(SimpleConsumer consumer, Partition partition, long offset) {
    long start = System.nanoTime();
    ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(_config, consumer, partition, offset);
    long end = System.nanoTime();
    long millis = (end - start) / 1000000;
    _kafkaMeanFetchLatencyMetric.update(millis);
    _kafkaMaxFetchLatencyMetric.update(millis);
    return msgs;
}
 
Example #13
Source File: PartitionManager.java    From storm-kafka-0.8-plus with Apache License 2.0
private int countMessages(ByteBufferMessageSet messageSet) {
    int counter = 0;
    for (MessageAndOffset messageAndOffset : messageSet) {
        counter = counter + 1;
    }
    return counter;
}
 
Example #14
Source File: KafkaUtils.java    From storm-kafka-0.8-plus with Apache License 2.0
public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset) {
    ByteBufferMessageSet msgs = null;
    String topic = config.topic;
    int partitionId = partition.partition;
    for (int errors = 0; errors < 2 && msgs == null; errors++) {
        FetchRequestBuilder builder = new FetchRequestBuilder();
        FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes)
                .clientId(config.clientId).build();
        FetchResponse fetchResponse;
        try {
            fetchResponse = consumer.fetch(fetchRequest);
        } catch (Exception e) {
            if (e instanceof ConnectException) {
                throw new FailedFetchException(e);
            } else {
                throw new RuntimeException(e);
            }
        }
        if (fetchResponse.hasError()) {
            KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
            if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange && errors == 0) {
                long startOffset = getOffset(consumer, topic, partitionId, config.startOffsetTime);
                LOG.warn("Got fetch request with offset out of range: [" + offset + "]; " +
                        "retrying with default start offset time from configuration. " +
                        "configured start offset time: [" + config.startOffsetTime + "] offset: [" + startOffset + "]");
                offset = startOffset;
            } else {
                String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
                LOG.error(message);
                throw new FailedFetchException(message);
            }
        } else {
            msgs = fetchResponse.messageSet(topic, partitionId);
        }
    }
    return msgs;
}
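A typical caller of this utility walks the returned set and advances its offset with nextOffset(), as the PartitionManager example above does. A condensed sketch of that calling pattern, reusing the consumer, config, and partition from the surrounding examples; the process() handler is a placeholder assumption:

long offset = KafkaUtils.getOffset(consumer, config.topic, partition.partition, config.startOffsetTime);
ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(config, consumer, partition, offset);
for (MessageAndOffset msg : msgs) {
    process(msg.message()); // placeholder for application handling
    offset = msg.nextOffset(); // resume point for the next fetch
}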
 
Example #15
Source File: KafkaUtilsTest.java    From storm-kafka-0.8-plus with Apache License 2.0
@Test
public void fetchMessage() throws Exception {
    String value = "test";
    createTopicAndSendMessage(value);
    long offset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offset);
    String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
    assertThat(message, is(equalTo(value)));
}
 
Example #16
Source File: KafkaUtilsTest.java    From storm-kafka-0.8-plus with Apache License 2.0
@Test
public void fetchMessagesWithInvalidOffsetAndDefaultHandlingEnabled() throws Exception {
    config = new KafkaConfig(brokerHosts, "newTopic");
    String value = "test";
    createTopicAndSendMessage(value);
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), -99);
    String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
    assertThat(message, is(equalTo(value)));
}
 
Example #17
Source File: KafkaUtilsTest.java    From storm-kafka-0.8-plus with Apache License 2.0
@Test
public void generateTuplesWithKeyAndKeyValueScheme() {
    config.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
    String value = "value";
    String key = "key";
    createTopicAndSendMessage(key, value);
    ByteBufferMessageSet messageAndOffsets = getLastMessage();
    for (MessageAndOffset msg : messageAndOffsets) {
        Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
        assertEquals(ImmutableMap.of(key, value), lists.iterator().next().get(0));
    }
}
 
Example #18
Source File: KafkaUtilsTest.java    From storm-kafka-0.8-plus with Apache License 2.0
@Test
public void generateTuplesWithValueSchemeAndKeyValueMessage() {
    config.scheme = new SchemeAsMultiScheme(new StringScheme());
    String value = "value";
    String key = "key";
    createTopicAndSendMessage(key, value);
    ByteBufferMessageSet messageAndOffsets = getLastMessage();
    for (MessageAndOffset msg : messageAndOffsets) {
        Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
        assertEquals(value, lists.iterator().next().get(0));
    }
}
 
Example #19
Source File: KafkaUtilsTest.java    From storm-kafka-0.8-plus with Apache License 2.0
private void runGetValueOnlyTuplesTest() {
    String value = "value";
    createTopicAndSendMessage(null, value);
    ByteBufferMessageSet messageAndOffsets = getLastMessage();
    for (MessageAndOffset msg : messageAndOffsets) {
        Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
        assertEquals(value, lists.iterator().next().get(0));
    }
}
 
Example #20
Source File: KafkaPartitionReader.java    From Scribengin with GNU Affero General Public License v3.0
public List<byte[]> execute() throws Exception {
  FetchRequest req = new FetchRequestBuilder()
      .clientId(name)
      .addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize)
      .minBytes(1)
      .maxWait(maxWait)
      .build();
  
  FetchResponse fetchResponse = consumer.fetch(req);
  if(fetchResponse.hasError()) {
    short errorCode = fetchResponse.errorCode(topic, partitionMetadata.partitionId());
    String msg = "Kafka error code = " + errorCode + ", partition " + partitionMetadata.partitionId();
    throw new Exception(msg);
  }
  List<byte[]> holder = new ArrayList<byte[]>();
  ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  int count = 0;
  for(MessageAndOffset messageAndOffset : messageSet) {
    if (messageAndOffset.offset() < currentOffset) continue; //old offset, ignore
    ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    holder.add(bytes);
    currentOffset = messageAndOffset.nextOffset();
    count++;
    if(count == maxRead) break;
  }
  return holder;
}
 
Example #21
Source File: KafkaLeaderReader.java    From arcusplatform with Apache License 2.0
private int dispatch(FetchResponse response) {
   int numDispatched = 0;
   for(TopicAndPartition tap: new ArrayList<>(offsets.keySet())) {
      short errorCode = response.errorCode(tap.topic(), tap.partition());
      if(errorCode != 0) {
         logger.warn("Error reading from topic: [{}] partition: [{}]", tap.topic(), tap.partition(), ErrorMapping.exceptionFor(errorCode));
         continue;
      }

      ByteBufferMessageSet messageSet = response.messageSet(tap.topic(), tap.partition());
      for(MessageAndOffset mao: messageSet) {
         Long offset = offsets.get(tap);
         if(offset != null && offset > mao.offset()) {
            // skip older offsets
            continue;
         }
         KafkaConsumer handler = handlers.computeIfAbsent(tap, handlerFactory);
         if(handler == null) {
            logger.debug("No handler for topic: [{}] partition: [{}], this partition won't be processed", tap.topic(), tap.partition());
            offsets.remove(tap);
            handlers.remove(tap);
            break;
         }
         if(handler.apply(tap, mao.message())) {
            numDispatched++;
            offsets.put(tap, mao.nextOffset());
         }
         else {
            logger.debug("Done processing topic: [{}] partition: [{}]", tap.topic(), tap.partition());
            offsets.remove(tap);
            handlers.remove(tap);
            break;
         }
      }
   }

   return numDispatched;
}
 
Example #22
Source File: KafkaRiver.java    From elasticsearch-river-kafka with Apache License 2.0
void processNonEmptyMessageSet(ByteBufferMessageSet msgs)
{
	logger.debug("Processing {} bytes of messages ...", msgs.validBytes());
	BulkRequestBuilder bulkRequestBuilder = client.prepareBulk();
	handleMessages(bulkRequestBuilder, msgs);
	executeBuilder(bulkRequestBuilder);
	offset += msgs.validBytes();
	kafka.saveOffset(riverConfig.topic, riverConfig.partition, offset);
}
 
Example #23
Source File: KafkaRiver.java    From elasticsearch-river-kafka with Apache License 2.0
void handleMessages(BulkRequestBuilder bulkRequestBuilder, ByteBufferMessageSet msgs)
{
	long numMsg = 0;
	for(MessageAndOffset mo : msgs)
	{
		++numMsg;
		++stats.numMessages;
		try {
			msgHandler.handle(bulkRequestBuilder, mo.message());
		} catch (Exception e) {
			logger.warn("Failed handling message", e);
		}
	}
	logger.debug("handleMessages processed {} messages", numMsg);
}
 
Example #24
Source File: KafkaPartitionLevelConsumerTest.java    From incubator-pinot with Apache License 2.0
@Override
public ByteBufferMessageSet messageSet(String topic, int partition) {
  if (errorMap.containsKey(new TopicAndPartition(topic, partition))) {
    throw new IllegalArgumentException();
  } else {
    // TODO Maybe generate dummy messages here?
    return new ByteBufferMessageSet(Collections.<Message>emptyList());
  }
}
 
Example #25
Source File: KafkaWrapper.java    From incubator-gobblin with Apache License 2.0
private Iterator<MessageAndOffset> getIteratorFromFetchResponse(FetchResponse fetchResponse,
    KafkaPartition partition) {
  try {
    ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
    return messageBuffer.iterator();
  } catch (Exception e) {
    LOG.warn(String.format("Failed to retrieve next message buffer for partition %s: %s. "
        + "The remainder of this partition will be skipped.", partition, e));
    return null;
  }
}
 
Example #26
Source File: SimpleKafkaConsumer.java    From twill with Apache License 2.0
/**
 * Calls the message callback with the given message set.
 */
private void invokeCallback(ByteBufferMessageSet messages, AtomicLong offset) {
  long savedOffset = offset.get();
  try {
    offset.set(callback.onReceived(createFetchedMessages(messages, offset)));
  } catch (Throwable t) {
    LOG.error("Callback throws exception. Retry from offset {} for {}", startOffset, topicPart, t);
    offset.set(savedOffset);
  }
}
 
Example #27
Source File: SimpleKafkaConsumer.java    From twill with Apache License 2.0 5 votes vote down vote up
/**
 * Sleeps if the message set is empty.
 * @return {@code true} if it is empty, {@code false} otherwise.
 */
private boolean sleepIfEmpty(ByteBufferMessageSet messages) {
  if (Iterables.isEmpty(messages)) {
    LOG.trace("No message fetched. Sleep for {} ms before next fetch.", EMPTY_FETCH_WAIT);
    try {
      TimeUnit.MILLISECONDS.sleep(EMPTY_FETCH_WAIT);
    } catch (InterruptedException e) {
      // It's interrupted from stop, ok to ignore.
    }
    return true;
  }
  return false;
}
 
Example #28
Source File: SimpleConsumerDemo.java    From javabase with Apache License 2.0
private static void printMessages(ByteBufferMessageSet messageSet) throws UnsupportedEncodingException {
    for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println(new String(bytes, "UTF-8"));
    }
}
 
Example #29
Source File: KafkaLeaderReader.java    From arcusplatform with Apache License 2.0
public void update(ByteBufferMessageSet bbms) {
   boolean anyMatches = false;
   V value = null;
   for(MessageAndOffset mao: bbms) {
      anyMatches = true;

      value = factory.apply(mao.message());
      logger.trace("Scanning for [{}] in [{}] at [{}]@[{},{},{}]: ", target, tap.partition(), value, startOffset, mao.offset(), endOffset);
      int delta = target.compareTo(value);
      if(delta == 0) {
         logger.debug("Found exact offset for partition: [{}] value: [{}]", tap.partition(), value);
         this.offset = mao.offset();
         return;
      }
      else if(delta > 0) { // not far enough
         this.startOffset = mao.offset();
      }
      else if(delta < 0) { // too far
         this.endOffset = mao.offset();
         break; // don't process the next message or we'll think we're past the end
      }
   }

   if((endOffset - startOffset) < 2) {
      logger.debug("Found offset for partition: [{}] value: [{}]", tap.partition(), value);
      this.offset = this.endOffset; // start with the next message after value
   }
   else if(!anyMatches) {
      logger.debug("Reached the end of partition [{}] using offset [{}]", tap.partition(), endOffset);
      this.offset = this.endOffset;
   }
}
 
Example #30
Source File: KafkaConsumer.java    From jstorm with Apache License 2.0
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {
    String topic = config.topic;
    FetchRequest req = new FetchRequestBuilder().clientId(config.clientId)
            .addFetch(topic, partition, offset, config.fetchMaxBytes)
            .maxWait(config.fetchWaitMaxMs).build();
    FetchResponse fetchResponse = null;
    SimpleConsumer simpleConsumer = null;
    try {
        simpleConsumer = findLeaderConsumer(partition);
        if (simpleConsumer == null) {
            return null;
        }
        fetchResponse = simpleConsumer.fetch(req);
    } catch (Exception e) {
        if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException
                || e instanceof UnresolvedAddressException) {
            LOG.warn("Network error when fetching messages:", e);
            if (simpleConsumer != null) {
                throw new KafkaException("Network error when fetching messages: " + simpleConsumer.host() + ":"
                        + simpleConsumer.port() + " , " + e.getMessage(), e);
            }
        } else {
            throw new RuntimeException(e);
        }
    }
    // fetchResponse stays null if findLeaderConsumer(partition) failed with a network error
    if (fetchResponse == null) {
        return null;
    }
    if (fetchResponse.hasError()) {
        short code = fetchResponse.errorCode(topic, partition);
        if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
            offset = getOffset(topic, partition, config.startOffsetTime);
        }
        if (leaderBroker != null) {
            LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":"
                    + leaderBroker.port() + "] partition[" + partition + "] error:" + code);
        }
        return null;
    } else {
        return fetchResponse.messageSet(topic, partition);
    }
}