Java Code Examples for kafka.message.MessageAndMetadata#partition()

The following examples show how to use kafka.message.MessageAndMetadata#partition(). The originating project and source file are noted above each example.
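Before the per-project examples, here is a minimal, self-contained sketch of how a MessageAndMetadata is typically obtained with the legacy (pre-0.9) high-level consumer; the ZooKeeper address, group id, and topic name are placeholders:

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class PartitionPrinter {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:2181"); // placeholder address
    props.put("group.id", "partition-printer");       // placeholder group id

    ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    Map<String, List<KafkaStream<byte[], byte[]>>> streams =
        connector.createMessageStreams(Collections.singletonMap("my-topic", 1));
    ConsumerIterator<byte[], byte[]> it = streams.get("my-topic").get(0).iterator();

    // hasNext() blocks until a message arrives (consumer.timeout.ms is unset here)
    while (it.hasNext()) {
      MessageAndMetadata<byte[], byte[]> mm = it.next();
      // partition() returns the int id of the partition the message was read from
      System.out.printf("partition=%d offset=%d%n", mm.partition(), mm.offset());
    }
    connector.shutdown();
  }
}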
Example 1
Source File: KafkaIndexingManager.java    From linden with Apache License 2.0
public KafkaIndexingManager(final LindenConfig lindenConfig, ShardingStrategy shardingStrategy,
                            LindenCore lindenCore, DataProvider<MessageAndMetadata<byte[], byte[]>> provider) {
  super(provider, lindenConfig, lindenCore, new Function<MessageAndMetadata<byte[], byte[]>, LindenIndexRequest>() {
    @Override
    public LindenIndexRequest apply(MessageAndMetadata<byte[], byte[]> messageAndMetadata) {
      LindenIndexRequest indexRequest = null;
      long offset = messageAndMetadata.offset();
      // partition() returns the id of the Kafka partition the message was consumed from
      long partition = messageAndMetadata.partition();
      String message = new String(messageAndMetadata.message());
      try {
        indexRequest = LindenIndexRequestParser.parse(lindenConfig.getSchema(), message);
        LOGGER.info("Parse index request : id={}, route={}, type={}, content({}/{})={}", indexRequest.getId(),
                    indexRequest.getRouteParam(), indexRequest.getType(), partition, offset, message);
      } catch (IOException e) {
        LOGGER.error("Parse index request failed : {} - {}", message, Throwables.getStackTraceAsString(e));
      }
      return indexRequest;
    }
  }, shardingStrategy);
}
 
Example 2
Source File: KafkaConsumer08.java    From datacollector with Apache License 2.0
@Override
public MessageAndOffset read() throws StageException {
  try {
    // hasNext() blocks indefinitely if consumer.timeout.ms is set to -1.
    // If consumer.timeout.ms is set to a value such as 6000, a ConsumerTimeoutException
    // is thrown when no message is written to the Kafka topic within that time.
    if(consumerIterator.hasNext()) {
      MessageAndMetadata<byte[], byte[]> messageAndMetadata = consumerIterator.next();
      byte[] message = messageAndMetadata.message();
      long offset = messageAndMetadata.offset();
      int partition = messageAndMetadata.partition();
      return new MessageAndOffset(messageAndMetadata.key(), message, offset, partition);
    }
    return null;
  } catch (ConsumerTimeoutException e) {
    /* For the high-level consumer, the fetching logic is handled by a background
       fetcher thread and hidden from the user. In either case of
       1) the broker being down or
       2) no message being available,
       the fetcher thread keeps retrying while the user thread waits for it to put
       data into the buffer, until the timeout expires. In short, the high-level
       consumer is designed so that users do not have to worry about
       connect/reconnect issues. */
    return null;
  }
}
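As the comments in the example above note, the timeout behavior is fixed at construction time through consumer.timeout.ms. A minimal configuration fragment illustrating this (the ZooKeeper address and group id are placeholders):

Properties props = new Properties();
props.put("zookeeper.connect", "localhost:2181"); // placeholder address
props.put("group.id", "my-group");                // placeholder group id
// -1 (the default) makes ConsumerIterator.hasNext() block until data arrives;
// a positive value such as 6000 makes hasNext() throw ConsumerTimeoutException
// after that many milliseconds without data, which read() above maps to null.
props.put("consumer.timeout.ms", "6000");
ConsumerConfig config = new ConsumerConfig(props);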
 
Example 3
Source File: LegacyKafkaMessageIterator.java    From secor with Apache License 2.0
@Override
public Message next() {
    MessageAndMetadata<byte[], byte[]> kafkaMessage;
    try {
        kafkaMessage = mIterator.next();
    } catch (ConsumerTimeoutException e) {
        throw new LegacyConsumerTimeoutException(e);
    }

    // Default to 0; use the Kafka-assigned timestamp only when configured to do so.
    long timestamp = 0L;
    if (mConfig.useKafkaTimestamp()) {
        timestamp = mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(kafkaMessage);
    }

    return new Message(kafkaMessage.topic(), kafkaMessage.partition(),
            kafkaMessage.offset(), kafkaMessage.key(),
            kafkaMessage.message(), timestamp, null);
}
 
Example 4
Source File: MessageResource.java    From dropwizard-kafka-http with Apache License 2.0
public Message(MessageAndMetadata<byte[], byte[]> message) {
    this.topic = message.topic();

    this.key = message.key() != null ? new String(message.key(), StandardCharsets.UTF_8) : null;
    this.message = new String(message.message(), StandardCharsets.UTF_8);

    this.partition = message.partition();
    this.offset = message.offset();
}
 
Example 5
Source File: InjectorSupport.java    From wisp with Apache License 2.0
@Override
public void run() {

    LOGGER.info("start to run Injector{} for Topic{}", WispKafkaInjector.class.toString(), topic);

    while (it.hasNext()) {

        try {

            MessageAndMetadata<byte[], byte[]> mm = it.next();
            String message = new String(mm.message());

            // partition && offset
            long partition = mm.partition();
            long offset = mm.offset();

            MysqlEntry entry = gson.fromJson(message, MysqlEntry.class);

            // wrap
            MysqlEntryWrap mysqlEntryWrap = new MysqlEntryWrap(topic, entry);

            LOGGER.debug(message);

            // compute the elapsed time since the MySQL event and the Canal event
            long now = System.currentTimeMillis();
            long elapsedSinceMysql = (now - entry.getTime()) / 1000;
            long elapsedSinceCanal = (now - entry.getCanalTime()) / 1000;

            String originTableName = entry.getTable();

            if (injectorEventProcessTemplate != null) {
                injectorEventProcessTemplate.processEntry(mysqlEntryWrap);
            }

            LOGGER.info(
                    "Topic({}) Succeed to do Event{} inject from Table{}, mysql_delay={}, "
                            + "canal_delay={}, partition={}, offset={}",
                    topic,
                    entry.getEvent(),
                    originTableName, elapsedSinceMysql, elapsedSinceCanal, partition, offset);

        } catch (Throwable e) {

            LOGGER.error("Failed to inject message from topic {}", topic, e);
        }
    }

}
 
Example 6
Source File: KafkaAvroJobStatusMonitorTest.java    From incubator-gobblin with Apache License 2.0
private DecodeableKafkaRecord convertMessageAndMetadataToDecodableKafkaRecord(MessageAndMetadata messageAndMetadata) {
  ConsumerRecord consumerRecord = new ConsumerRecord<>(TOPIC, messageAndMetadata.partition(),
      messageAndMetadata.offset(), messageAndMetadata.key(), messageAndMetadata.message());
  return new Kafka09ConsumerClient.Kafka09ConsumerRecord(consumerRecord);
}