Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecord#topic()

The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecord#topic(). They are drawn from open source projects; the source file, project, and license are noted above each example.
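Before the project examples, here is a minimal, self-contained sketch of the call itself (not taken from any of the projects below): it subscribes a consumer to a topic, polls once, and reads the topic name from each record via ConsumerRecord#topic(). The broker address, group id, and topic name are illustrative placeholders.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class TopicNameExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "topic-name-example");       // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
            for (ConsumerRecord<String, String> record : records) {
                // ConsumerRecord#topic() returns the name of the topic this record was read from
                System.out.printf("topic=%s partition=%d offset=%d%n",
                        record.topic(), record.partition(), record.offset());
            }
        }
    }
}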
Example 1
Source File: TopologyService.java    From eventapis with Apache License 2.0
public void onEventMessage(ConsumerRecord<String, Serializable> record, PublishedEventWrapper eventWrapper) {
    try {
        String topic = record.topic();
        String key = record.key();
        log.info("Event: " + topic + " Key:" + key + " Event:" + eventWrapper.toString());
        if (topic.equals("operation-events"))
            return;
        BaseEvent baseEvent = eventReader.readValue(eventWrapper.getEvent());

        List<String> targetList = new ArrayList<>(topicsMap.get(topic).getServiceDataHashMap().keySet());

        operationsMap.putIfAbsent(key,
                new Topology(eventWrapper.getContext().getOpId(), eventWrapper.getContext().getParentOpId()),
                calculateTimeout(eventWrapper.getContext()), TimeUnit.MILLISECONDS);
        operationsMap.executeOnKey(key, new EventTopologyUpdater(
                eventWrapper, baseEvent.getEventType(), baseEvent.getSender(), targetList, topic, new Partition(record.partition(), record.offset())));

    } catch (IOException e) {
        log.error("Error While Handling Event:" + e.getMessage(), e);
    }
}
 
Example 2
Source File: AbstractDomainEventProcessor.java    From integration-patterns with MIT License
@Transactional
@Override
public EventProcessingState processConsumerRecord(final ConsumerRecord<String, String> consumerRecord) {
    try {
        final E eventMessage = eventParser.parseMessage(consumerRecord.value(), eventType);
        final long version = eventMessage.getVersion();
        final String key = eventMessage.getKey();
        final String topic = consumerRecord.topic();
        if (skipMessage(topic, key, version)) {
            LOG.info("Skipping old {} message with key {} and version {}", topic, key,
                    version);
            return EventProcessingState.SUCCESS;
        }
        final EventProcessingState state = processEvent(eventMessage);
        if (state.isFinalState()) {
            processedEventService.updateLastProcessedVersion(topic, key, version);
        }
        return state;
    } catch (final MessageProcessingException e) {
        LOG.warn("Failed to create valid {} object from {}", eventType.getSimpleName(),
                ConsumerRecordLoggingHelper.toLogSafeString(consumerRecord, topicConfig.isPayloadSensitive()), e);
        return e.getState();
    }
}
 
Example 3
Source File: RoutableProtobufKafkaIngressDeserializer.java    From flink-statefun with Apache License 2.0
@Override
public Message deserialize(ConsumerRecord<byte[], byte[]> input) {
  final String topic = input.topic();
  final byte[] payload = input.value();
  final String id = new String(input.key(), StandardCharsets.UTF_8);

  final RoutingConfig routingConfig = routingConfigs.get(topic);
  if (routingConfig == null) {
    throw new IllegalStateException(
        "Consumed a record from topic [" + topic + "], but no routing config was specified.");
  }
  return AutoRoutable.newBuilder()
      .setConfig(routingConfig)
      .setId(id)
      .setPayloadBytes(ByteString.copyFrom(payload))
      .build();
}
 
Example 4
Source File: Messages.java    From ja-micro with Apache License 2.0
static Message<? extends com.google.protobuf.Message> fromKafka(com.google.protobuf.Message protoMessage, Envelope envelope, ConsumerRecord<String, byte[]> record) {
    boolean wasReceived = true;

    Topic topic = new Topic(record.topic());
    String partitioningKey = record.key();
    int partitionId = record.partition();
    long offset = record.offset();

    String messageId = envelope.getMessageId();
    String correlationId = envelope.getCorrelationId();

    MessageType type = MessageType.of(protoMessage);

    String requestCorrelationId = envelope.getRequestCorrelationId();
    Topic replyTo = new Topic(envelope.getReplyTo());

    Metadata meta = new Metadata(wasReceived, topic, partitioningKey, partitionId, offset, messageId, correlationId, requestCorrelationId, replyTo, type);
    return new Message<>(protoMessage, meta);
}
 
Example 5
Source File: NewApiTopicConsumer.java    From azeroth with Apache License 2.0
@Override
public Boolean call() {

    logger.debug("Number of records received : {}", records.count());
    try {
        for (final ConsumerRecord<String, Serializable> record : records) {
            TopicPartition tp = new TopicPartition(record.topic(), record.partition());
            logger.info("Record received topicPartition : {}, offset : {}", tp,
                record.offset());
            partitionToUncommittedOffsetMap.put(tp, record.offset());

            processConsumerRecords(record);
        }
    } catch (Exception e) {
        logger.error("Error while consuming", e);
    }
    return true;
}
 
Example 6
Source File: SecorKafkaMessageIterator.java    From secor with Apache License 2.0
@Override
public Message next() {
    if (mRecordsBatch.isEmpty()) {
        mKafkaConsumer.poll(Duration.ofSeconds(mPollTimeout)).forEach(mRecordsBatch::add);
    }

    if (mRecordsBatch.isEmpty()) {
        return null;
    } else {
        ConsumerRecord<byte[], byte[]> consumerRecord = mRecordsBatch.pop();
        List<MessageHeader> headers = new ArrayList<>();
        consumerRecord.headers().forEach(header -> headers.add(new MessageHeader(header.key(), header.value())));
        return new Message(consumerRecord.topic(), consumerRecord.partition(), consumerRecord.offset(),
                consumerRecord.key(), consumerRecord.value(), consumerRecord.timestamp(), headers);
    }
}
 
Example 7
Source File: SyncRecordProcessor.java    From kafka-examples with Apache License 2.0
@Override
public boolean process(KafkaConsumer<K, V> consumer, ConsumerRecords<K, V> records) throws InterruptedException {
	
	long lastCommitTimeInMs = System.currentTimeMillis();
	// process records
	for (ConsumerRecord<K, V> record : records) {
		
		TopicPartition tp = new TopicPartition(record.topic(), record.partition());
		logger.info("C : {}, Record received partition : {}, key : {}, value : {}, offset : {}", 
				clientId, tp, record.key(), record.value(), record.offset());
		
		partitionToUncommittedOffsetMap.put(tp, record.offset());
		Thread.sleep(100);
		
		// commit offset of processed messages
		if((System.currentTimeMillis() - lastCommitTimeInMs) > 1000) {
			commit(consumer);
			lastCommitTimeInMs = System.currentTimeMillis();
		}
	}
	commit(consumer); // [OR] consumer.commitSync();
	return true;
}
 
Example 8
Source File: SecorKafkaClient.java    From secor with Apache License 2.0
private Message readSingleMessage(KafkaConsumer<byte[], byte[]> kafkaConsumer) {
    int pollAttempts = 0;
    Message message = null;
    while (pollAttempts < MAX_READ_POLL_ATTEMPTS) {
        Iterator<ConsumerRecord<byte[], byte[]>> records = kafkaConsumer.poll(Duration.ofSeconds(mPollTimeout)).iterator();
        if (!records.hasNext()) {
            pollAttempts++;
        } else {
            ConsumerRecord<byte[], byte[]> record = records.next();
            List<MessageHeader> headers = new ArrayList<>();
            record.headers().forEach(header -> headers.add(new MessageHeader(header.key(), header.value())));
            message = new Message(record.topic(), record.partition(), record.offset(), record.key(), record.value(), record.timestamp(), headers);
            break;
        }
    }

    if (message == null) {
        LOG.warn("unable to fetch message after " + MAX_READ_POLL_ATTEMPTS + " Retries");
    }
    return message;
}
 
Example 9
Source File: KafkaMirrorMakerConnectorTask.java    From brooklin with BSD 2-Clause "Simplified" License
@Override
protected DatastreamProducerRecord translate(ConsumerRecord<?, ?> fromKafka, Instant readTime) {
  long eventsSourceTimestamp =
      fromKafka.timestampType() == TimestampType.LOG_APPEND_TIME ? fromKafka.timestamp() : readTime.toEpochMilli();
  HashMap<String, String> metadata = new HashMap<>();
  metadata.put(KAFKA_ORIGIN_CLUSTER, _mirrorMakerSource.getBrokerListString());
  String topic = fromKafka.topic();
  metadata.put(KAFKA_ORIGIN_TOPIC, topic);
  int partition = fromKafka.partition();
  String partitionStr = String.valueOf(partition);
  metadata.put(KAFKA_ORIGIN_PARTITION, partitionStr);
  long offset = fromKafka.offset();
  String offsetStr = String.valueOf(offset);
  metadata.put(KAFKA_ORIGIN_OFFSET, offsetStr);
  metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(eventsSourceTimestamp));
  BrooklinEnvelope envelope = new BrooklinEnvelope(fromKafka.key(), fromKafka.value(), null, metadata);
  DatastreamProducerRecordBuilder builder = new DatastreamProducerRecordBuilder();
  builder.addEvent(envelope);
  builder.setEventsSourceTimestamp(eventsSourceTimestamp);
  builder.setSourceCheckpoint(new KafkaMirrorMakerCheckpoint(topic, partition, offset).toString());
  builder.setDestination(_datastreamTask.getDatastreamDestination()
      .getConnectionString()
      .replace(KafkaMirrorMakerConnector.MM_TOPIC_PLACEHOLDER,
          StringUtils.isBlank(_destinationTopicPrefix) ? topic : _destinationTopicPrefix + topic));
  if (_isIdentityMirroringEnabled) {
    builder.setPartition(partition);
  }
  return builder.build();
}
 
Example 10
Source File: MonitorConsumer.java    From kafka-monitor with Apache License 2.0
public MonitorConsumerRecord receive() {
    if (recordIterator == null || !recordIterator.hasNext())
        recordIterator = consumer.poll(Long.MAX_VALUE).iterator();

    ConsumerRecord<String, String> record = recordIterator.next();
    return new MonitorConsumerRecord(record.topic(), record.partition(), record.offset(), record.key(), record.value());
}
 
Example 11
Source File: KafkaRowConverterTest.java    From calcite with Apache License 2.0
/**
 * Parses and reformats a Kafka message from the consumer to fit the row schema
 * defined by {@link #rowDataType(String)}.
 *
 * @param message the raw Kafka message record
 * @return fields in the row
 */
@Override public Object[] toRow(final ConsumerRecord<String, String> message) {
  Object[] fields = new Object[3];
  fields[0] = message.topic();
  fields[1] = message.partition();
  fields[2] = message.timestampType().name;

  return fields;
}
 
Example 12
Source File: OffsetCommitSyncSingle.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();

    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                // do some logical processing.
                long offset = record.offset();
                TopicPartition partition = new TopicPartition(record.topic(), record.partition());
                consumer.commitSync(Collections
                        .singletonMap(partition, new OffsetAndMetadata(offset + 1)));
            }
        }

//        TopicPartition tp1 = new TopicPartition(topic, 0);
//        TopicPartition tp2 = new TopicPartition(topic, 1);
//        TopicPartition tp3 = new TopicPartition(topic, 2);
//        TopicPartition tp4 = new TopicPartition(topic, 3);
//        System.out.println(consumer.committed(tp1) + " : " + consumer.position(tp1));
//        System.out.println(consumer.committed(tp2) + " : " + consumer.position(tp2));
//        System.out.println(consumer.committed(tp3) + " : " + consumer.position(tp3));
//        System.out.println(consumer.committed(tp4) + " : " + consumer.position(tp4));
    } finally {
        consumer.close();
    }
}
 
Example 13
Source File: KafkaConsumerProxy.java    From samza with Apache License 2.0
private Map<SystemStreamPartition, List<IncomingMessageEnvelope>> processResults(ConsumerRecords<K, V> records) {
  if (records == null) {
    throw new SamzaException("Received null 'records' after polling consumer in KafkaConsumerProxy " + this);
  }

  Map<SystemStreamPartition, List<IncomingMessageEnvelope>> results = new HashMap<>(records.count());
  // Parse the returned records and convert them into the IncomingMessageEnvelope.
  for (ConsumerRecord<K, V> record : records) {
    int partition = record.partition();
    String topic = record.topic();
    TopicPartition tp = new TopicPartition(topic, partition);

    updateMetrics(record, tp);

    SystemStreamPartition ssp = topicPartitionToSSP.get(tp);
    List<IncomingMessageEnvelope> messages = results.computeIfAbsent(ssp, k -> new ArrayList<>());

    IncomingMessageEnvelope incomingMessageEnvelope = handleNewRecord(record, ssp);
    messages.add(incomingMessageEnvelope);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("# records per SSP:");
    for (Map.Entry<SystemStreamPartition, List<IncomingMessageEnvelope>> e : results.entrySet()) {
      List<IncomingMessageEnvelope> list = e.getValue();
      LOG.debug(e.getKey() + " = " + ((list == null) ? 0 : list.size()));
    }
  }

  return results;
}
 
Example 14
Source File: ConsumerRecordConverter.java    From beast with Apache License 2.0
public List<Record> convert(final Iterable<ConsumerRecord<byte[], byte[]>> messages) throws InvalidProtocolBufferException {
    ArrayList<Record> records = new ArrayList<>();
    for (ConsumerRecord<byte[], byte[]> message : messages) {
        byte[] value = message.value();
        Map<String, Object> columns = rowMapper.map(parser.parse(value));
        OffsetInfo offsetInfo = new OffsetInfo(message.topic(), message.partition(), message.offset(), message.timestamp());
        addMetadata(columns, offsetInfo);
        records.add(new Record(offsetInfo, columns));
    }
    return records;
}
 
Example 15
Source File: KafkaSourceTask.java    From MirrorTool-for-Kafka-Connect with Apache License 2.0
@Override
public List<SourceRecord> poll() {
  if (logger.isDebugEnabled())
    logger.debug("{}: poll()", this);
  synchronized (stopLock) {
    if (!stop.get())
      poll.set(true);
  }
  ArrayList<SourceRecord> records = new ArrayList<>();
  if (poll.get()) {
    try {
      ConsumerRecords<byte[], byte[]> krecords = consumer.poll(Duration.ofMillis(pollTimeout));
      if (logger.isDebugEnabled())
        logger.debug("{}: Got {} records from source.", this, krecords.count());
      for (ConsumerRecord<byte[], byte[]> krecord : krecords) {
        Map<String, String> sourcePartition = Collections.singletonMap(TOPIC_PARTITION_KEY,
            krecord.topic().concat(":").concat(Integer.toString(krecord.partition())));
        Map<String, Long> sourceOffset = Collections.singletonMap(OFFSET_KEY, krecord.offset());
        String sourceTopic = krecord.topic();
        String destinationTopic = sourceTopic;
        byte[] recordKey = krecord.key();
        byte[] recordValue = krecord.value();
        long recordTimestamp = krecord.timestamp();
        if (logger.isTraceEnabled()) {
          logger.trace(
              "Task: sourceTopic:{} sourcePartition:{} sourceOffSet:{} destinationTopic:{}, key:{}, valueSize:{}",
              sourceTopic, krecord.partition(), krecord.offset(), destinationTopic, recordKey,
              krecord.serializedValueSize());
        }
        if (includeHeaders) {
          // Mapping from source type: org.apache.kafka.common.header.Headers, to
          // destination type: org.apache.kafka.connect.Headers
          Headers sourceHeaders = krecord.headers();
          ConnectHeaders destinationHeaders = new ConnectHeaders();
          for (Header header : sourceHeaders) {
            if (header != null) {
              destinationHeaders.add(header.key(), header.value(), Schema.OPTIONAL_BYTES_SCHEMA);
            }
          }
          records.add(
              new SourceRecord(sourcePartition, sourceOffset, destinationTopic, null, Schema.OPTIONAL_BYTES_SCHEMA,
                  recordKey, Schema.OPTIONAL_BYTES_SCHEMA, recordValue, recordTimestamp, destinationHeaders));
        } else {
          records.add(new SourceRecord(sourcePartition, sourceOffset, destinationTopic, null,
              Schema.OPTIONAL_BYTES_SCHEMA, recordKey, Schema.OPTIONAL_BYTES_SCHEMA, recordValue, recordTimestamp));
        }
      }
    } catch (WakeupException e) {
      logger.info("{}: Caught WakeupException. Probably shutting down.", this);
    }
  }
  poll.set(false);
  // If stop has been set processing, then stop the consumer.
  if (stop.get()) {
    logger.debug("{}: stop flag set during poll(), opening stopLatch", this);
    stopLatch.countDown();
  }
  if (logger.isDebugEnabled())
    logger.debug("{}: Returning {} records to connect", this, records.size());
  return records;
}
 
Example 16
Source File: ClientSpanNameProvider.java    From java-kafka-client with Apache License 2.0
private static String replaceIfNull(ConsumerRecord input, String replacement) {
  return ((input == null) ? replacement : input.topic());
}
 
Example 17
Source File: DBusConsumerRecord.java    From DBus with Apache License 2.0
public DBusConsumerRecord(ConsumerRecord<K, V> record) {
    this(record.topic(), record.partition(), record.offset(), record.timestamp(), record.timestampType(),
            record.checksum(), record.serializedKeySize(), record.serializedValueSize(), record.key(), record.value());
}
 
Example 18
Source File: MonitorSpoutDataProcessor.java    From DBus with Apache License 2.0
@Override
public Object process(Object obj, Supplier ... suppliers) {
    Object ret = new Object();

    ConsumerRecord<String, byte[]> record = (ConsumerRecord) obj;
    logger.info("topic:{}, key:{}, offset:{}", record.topic(), record.key(), record.offset());

    if (!isBelong(record.key()))
        return ret;

    TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
    try {
        if (StringUtils.isEmpty(record.key())) {
            logger.warn("topic:{}, offset:{}, key is empty", record.topic(), record.offset());
            return ret;
        }

        // data_increment_heartbeat.mysql.mydb.cbm.t1#router_test_s_r5.6.0.0.1541041451552|1541041451550|ok.wh_placeholder
        String[] vals = StringUtils.split(record.key(), ".");
        if (vals == null || vals.length != 10) {
            logger.error("receive heartbeat key is error. topic:{}, offset:{}, key:{}", record.topic(), record.offset(), record.key());
            return ret;
        }

        long cpTime = 0L;
        long txTime = 0L;
        boolean isTableOK = true;
        String originDsName = StringUtils.EMPTY;

        if (StringUtils.contains(vals[8], "|")) {
            String[] times = StringUtils.split(vals[8], "|");
            cpTime = Long.valueOf(times[0]);
            txTime = Long.valueOf(times[1]);
            // The table has actually been aborted, but heartbeat data is still arriving; in that case only send the stat and do not update ZK.
            if ((times.length == 3 || times.length == 4) && times[2].equals("abort")) {
                isTableOK = false;
                logger.warn("data abort. key:{}", record.key());
            }
            if (times.length == 4)
                originDsName = times[3];
        } else {
            isTableOK = false;
            logger.error("it should not be here. key:{}", record.key());
        }

        if (!isTableOK)
            return ret;

        String dsName = vals[2];
        if (StringUtils.contains(vals[2], "!")) {
            dsName = StringUtils.split(vals[2], "!")[0];
        } else {
            isTableOK = false;
            logger.error("it should not be here. key:{}", record.key());
        }

        if (StringUtils.isNoneBlank(originDsName))
            dsName = originDsName;

        String schemaName = vals[3];
        String tableName = vals[4];

        if (!isTableOK)
            return ret;

        // String dsPartition = vals[6];
        String ns = StringUtils.joinWith(".", dsName, schemaName, tableName);
        String path = StringUtils.joinWith("/", Constants.HEARTBEAT_PROJECT_MONITOR,
                context.getInner().projectName, context.getInner().topologyId, ns);

        // {"node":"/DBus/HeartBeat/ProjectMonitor/db4new/AMQUE/T_USER/0","time":1531180006336,"type":"checkpoint","txTime":1531180004040}
        Packet packet = new Packet();
        packet.setNode(path);
        packet.setType("checkpoint");
        packet.setTime(cpTime);
        packet.setTxTime(txTime);
        cache.put(path, packet);
        logger.info("put cache path:{}", path);

        if (isTimeUp(baseFlushTime)) {
            baseFlushTime = System.currentTimeMillis();
            logger.info("router update zk stat :{}", baseFlushTime);
            flushCache();
        }
        ack(topicPartition, record.offset());
    } catch (Exception e) {
        logger.error("consumer record processor process fail.", e);
        fail(topicPartition, record.offset());
    }
    return ret;
}
 
Example 19
Source File: ConsumerThread.java    From kafka-workers with Apache License 2.0
private TopicPartition topicPartition(ConsumerRecord<K, V> record) {
    return new TopicPartition(record.topic(), record.partition());
}
 
Example 20
Source File: DBConsumer.java    From mapr-streams-sample-programs with Apache License 2.0
public static void main(String[] args) throws IOException {
  // set up house-keeping
  ObjectMapper mapper = new ObjectMapper();
  Histogram stats = new Histogram(1, 10000000, 2);
  Histogram global = new Histogram(1, 10000000, 2);

  final String TOPIC_FAST_MESSAGES = "/sample-stream:fast-messages";
  final String TOPIC_SUMMARY_MARKERS = "/sample-stream:summary-markers";


  Table fastMessagesTable = getTable("/apps/fast-messages");

  // and the consumer
  KafkaConsumer<String, String> consumer;
  try (InputStream props = Resources.getResource("consumer.props").openStream()) {
    Properties properties = new Properties();
    properties.load(props);
    // use a new group id for the dbconsumer
    if (properties.getProperty("group.id") == null) {
      properties.setProperty("group.id", "group-" + new Random().nextInt(100000));
    } else {
      String groupId = properties.getProperty("group.id");
      properties.setProperty("group.id", "db-" + groupId);
    }

    consumer = new KafkaConsumer<>(properties);
  }
  consumer.subscribe(Arrays.asList(TOPIC_FAST_MESSAGES, TOPIC_SUMMARY_MARKERS));
  int timeouts = 0;

  //noinspection InfiniteLoopStatement
  while (true) {
    // read records with a short timeout. If we time out, we don't really care.
    ConsumerRecords<String, String> records = consumer.poll(200);
    if (records.count() == 0) {
      timeouts++;
    } else {
      System.out.printf("Got %d records after %d timeouts\n", records.count(), timeouts);
      timeouts = 0;
    }
    for (ConsumerRecord<String, String> record : records) {
      switch (record.topic()) {
        case TOPIC_FAST_MESSAGES:
          // the send time is encoded inside the message
          JsonNode msg = mapper.readTree(record.value());
          switch (msg.get("type").asText()) {
            case "test":
              // create a Document and set an _id, in this case the message number (document will be updated each time)
              Document messageDocument = MapRDB.newDocument(msg);
              messageDocument.setId( Integer.toString(messageDocument.getInt("k")));
              fastMessagesTable.insertOrReplace( messageDocument );

              long latency = (long) ((System.nanoTime() * 1e-9 - msg.get("t").asDouble()) * 1000);
              stats.recordValue(latency);
              global.recordValue(latency);
              break;
            case "marker":
              // whenever we get a marker message, we should dump out the stats
              // note that the number of fast messages won't necessarily be quite constant
              System.out.printf("%d messages received in period, latency(min, max, avg, 99%%) = %d, %d, %.1f, %d (ms)\n",
                      stats.getTotalCount(),
                      stats.getValueAtPercentile(0), stats.getValueAtPercentile(100),
                      stats.getMean(), stats.getValueAtPercentile(99));
              System.out.printf("%d messages received overall, latency(min, max, avg, 99%%) = %d, %d, %.1f, %d (ms)\n",
                      global.getTotalCount(),
                      global.getValueAtPercentile(0), global.getValueAtPercentile(100),
                      global.getMean(), global.getValueAtPercentile(99));
              stats.reset();
              break;
            default:
              throw new IllegalArgumentException("Illegal message type: " + msg.get("type"));
          }
          break;
        case TOPIC_SUMMARY_MARKERS:
          break;
        default:
          throw new IllegalStateException("Shouldn't be possible to get message on topic " + record.topic());
      }
    }
  }
}