Java Code Examples for kafka.consumer.ConsumerIterator#next()

The following examples show how to use kafka.consumer.ConsumerIterator#next(). Each example is taken from an open-source project; the source file, project, and license are noted above each listing.
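All of these snippets use the old Scala "high-level" consumer API (kafka.consumer.*), which was deprecated in Kafka 0.11 and removed in Kafka 2.0; ConsumerIterator#next() returns a MessageAndMetadata and, by default, blocks until a message arrives. As a reference point, a minimal self-contained consumer loop looks roughly like the following sketch (the ZooKeeper address, group id, and topic name are placeholders):

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class MinimalConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder
        props.put("group.id", "example-group");           // placeholder

        ConsumerConnector connector =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // Request a single stream for the topic and take its iterator.
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                connector.createMessageStreams(Collections.singletonMap("example-topic", 1));
        ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();

        // next() blocks until a message is available (or consumer.timeout.ms, if set, expires).
        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> msg = it.next();
            System.out.println(msg.partition() + "/" + msg.offset() + ": " + new String(msg.message()));
        }
        connector.shutdown();
    }
}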
Example 1
Source File: KafkaMqCollect.java    From light_drtc with Apache License 2.0
public void collectMq() {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(Constants.kfTopic, 1);

    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

    Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);

    KafkaStream<String, String> stream = consumerMap.get(Constants.kfTopic).get(0);
    ConsumerIterator<String, String> it = stream.iterator();
    MessageAndMetadata<String, String> msgMeta;
    while (it.hasNext()) {
        msgMeta = it.next();
        super.mqTimer.parseMqText(msgMeta.key(), msgMeta.message());
        // System.out.println(msgMeta.key() + "\t" + msgMeta.message());
    }
}
 
Example 2
Source File: KafkaConsumer.java    From flume-ng-kafka-sink with Apache License 2.0
public MessageAndMetadata getNextMessage(String topic){
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    KafkaStream stream = streams.get(0); // it has only a single stream, because there is only one consumer
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();
    int counter = 0;
    while (!it.hasNext()){
        // Waited >= 10 s in total (5 retries x 2 s), so give up and return null
        if(counter == 5){
            logger.error("0 messages available to fetch for the topic " + topic);
            return null;
        }
        // Sleep briefly before checking hasNext() again.
        try {
            Thread.sleep(2 * 1000);
        } catch (InterruptedException e) {
            // ignore
        }
        counter++;
    }
    return it.next();
}
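A caveat on this pattern: with the high-level consumer, hasNext() blocks indefinitely unless consumer.timeout.ms is set in the consumer properties, and when it is set the iterator signals an empty fetch by throwing ConsumerTimeoutException rather than returning false (the exception Examples 4, 7, 8 and 14 catch). A sketch of that configuration, with placeholder connection values:

Properties props = new Properties();
props.put("zookeeper.connect", "localhost:2181"); // placeholder
props.put("group.id", "example-group");           // placeholder
// With a timeout set, hasNext()/next() throw kafka.consumer.ConsumerTimeoutException
// after 10 s with no message instead of blocking forever.
props.put("consumer.timeout.ms", "10000");
ConsumerConfig config = new ConsumerConfig(props);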
 
Example 3
Source File: ConsumerWorker.java    From yuzhouwan with Apache License 2.0
@Override
public void run() {
    ConsumerIterator<byte[], byte[]> iter = kafkaStream.iterator();
    MessageAndMetadata<byte[], byte[]> msg;
    int total = 0, fail = 0, success = 0;
    long start = System.currentTimeMillis();
    while (iter.hasNext()) {
        try {
            msg = iter.next();
            _log.info("Thread {}: {}", threadNum, new String(msg.message(), StandardCharsets.UTF_8));
            _log.info("partition: {}, offset: {}", msg.partition(), msg.offset());
            success++;
        } catch (Exception e) {
            _log.error("", e);
            fail++;
        }
        _log.info("Count [fail/success/total]: [{}/{}/{}], Time: {}s", fail, success, ++total,
                (System.currentTimeMillis() - start) / 1000);
    }
}
 
Example 4
Source File: KafkaConsumer.java    From sqoop-on-spark with Apache License 2.0
public MessageAndMetadata getNextMessage(String topic) {
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
  // it has only a single stream, because there is only one consumer
  KafkaStream stream = streams.get(0);
  final ConsumerIterator<byte[], byte[]> it = stream.iterator();
  try {
    if (it.hasNext()) {
      return it.next();
    } else {
      return null;
    }
  } catch (ConsumerTimeoutException e) {
    logger.error("0 messages available to fetch for the topic " + topic);
    return null;
  }
}
 
Example 5
Source File: Processer.java    From blog_demos with Apache License 2.0
public void run() {
    // 1. Get the message iterator
    ConsumerIterator<String, String> iter = this.stream.iterator();

    logger.info("server [{}] start run", TOMCAT_ID);

    // 2. Iterate over and log the messages
    while (iter.hasNext()) {
        // 2.1 Fetch the next message
        MessageAndMetadata value = iter.next();

        // 2.2 Log it
        logger.info("server [{}], threadNumber [{}], offset [{}], key [{}], message[{}]",
                TOMCAT_ID,
                threadNumber,
                value.offset(),
                value.key(),
                value.message());
    }
    // 3. Signal that this thread has finished
    logger.info("Shutdown Thread:" + this.threadNumber);
}
 
Example 6
Source File: KafkaOffsetGetter.java    From Kafka-Insight with Apache License 2.0
/**
 * Continuously consumes the internal consumer-offsets topic and records each
 * offset-commit message in kafkaConsumerOffsets.
 */
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, 1);
    KafkaStream<byte[], byte[]> offsetMsgStream = consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);

    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {

        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
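        // In the __consumer_offsets message format, key schema versions 0 and 1
        // are offset commits; version 2 is group metadata, which is skipped here.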
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
 
Example 7
Source File: KafkaWorker.java    From elasticsearch-river-kafka with Apache License 2.0
/**
 * Consumes the messages from the partition via specified stream.
 */
private void consumeMessagesAndAddToBulkProcessor(final KafkaStream stream) {

    try {
        // by default the iterator blocks forever waiting for a message, but a consumer timeout is configured here
        final ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();

        // Consume all the messages of the stream (partition)
        while (consumerIterator.hasNext() && consume) {

            final MessageAndMetadata messageAndMetadata = consumerIterator.next();
            logMessage(messageAndMetadata);

            elasticsearchProducer.addMessagesToBulkProcessor(messageAndMetadata);

            // StatsD reporting
            stats.messagesReceived.incrementAndGet();
            stats.lastCommitOffsetByPartitionId.put(messageAndMetadata.partition(), messageAndMetadata.offset());
        }
    } catch (ConsumerTimeoutException ex) {
        logger.debug("Nothing to be consumed for now. Consume flag is: {}", consume);
    }
}
 
Example 8
Source File: KafkaPublisherTest.java    From nifi with Apache License 2.0
@Test
public void validateSuccessfulSendAsWhole() throws Exception {
    InputStream contentStream = new ByteArrayInputStream("Hello Kafka".getBytes(StandardCharsets.UTF_8));
    String topicName = "validateSuccessfulSendAsWhole";

    Properties kafkaProperties = this.buildProducerProperties();
    KafkaPublisher publisher = new KafkaPublisher(kafkaProperties, mock(ComponentLog.class));

    PublishingContext publishingContext = new PublishingContext(contentStream, topicName);
    KafkaPublisherResult result = publisher.publish(publishingContext);

    assertEquals(0, result.getLastMessageAcked());
    assertEquals(1, result.getMessagesSent());
    contentStream.close();
    publisher.close();

    ConsumerIterator<byte[], byte[]> iter = this.buildConsumer(topicName);
    assertNotNull(iter.next());
    try {
        iter.next();
    } catch (ConsumerTimeoutException e) {
        // that's OK since this is the Kafka mechanism to unblock
    }
}
 
Example 9
Source File: JavaKafkaConsumerHighAPIHbaseImpl.java    From dk-fitting with Apache License 2.0
public void run() {
    // 1. Get the message iterator
    ConsumerIterator<String, String> iter = this.stream.iterator();
    // 2. Iterate over the messages
    while (iter.hasNext()) {
        // 2.1 Fetch the next message
        MessageAndMetadata value = iter.next();

        // 2.2 Write the message to HBase
        try {
            HbaseUtils.insertData(providerProp.getProperty("consumer.hbase.tablename"),
                    providerProp.getProperty("consumer.hbase.columnFamilyName"), value.message().toString());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    // 3. Signal that this thread has finished
    logger.info("Shutdown Thread:" + this.threadNumber);
}
 
Example 10
Source File: JavaKafkaConsumerHighAPIESImpl.java    From dk-fitting with Apache License 2.0
public void run() {
    // 1. Get the message iterator
    ConsumerIterator<String, String> iter = this.stream.iterator();
    // 2. Iterate over the messages
    while (iter.hasNext()) {
        // 2.1 Fetch the next message
        MessageAndMetadata value = iter.next();

        // 2.2 Forward the message to Elasticsearch
        try {
            ElasticsearchUtils.sendToES(esIps, esPort, esClusterName,
                    indexName, typeName,
                    value.message().toString(), providerProp.getProperty("consumer.es.kafkaMessage.separator"),
                    Boolean.parseBoolean(providerProp.getProperty("consumer.es.kafkaMessage.isJsonMessage")));
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    // 3. Signal that this thread has finished
    logger.info("Shutdown Thread:" + this.threadNumber);
}
 
Example 11
Source File: PutKafkaTest.java    From localization_nifi with Apache License 2.0
@Test
public void validateDemarcationIntoEmptyMessages() {
    String topicName = "validateDemarcationIntoEmptyMessages";
    PutKafka putKafka = new PutKafka();
    final TestRunner runner = TestRunners.newTestRunner(putKafka);
    runner.setProperty(PutKafka.TOPIC, topicName);
    runner.setProperty(PutKafka.KEY, "key1");
    runner.setProperty(PutKafka.CLIENT_NAME, "foo");
    runner.setProperty(PutKafka.SEED_BROKERS, "localhost:" + kafkaLocal.getKafkaPort());
    runner.setProperty(PutKafka.MESSAGE_DELIMITER, "\n");

    final byte[] bytes = "\n\n\n1\n2\n\n\n3\n4\n\n\n".getBytes(StandardCharsets.UTF_8);
    runner.enqueue(bytes);
    runner.run(1);

    runner.assertAllFlowFilesTransferred(PutKafka.REL_SUCCESS, 1);

    ConsumerIterator<byte[], byte[]> consumer = this.buildConsumer(topicName);

    assertNotNull(consumer.next());
    assertNotNull(consumer.next());
    assertNotNull(consumer.next());
    assertNotNull(consumer.next());
    try {
        consumer.next();
        fail();
    } catch (Exception e) {
        // expected: the consumer timeout exception is Kafka's mechanism to unblock next()
    }
}
 
Example 12
Source File: kafkaConsumer.java    From Transwarp-Sample-Code with MIT License
public void run() {
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        MessageAndMetadata<byte[],byte[]> next = it.next();
        System.out.println(Thread.currentThread().getName() + ": partition[" + next.partition() + "],"
                + "offset[" + next.offset() + "], " + new String(next.message()));
    }
}
 
Example 13
Source File: KafkaDemoClient.java    From iotplatform with Apache License 2.0
private static void startConsumer() throws InterruptedException {
    ConsumerIterator<String, String> it = buildConsumer(CONSUMER_TOPIC);
    do {
      if (it.hasNext()) {
          MessageAndMetadata<String, String> messageAndMetadata = it.next();
          System.out.println(String.format("Kafka message [%s]", messageAndMetadata.message()));
      }
      Thread.sleep(100);
    } while (true);
}
 
Example 14
Source File: KafkaPublisherTest.java    From nifi with Apache License 2.0
@Test
public void validateSuccessfulSendAsDelimited() throws Exception {
    InputStream contentStream = new ByteArrayInputStream(
            "Hello Kafka\nHello Kafka\nHello Kafka\nHello Kafka\n".getBytes(StandardCharsets.UTF_8));
    String topicName = "validateSuccessfulSendAsDelimited";

    Properties kafkaProperties = this.buildProducerProperties();
    KafkaPublisher publisher = new KafkaPublisher(kafkaProperties, mock(ComponentLog.class));

    PublishingContext publishingContext = new PublishingContext(contentStream, topicName);
    publishingContext.setDelimiterBytes("\n".getBytes(StandardCharsets.UTF_8));
    KafkaPublisherResult result = publisher.publish(publishingContext);

    assertEquals(3, result.getLastMessageAcked());
    assertEquals(4, result.getMessagesSent());
    contentStream.close();
    publisher.close();

    ConsumerIterator<byte[], byte[]> iter = this.buildConsumer(topicName);
    assertNotNull(iter.next());
    assertNotNull(iter.next());
    assertNotNull(iter.next());
    assertNotNull(iter.next());
    try {
        iter.next();
        fail();
    } catch (ConsumerTimeoutException e) {
        // that's OK since this is the Kafka mechanism to unblock
    }
}
 
Example 15
Source File: DemoHighLevelConsumer.java    From KafkaExample with Apache License 2.0
public static void main(String[] args) {
	// Hardcoded for local testing; remove this line to honor command-line arguments.
	args = new String[] { "zookeeper0:2181/kafka", "topic1", "group2", "consumer1" };
	if (args == null || args.length != 4) {
		System.err.println("Usage:\n\tjava -jar kafka_consumer.jar ${zookeeper_list} ${topic_name} ${group_name} ${consumer_id}");
		System.exit(1);
	}
	String zk = args[0];
	String topic = args[1];
	String groupid = args[2];
	String consumerid = args[3];
	Properties props = new Properties();
	props.put("zookeeper.connect", zk);
	props.put("group.id", groupid);
	props.put("client.id", "test");
	props.put("consumer.id", consumerid);
	props.put("auto.offset.reset", "largest");
	props.put("auto.commit.enable", "false");
	props.put("auto.commit.interval.ms", "60000");

	ConsumerConfig consumerConfig = new ConsumerConfig(props);
	ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);

	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(topic, 1);
	Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector.createMessageStreams(topicCountMap);

	KafkaStream<byte[], byte[]> stream1 = consumerMap.get(topic).get(0);
	ConsumerIterator<byte[], byte[]> iterator = stream1.iterator();
	while (iterator.hasNext()) {
		MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();
		String message = String.format(
				"Topic:%s, GroupID:%s, Consumer ID:%s, PartitionID:%s, Offset:%s, Message Key:%s, Message Payload: %s",
				messageAndMetadata.topic(), groupid, consumerid, messageAndMetadata.partition(),
				messageAndMetadata.offset(), new String(messageAndMetadata.key()),
				new String(messageAndMetadata.message()));
		System.out.println(message);
		consumerConnector.commitOffsets();
	}
}
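Note that this example disables auto-commit (auto.commit.enable=false) and calls consumerConnector.commitOffsets() after every message. That gives at-least-once delivery at the cost of one offset write per record; committing in batches (every N messages or every few seconds) is the usual compromise.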
 
Example 16
Source File: KafkaMessageReceiverPool.java    From message-queue-client-framework with Apache License 2.0
@Override
public void run() {

    logger.info(Thread.currentThread().getName() + " clientId: "
            + stream.clientId() + " start.");

    ConsumerIterator<K, V> it = stream.iterator();

    while (it.hasNext()) {

        MessageAndMetadata<K, V> messageAndMetadata = it.next();

        try {
            this.adapter.messageAdapter(messageAndMetadata);

        } catch (MQException e) {
            if (receiverRetry != null) {
                receiverRetry.receiveMessageRetry(messageAndMetadata);
            }
            logger.error("Receive message failed."
                    + " topic: " + messageAndMetadata.topic()
                    + " offset: " + messageAndMetadata.offset()
                    + " partition: " + messageAndMetadata.partition(), e);
        } finally {
            /* Commit manually when auto-commit is disabled; the committed value
               is offset + 1, i.e. the offset of the next message to consume. */
            if (!getAutoCommit()) {
                consumer.commitOffsets(Collections.singletonMap(
                        TopicAndPartition.apply(messageAndMetadata.topic(), messageAndMetadata.partition()),
                        OffsetAndMetadata.apply(messageAndMetadata.offset() + 1)), true);
            }
        }
    }

    logger.info(Thread.currentThread().getName() + " clientId: " + stream.clientId() + " end.");
}
 
Example 17
Source File: KafkaKeyValueProducerPusherTest.java    From incubator-gobblin with Apache License 2.0
@Test
public void test() throws IOException {
  // Test that the scoped config overrides the generic config
  Pusher pusher = new KafkaKeyValueProducerPusher<byte[], byte[]>("127.0.0.1:dummy", TOPIC,
      Optional.of(ConfigFactory.parseMap(ImmutableMap.of(
          ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:" + this.kafkaTestHelper.getKafkaServerPort()))));

  String msg1 = "msg1";
  String msg2 = "msg2";

  pusher.pushMessages(Lists.newArrayList(Pair.of("key1", msg1.getBytes()), Pair.of("key2", msg2.getBytes())));

  try {
    Thread.sleep(1000);
  } catch(InterruptedException ex) {
    Thread.currentThread().interrupt();
  }

  ConsumerIterator<byte[], byte[]> iterator = this.kafkaTestHelper.getIteratorForTopic(TOPIC);

  assert(iterator.hasNext());

  MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();

  Assert.assertEquals(new String(messageAndMetadata.key()), "key1");
  Assert.assertEquals(new String(messageAndMetadata.message()), msg1);
  assert(iterator.hasNext());

  messageAndMetadata = iterator.next();
  Assert.assertEquals(new String(messageAndMetadata.key()), "key2");
  Assert.assertEquals(new String(messageAndMetadata.message()), msg2);

  pusher.close();
}
 
Example 18
Source File: ThrottlingManagerEstimatorConsumerFactory.java    From warp10-platform with Apache License 2.0
@Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
  
  return new Runnable() {          
    @Override
    public void run() {
      ConsumerIterator<byte[],byte[]> iter = stream.iterator();

      // Iterate on the messages
      TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

      KafkaOffsetCounters counters = pool.getCounters();
      
      try {
        while (iter.hasNext()) {
          //
          // Since the call to 'next' may block, we need to first
          // check that there is a message available
          //
          
          boolean nonEmpty = iter.nonEmpty();
          
          if (nonEmpty) {
            MessageAndMetadata<byte[], byte[]> msg = iter.next();
            counters.count(msg.partition(), msg.offset());
            
            byte[] data = msg.message();

            Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_KAFKA_THROTTLING_IN_MESSAGES, Sensision.EMPTY_LABELS, 1);
            Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_KAFKA_THROTTLING_IN_BYTES, Sensision.EMPTY_LABELS, data.length);
            
            if (null != macKey) {
              data = CryptoUtils.removeMAC(macKey, data);
            }
            
            // Skip data whose MAC was not verified successfully
            if (null == data) {
              Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_KAFKA_THROTTLING_IN_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
              continue;
            }

            //
            // Update throttling manager
            //
            
            try {
              ThrottlingManager.fuse(HyperLogLogPlus.fromBytes(data));
              Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_THROTLLING_FUSIONS, Sensision.EMPTY_LABELS, 1);
            } catch (Exception e) {
              Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_THROTLLING_FUSIONS_FAILED, Sensision.EMPTY_LABELS, 1);
            }
          }
        }        
      } catch (Throwable t) {
        t.printStackTrace(System.err);
      } finally {
        // Set abort to true in case we exit the 'run' method
        pool.getAbort().set(true);
      }
    }
  };
}
 
Example 19
Source File: PlasmaBackEnd.java    From warp10-platform with Apache License 2.0
@Override
public void run() {
  long count = 0L;
  
  byte[] clslbls = new byte[16];
  
  try {
    ConsumerIterator<byte[],byte[]> iter = this.stream.iterator();

    byte[] inSipHashKey = backend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_BACKEND_IN);
    byte[] inAESKey = backend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_BACKEND_IN);

    byte[] outSipHashKey = backend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_BACKEND_OUT);
    byte[] outAESKey = backend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_BACKEND_OUT);

    // Iterate on the messages
    TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

    // TODO(hbs): allow setting of writeBufferSize

    while (iter.hasNext()) {
      //
      // Since the call to 'next' may block, we need to first
      // check that there is a message available
      //
      
      boolean nonEmpty = iter.nonEmpty();
      
      if (nonEmpty) {
        count++;
        MessageAndMetadata<byte[], byte[]> msg = iter.next();
        counters.count(msg.partition(), msg.offset());
        
        // Do nothing if there are no subscriptions
        if (null == backend.subscriptions || backend.subscriptions.isEmpty()) {
          continue;
        }
        
        byte[] data = msg.message();

        Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_BACKEND_KAFKA_IN_MESSAGES, Sensision.EMPTY_LABELS, 1);
        Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_BACKEND_KAFKA_IN_BYTES, Sensision.EMPTY_LABELS, data.length);
        
        if (null != inSipHashKey) {
          data = CryptoUtils.removeMAC(inSipHashKey, data);
        }
        
        // Skip data whose MAC was not verified successfully
        if (null == data) {
          Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_BACKEND_KAFKA_IN_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
          continue;
        }
        
        // Unwrap data if need be
        if (null != inAESKey) {
          data = CryptoUtils.unwrap(inAESKey, data);
        }
        
        // Skip data that was not unwrapped successfully
        if (null == data) {
          Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_BACKEND_KAFKA_IN_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
          continue;
        }
        
        //
        // Extract KafkaDataMessage
        //
        
        KafkaDataMessage tmsg = new KafkaDataMessage();
        deserializer.deserialize(tmsg, data);
        
        switch(tmsg.getType()) {
          case STORE:
            backend.dispatch(clslbls, msg, tmsg, outSipHashKey, outAESKey);              
            break;
          case DELETE:
            break;
          default:
            throw new RuntimeException("Invalid message type.");
        }            
      } else {
        // Sleep a tiny while
        try {
          Thread.sleep(1L);
        } catch (InterruptedException ie) {             
        }
      }          
    }        
  } catch (Throwable t) {
    t.printStackTrace(System.err);
  } finally {
    // Set abort to true in case we exit the 'run' method
    backend.abort.set(true);
  }
}