Java Code Examples for kafka.message.MessageAndMetadata#message()

The following examples show how to use kafka.message.MessageAndMetadata#message(). You can go to the original project or source file by following the links above each example.
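
Most of the examples below follow the same basic pattern: create a high-level ConsumerConnector, obtain a KafkaStream, iterate it, and call message() on each MessageAndMetadata. The minimal sketch below shows that shape in isolation; the ZooKeeper address, group id, and topic name are placeholders, not values taken from any of the projects listed here.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class MessageAndMetadataSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "127.0.0.1:2181"); // placeholder address
        props.put("group.id", "example-group");           // placeholder group id
        props.put("auto.offset.reset", "smallest");

        ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                connector.createMessageStreams(Collections.singletonMap("example-topic", 1));
        ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();

        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> record = it.next();
            // message() returns the decoded value; with the default decoder this is the raw byte[]
            System.out.printf("%s[%d]@%d: %s%n", record.topic(), record.partition(),
                    record.offset(), new String(record.message()));
        }
        connector.shutdown();
    }
}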
Example 1
Source File: KafkaOffsetGetter.java    From Kafka-Insight with Apache License 2.0
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * The general contract of the method <code>run</code> is that it may
 * take any action whatsoever.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, 1);
    KafkaStream<byte[], byte[]> offsetMsgStream = consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);

    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {

        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
 
Example 2
Source File: LegacyKafkaMessageIterator.java    From secor with Apache License 2.0
@Override
public Message next() {
    MessageAndMetadata<byte[], byte[]> kafkaMessage;
    try {
        kafkaMessage = mIterator.next();
    } catch (ConsumerTimeoutException e) {
        throw new LegacyConsumerTimeoutException(e);
    }

    long timestamp = 0L;
    if (mConfig.useKafkaTimestamp()) {
        timestamp = mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(kafkaMessage);
    }

    return new Message(kafkaMessage.topic(), kafkaMessage.partition(),
            kafkaMessage.offset(), kafkaMessage.key(),
            kafkaMessage.message(), timestamp, null);
}
 
Example 3
Source File: KafkaSinkTest.java    From flume-ng-kafka-sink with Apache License 2.0
@Test
public void testPreprocessorForCustomMessageBody(){
    Context context = prepareDefaultContext();
    // configure the static topic
    context.put(Constants.TOPIC, TestConstants.STATIC_TOPIC);
    // configure the preprocessor
    context.put(Constants.PREPROCESSOR, "com.thilinamb.flume.sink.preprocessor.ModifyMessageBodyPreprocessor");
    String msg = "original-message-body";

    try {
        Sink.Status status = prepareAndSend(context, msg);
        if (status == Sink.Status.BACKOFF) {
            fail("Error Occurred");
        }
    } catch (EventDeliveryException ex) {
        // ignore
    }
    // when the message body is modified by the preprocessor, the message published
    // to the topic should carry the modified body.
    MessageAndMetadata message = testUtil.getNextMessageFromConsumer(
            TestConstants.STATIC_TOPIC);
    String msgBody = new String((byte[]) message.message());
    // check the message body.
    assertEquals(TestConstants.CUSTOM_MSG_BODY, msgBody);
}
 
Example 4
Source File: KafkaSinkTest.java    From flume-ng-kafka-sink with Apache License 2.0
@Test
public void testPreprocessorForCustomTopic(){
    Context context = prepareDefaultContext();
    // configure the static topic
    context.put(Constants.TOPIC, TestConstants.STATIC_TOPIC);
    // configure the preprocessor
    context.put(Constants.PREPROCESSOR, "com.thilinamb.flume.sink.preprocessor.ModifyTopicPreprocessor");
    String msg = "custom-topic-test";

    try {
        Sink.Status status = prepareAndSend(context, msg);
        if (status == Sink.Status.BACKOFF) {
            fail("Error Occurred");
        }
    } catch (EventDeliveryException ex) {
        // ignore
    }
    // when the message is modified from the preprocessor, it should be published
    // to the custom topic.
    MessageAndMetadata message = testUtil.getNextMessageFromConsumer(
            TestConstants.CUSTOM_TOPIC);
    String msgBody = new String((byte[]) message.message());
    // check the message body. Topic has already been verified by consuming the message from the custom topic.
    assertEquals(msg, msgBody);
}
 
Example 5
Source File: KafkaSinkTest.java    From flume-ng-kafka-sink with Apache License 2.0
@Test
public void testPreprocessorForCustomKey(){
    Context context = prepareDefaultContext();
    // configure the static topic
    context.put(Constants.TOPIC, TestConstants.STATIC_TOPIC);
    // configure the preprocessor
    context.put(Constants.PREPROCESSOR, "com.thilinamb.flume.sink.preprocessor.ModifyKeyPreprocessor");
    String msg = "custom-key-test";

    try {
        Sink.Status status = prepareAndSend(context, msg);
        if (status == Sink.Status.BACKOFF) {
            fail("Error Occurred");
        }
    } catch (EventDeliveryException ex) {
        // ignore
    }

    MessageAndMetadata message = testUtil.getNextMessageFromConsumer(
            TestConstants.STATIC_TOPIC);
    String msgBody = new String((byte[]) message.message());
    // check the message body and the key. Only the key should be changed. topic has already been verified by
    // consuming from the correct topic.
    assertEquals(msg, msgBody);
    assertEquals(TestConstants.CUSTOM_KEY, new String((byte[])message.key()));
}
 
Example 6
Source File: DeleteDocumentProducer.java    From elasticsearch-river-kafka with Apache License 2.0
/**
 * For the given messages creates delete document requests and adds them to the bulk processor queue, for
 * processing later when the size of bulk actions is reached.
 *
 * @param messageAndMetadata given message
 */
public void addMessagesToBulkProcessor(final MessageAndMetadata messageAndMetadata) {

    final byte[] messageBytes = (byte[]) messageAndMetadata.message();

    if (messageBytes == null || messageBytes.length == 0) return;

    try {
        final Map<String, Object> messageMap = reader.readValue(messageBytes);

        if(messageMap.containsKey("id")) {
            String id = (String)messageMap.get("id");

            final DeleteRequest request = Requests.deleteRequest(riverConfig.getIndexName()).
                    type(riverConfig.getTypeName()).
                    id(id);

            bulkProcessor.add(request);
        } else {
            throw new IllegalArgumentException("No id provided in a message to delete a document from EL.");
        }
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
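
The producer above expects each Kafka message body to be a JSON object that carries the document id under the "id" key. A short illustration of a payload it would accept; the field name comes from the code above, while the concrete value is made up:

// a minimal sketch of a payload addMessagesToBulkProcessor would accept
byte[] payload = "{\"id\": \"doc-42\"}".getBytes(java.nio.charset.StandardCharsets.UTF_8);
// reader.readValue(payload) yields {"id": "doc-42"}, so a DeleteRequest for that id is
// added to the bulk processor; a payload without "id" raises IllegalArgumentException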
 
Example 7
Source File: KafkaConsumer08.java    From datacollector with Apache License 2.0
@Override
public MessageAndOffset read() throws StageException {
  try {
    // hasNext() blocks indefinitely if consumer.timeout.ms is set to -1.
    // If consumer.timeout.ms is set to a positive value, e.g. 6000, a ConsumerTimeoutException
    // is thrown when no message is written to the Kafka topic within that time.
    if(consumerIterator.hasNext()) {
      MessageAndMetadata<byte[], byte[]> messageAndMetadata = consumerIterator.next();
      byte[] message = messageAndMetadata.message();
      long offset = messageAndMetadata.offset();
      int partition = messageAndMetadata.partition();
      return new MessageAndOffset(messageAndMetadata.key(), message, offset, partition);
    }
    return null;
  } catch (ConsumerTimeoutException e) {
    /* For the high-level consumer, the fetching logic is handled by a background
       fetcher thread and hidden from the user. In either case of
       1) the broker being down, or
       2) no message being available,
       the fetcher thread keeps retrying while the user thread waits for it to put
       data into the buffer, until the timeout expires. In short, the high-level
       consumer design keeps users from having to worry about connect/reconnect issues. */
    return null;
  }
}
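
The ConsumerTimeoutException branch above only comes into play when consumer.timeout.ms is set on the consumer. A hedged configuration sketch; the property values are illustrative, not taken from the Data Collector project:

Properties props = new Properties();
props.put("zookeeper.connect", "127.0.0.1:2181"); // placeholder
props.put("group.id", "example-group");           // placeholder
// -1 (the default) makes hasNext() block forever; a positive value makes the
// iterator throw kafka.consumer.ConsumerTimeoutException after that many
// milliseconds without a message, which is what the catch block above handles.
props.put("consumer.timeout.ms", "6000");
ConsumerConfig config = new ConsumerConfig(props);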
 
Example 8
Source File: KafkaIndexingManager.java    From linden with Apache License 2.0
public KafkaIndexingManager(final LindenConfig lindenConfig, ShardingStrategy shardingStrategy,
                            LindenCore lindenCore, DataProvider<MessageAndMetadata<byte[], byte[]>> provider) {
  super(provider, lindenConfig, lindenCore, new Function<MessageAndMetadata<byte[], byte[]>, LindenIndexRequest>() {
    @Override
    public LindenIndexRequest apply(MessageAndMetadata<byte[], byte[]> messageAndMetadata) {
      LindenIndexRequest indexRequest = null;
      long offset = messageAndMetadata.offset();
      long partition = messageAndMetadata.partition();
      String message = new String(messageAndMetadata.message());
      try {
        indexRequest = LindenIndexRequestParser.parse(lindenConfig.getSchema(), message);
        LOGGER.info("Parse index request : id={}, route={}, type={}, content({}/{})={}", indexRequest.getId(),
                    indexRequest.getRouteParam(), indexRequest.getType(), partition, offset, message);
      } catch (IOException e) {
        LOGGER.error("Parse index request failed : {} - {}", message, Throwables.getStackTraceAsString(e));
      }
      return indexRequest;
    }
  }, shardingStrategy);
}
 
Example 9
Source File: AlertKafkaPublisherTest.java    From eagle with Apache License 2.0
private static void consumeWithOutput(final List<String> outputMessages) {
    Thread t = new Thread(new Runnable() {
        @Override
        public void run() {
            Properties props = new Properties();
            props.put("group.id", "B");
            props.put("zookeeper.connect", "127.0.0.1:" + + TEST_KAFKA_ZOOKEEPER_PORT);
            props.put("zookeeper.session.timeout.ms", "4000");
            props.put("zookeeper.sync.time.ms", "2000");
            props.put("auto.commit.interval.ms", "1000");
            props.put("auto.offset.reset", "smallest");

            ConsumerConnector jcc = null;
            try {
                ConsumerConfig ccfg = new ConsumerConfig(props);
                jcc = Consumer.createJavaConsumerConnector(ccfg);
                Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
                topicCountMap.put(TEST_TOPIC_NAME, 1);
                Map<String, List<KafkaStream<byte[], byte[]>>> topicMap = jcc.createMessageStreams(topicCountMap);
                KafkaStream<byte[], byte[]> cstrm = topicMap.get(TEST_TOPIC_NAME).get(0);
                for (MessageAndMetadata<byte[], byte[]> mm : cstrm) {
                    String message = new String(mm.message());
                    outputMessages.add(message);

                    try {
                        Thread.sleep(5000);
                    } catch (InterruptedException e) {
                    }
                }
            } finally {
                if (jcc != null) {
                    jcc.shutdown();
                }
            }
        }
    });
    t.start();
}
 
Example 10
Source File: KafkaConsumerThread.java    From incubator-iotdb with Apache License 2.0
public void run() {
  for (MessageAndMetadata<String, String> messageAndMetadata : stream) {
    String uploadMessage = messageAndMetadata.message();
    logger.info(String.format("%s from partition[%d]: %s", Thread.currentThread().getName(),
        messageAndMetadata.partition(), uploadMessage));
    writeData(uploadMessage);
  }
}
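
This stream yields String keys and values rather than byte arrays, which implies decoders were supplied when the stream was created. A hedged sketch of how such a stream could be obtained; the topic name and the already-created ConsumerConnector are assumptions, not code from the IoTDB project:

import java.util.Collections;
import java.util.List;
import java.util.Map;

import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.serializer.StringDecoder;
import kafka.utils.VerifiableProperties;

public class StringStreamSketch {

    static KafkaStream<String, String> stringStream(ConsumerConnector connector, String topic) {
        StringDecoder decoder = new StringDecoder(new VerifiableProperties());
        Map<String, List<KafkaStream<String, String>>> streams =
                connector.createMessageStreams(Collections.singletonMap(topic, 1), decoder, decoder);
        // this stream can then back a consumer thread like the run() method above
        return streams.get(topic).get(0);
    }
}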
 
Example 11
Source File: MessageResource.java    From dropwizard-kafka-http with Apache License 2.0
public Message(MessageAndMetadata<byte[], byte[]> message) {
    this.topic = message.topic();

    this.key = message.key() != null ? new String(message.key(), Charset.forName("utf-8")) : null;
    this.message = new String(message.message(), Charset.forName("utf-8"));

    this.partition = message.partition();
    this.offset = message.offset();
}
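
A hedged usage sketch of this constructor; the stream setup is assumed and not shown above. Draining whatever is currently available into a list of these DTOs relies on consumer.timeout.ms being set, so that iteration stops with a ConsumerTimeoutException once the topic is quiet:

// imports (java.util.*, kafka.consumer.*) omitted; 'stream' is a KafkaStream<byte[], byte[]>
List<Message> drained = new ArrayList<Message>();
try {
    ConsumerIterator<byte[], byte[]> it = stream.iterator();
    while (it.hasNext()) {
        drained.add(new Message(it.next()));
    }
} catch (ConsumerTimeoutException e) {
    // expected once no more messages arrive within consumer.timeout.ms
}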
 
Example 12
Source File: FastKafkaSource.java    From fraud-detection-tutorial with Apache License 2.0
public Status process() throws EventDeliveryException {
  long batchStartTime = System.currentTimeMillis();
  long batchEndTime = System.currentTimeMillis() + (long)this.timeUpperLimit;

  try {
    boolean hasMessage = false;
    long startTime = System.nanoTime();

    while(this.eventList.size() < this.batchUpperLimit && System.currentTimeMillis() < batchEndTime) {
      hasMessage = this.hasNext();
      if(hasMessage) {
        MessageAndMetadata messageAndMetadata = this.it.next();
        byte[] kafkaMessage = (byte[]) messageAndMetadata.message();
        byte[] kafkaKey = (byte[]) messageAndMetadata.key();
        HashMap<String, String> headers = new HashMap<String, String>();
        headers.put("timestamp", String.valueOf(System.currentTimeMillis()));
        headers.put("topic", this.topic);
        if(kafkaKey != null) {
          headers.put("key", new String(kafkaKey));
        }

        if(log.isDebugEnabled()) {
          log.debug("Message: {}", new String(kafkaMessage));
        }

        Event event = EventBuilder.withBody(kafkaMessage, headers);
        this.eventList.add(event);
      }

      if(log.isDebugEnabled()) {
        log.debug("Waited: {} ", Long.valueOf(System.currentTimeMillis() - batchStartTime));
        log.debug("Event #: {}", Integer.valueOf(this.eventList.size()));
      }
    }

    long endTime = System.nanoTime();
    this.counter.addToKafkaEventGetTimer((endTime - startTime) / 1000000L);
    this.counter.addToEventReceivedCount((long) this.eventList.size());
    if(this.eventList.size() > 0) {
      this.getChannelProcessor().processEventBatch(this.eventList);
      this.counter.addToEventAcceptedCount((long)this.eventList.size());
      if(log.isDebugEnabled()) {
        // log before clearing, otherwise the size reported here is always zero
        log.debug("Wrote {} events to channel", this.eventList.size());
      }
      this.eventList.clear();

      if(!this.kafkaAutoCommitEnabled) {
        long commitStartTime = System.nanoTime();
        this.consumer.commitOffsets();
        long commitEndTime = System.nanoTime();
        this.counter.addToKafkaCommitTimer((commitEndTime - commitStartTime) / 1000000L);
      }
    }

    if(!hasMessage) {
      // count empty polls regardless of the log level
      this.counter.incrementKafkaEmptyCount();
      if(log.isDebugEnabled()) {
        log.debug("No more data to read");
      }

      //Thread.sleep(10);
    }

    // this source always signals READY instead of backing off
    return Status.READY;
  } catch (Exception ex) {
    log.error("KafkaSource EXCEPTION, {}", ex);
    return Status.BACKOFF;
  }
}
 
Example 13
Source File: KafkaSource.java    From flume-ng-extends-source with MIT License
public Status process() throws EventDeliveryException {

    byte[] kafkaMessage;
    byte[] kafkaKey;
    Event event;
    Map<String, String> headers;
    long batchStartTime = System.currentTimeMillis();
    long batchEndTime = System.currentTimeMillis() + timeUpperLimit;
    try {
      boolean iterStatus = false;
      long startTime = System.nanoTime();
      while (eventList.size() < batchUpperLimit &&
              System.currentTimeMillis() < batchEndTime) {
        iterStatus = hasNext();
        if (iterStatus) {
          // get next message
          MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
          kafkaMessage = messageAndMetadata.message();
          kafkaKey = messageAndMetadata.key();

          // Add headers to event (topic, timestamp, and key)
          headers = new HashMap<String, String>();
          headers.put(KafkaSourceConstants.TIMESTAMP,
                  String.valueOf(System.currentTimeMillis()));
          headers.put(KafkaSourceConstants.TOPIC, topic);
          if (kafkaKey != null) {
            headers.put(KafkaSourceConstants.KEY, new String(kafkaKey));
          }
          if (log.isDebugEnabled()) {
            log.debug("Message: {}", new String(kafkaMessage));
          }
          event = EventBuilder.withBody(kafkaMessage, headers);
          eventList.add(event);
        }
        if (log.isDebugEnabled()) {
          log.debug("Waited: {} ", System.currentTimeMillis() - batchStartTime);
          log.debug("Event #: {}", eventList.size());
        }
      }
      long endTime = System.nanoTime();
      counter.addToKafkaEventGetTimer((endTime-startTime)/(1000*1000));
      counter.addToEventReceivedCount(Long.valueOf(eventList.size()));
      // If we have events, send events to channel
      // clear the event list
      // and commit if Kafka doesn't auto-commit
      if (eventList.size() > 0) {
        getChannelProcessor().processEventBatch(eventList);
        counter.addToEventAcceptedCount(eventList.size());
        if (log.isDebugEnabled()) {
          // log before clearing so the reported count is not always zero
          log.debug("Wrote {} events to channel", eventList.size());
        }
        eventList.clear();
        if (!kafkaAutoCommitEnabled) {
          // commit the read transactions to Kafka to avoid duplicates
          long commitStartTime = System.nanoTime();
          consumer.commitOffsets();
          long commitEndTime = System.nanoTime();
          counter.addToKafkaCommitTimer((commitEndTime-commitStartTime)/(1000*1000));
        }
      }
      if (!iterStatus) {
        // count empty polls regardless of the log level
        counter.incrementKafkaEmptyCount();
        if (log.isDebugEnabled()) {
          log.debug("Returning with backoff. No more data to read");
        }
        return Status.BACKOFF;
      }
      return Status.READY;
    } catch (Exception e) {
      log.error("KafkaSource EXCEPTION, {}", e);
      return Status.BACKOFF;
    }
  }
 
Example 14
Source File: PlasmaBackEnd.java    From warp10-platform with Apache License 2.0
@Override
public void run() {
  long count = 0L;
  
  byte[] clslbls = new byte[16];
  
  try {
    ConsumerIterator<byte[],byte[]> iter = this.stream.iterator();

    byte[] inSipHashKey = backend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_BACKEND_IN);
    byte[] inAESKey = backend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_BACKEND_IN);

    byte[] outSipHashKey = backend.keystore.getKey(KeyStore.SIPHASH_KAFKA_PLASMA_BACKEND_OUT);
    byte[] outAESKey = backend.keystore.getKey(KeyStore.AES_KAFKA_PLASMA_BACKEND_OUT);

    // Iterate on the messages
    TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

    // TODO(hbs): allow setting of writeBufferSize

    while (iter.hasNext()) {
      //
      // Since the call to 'next' may block, we need to first
      // check that there is a message available
      //
      
      boolean nonEmpty = iter.nonEmpty();
      
      if (nonEmpty) {
        count++;
        MessageAndMetadata<byte[], byte[]> msg = iter.next();
        counters.count(msg.partition(), msg.offset());
        
        // Do nothing if there are no subscriptions
        if (null == backend.subscriptions || backend.subscriptions.isEmpty()) {
          continue;
        }
        
        byte[] data = msg.message();

        Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_BACKEND_KAFKA_IN_MESSAGES, Sensision.EMPTY_LABELS, 1);
        Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_BACKEND_KAFKA_IN_BYTES, Sensision.EMPTY_LABELS, data.length);
        
        if (null != inSipHashKey) {
          data = CryptoUtils.removeMAC(inSipHashKey, data);
        }
        
        // Skip data whose MAC was not verified successfully
        if (null == data) {
          Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_BACKEND_KAFKA_IN_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
          continue;
        }
        
        // Unwrap data if need be
        if (null != inAESKey) {
          data = CryptoUtils.unwrap(inAESKey, data);
        }
        
        // Skip data that was not unwrapped successfully
        if (null == data) {
          Sensision.update(SensisionConstants.SENSISION_CLASS_PLASMA_BACKEND_KAFKA_IN_INVALIDCIPHERS, Sensision.EMPTY_LABELS, 1);
          continue;
        }
        
        //
        // Extract KafkaDataMessage
        //
        
        KafkaDataMessage tmsg = new KafkaDataMessage();
        deserializer.deserialize(tmsg, data);
        
        switch(tmsg.getType()) {
          case STORE:
            backend.dispatch(clslbls, msg, tmsg, outSipHashKey, outAESKey);              
            break;
          case DELETE:
            break;
          default:
            throw new RuntimeException("Invalid message type.");
        }            
      } else {
        // Sleep a tiny while
        try {
          Thread.sleep(1L);
        } catch (InterruptedException ie) {             
        }
      }          
    }        
  } catch (Throwable t) {
    t.printStackTrace(System.err);
  } finally {
    // Set abort to true in case we exit the 'run' method
    backend.abort.set(true);
  }
}
 
Example 15
Source File: TestKafkaSinkV2.java    From suro with Apache License 2.0
/** Tests backward compatibility with the old Kafka sink. */
@Test
public void testBackwardCompatability() throws Exception {
    int numPartitions = 9;

    TopicCommand.createTopic(zk.getZkClient(),
            new TopicCommand.TopicCommandOptions(new String[]{
                    "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME_BACKWARD_COMPAT,
                    "--replication-factor", "2", "--partitions", Integer.toString(numPartitions)}));
    String keyTopicMap = String.format("   \"keyTopicMap\": {\n" +
            "        \"%s\": \"key\"\n" +
            "    }", TOPIC_NAME_BACKWARD_COMPAT);

    String description1 = "{\n" +
        "    \"type\": \"kafkaV1\",\n" +
        "    \"client.id\": \"kafkasink\",\n" +
        "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
        "    \"ack\": 1,\n" +
        keyTopicMap + "\n" +
        "}";
    String description2 = "{\n" +
        "    \"type\": \"kafkaV2\",\n" +
        "    \"client.id\": \"kafkasink\",\n" +
        "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
        "    \"request.required.acks\": 1,\n" +
        keyTopicMap + "\n" +
        "}";

    // setup sinks, both old and new versions
    ObjectMapper jsonMapper = new DefaultObjectMapper();
    jsonMapper.registerSubtypes(new NamedType(KafkaSink.class, "kafkaV1"));
    jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafkaV2"));
    jsonMapper.setInjectableValues(new InjectableValues() {
        @Override
        public Object findInjectableValue(Object valueId, DeserializationContext ctxt, BeanProperty forProperty, Object beanInstance) {
            if (valueId.equals(KafkaRetentionPartitioner.class.getName())) {
                return new KafkaRetentionPartitioner();
            } else {
                return null;
            }
        }
    });
    KafkaSink sinkV1 = jsonMapper.readValue(description1, new TypeReference<Sink>(){});
    KafkaSinkV2 sinkV2 = jsonMapper.readValue(description2, new TypeReference<Sink>(){});
    sinkV1.open();
    sinkV2.open();
    List<Sink> sinks = new ArrayList<Sink>();
    sinks.add(sinkV1);
    sinks.add(sinkV2);

    // setup Kafka consumer (to read back messages)
    ConsumerConnector consumer = kafka.consumer.Consumer.createJavaConsumerConnector(
        createConsumerConfig("localhost:" + zk.getServerPort(), "gropuid"));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC_NAME_BACKWARD_COMPAT, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = 
                                            consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(TOPIC_NAME_BACKWARD_COMPAT).get(0);

    // Send 20 test messages, using the old and new Kafka sinks.
    // Retrieve the messages and ensure that they are identical and sent to the same partition.
    Random rand = new Random();
    int messageCount = 20;
    for (int i = 0; i < messageCount; ++i) {
        Map<String, Object> msgMap = new ImmutableMap.Builder<String, Object>()
                .put("key", new Long( rand.nextLong() ) )
                .put("value", "message:" + i).build();

        // send message to both sinks
        for( Sink sink : sinks ){
          sink.writeTo(new DefaultMessageContainer(
                new Message(TOPIC_NAME_BACKWARD_COMPAT, jsonMapper.writeValueAsBytes(msgMap)),
                jsonMapper));
        }

        // read two copies of message back from Kafka and check that partitions and data match
        MessageAndMetadata<byte[], byte[]> msgAndMeta1 = stream.iterator().next();
        MessageAndMetadata<byte[], byte[]> msgAndMeta2 = stream.iterator().next();
        System.out.println( "iteration: "+i+" partition1: "+msgAndMeta1.partition() );
        System.out.println( "iteration: "+i+" partition2: "+msgAndMeta2.partition() );
        assertEquals(msgAndMeta1.partition(), msgAndMeta2.partition());
        String msg1Str = new String( msgAndMeta1.message() );
        String msg2Str = new String( msgAndMeta2.message() );
        System.out.println( "iteration: "+i+" message1: "+msg1Str );
        System.out.println( "iteration: "+i+" message2: "+msg2Str );
        assertEquals(msg1Str, msg2Str);
    }

    // close sinks
    sinkV1.close();
    sinkV2.close();
    // close consumer
    try {
        stream.iterator().next();
        fail(); // there should be no data left to consume
    } catch (ConsumerTimeoutException e) {
        //this is expected
        consumer.shutdown();
    }
}
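
The ConsumerTimeoutException expected at the end of the test can only occur if consumer.timeout.ms is set; otherwise the final stream.iterator().next() would block forever. The createConsumerConfig helper is not shown above; a hedged sketch of what it plausibly contains:

private static ConsumerConfig createConsumerConfig(String zkConnect, String groupId) {
    Properties props = new Properties();
    props.put("zookeeper.connect", zkConnect);
    props.put("group.id", groupId);
    props.put("auto.offset.reset", "smallest");
    // without this the final stream.iterator().next() would block instead of
    // throwing the ConsumerTimeoutException the test expects
    props.put("consumer.timeout.ms", "1000");
    return new ConsumerConfig(props);
}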
 
Example 16
Source File: Kafka09DataWriterTest.java    From incubator-gobblin with Apache License 2.0
@Test
public void testKeyedAvroSerialization()
    throws IOException, InterruptedException, SchemaRegistryException {
  String topic = "testAvroSerialization09";
  _kafkaTestHelper.provisionTopic(topic);
  Properties props = new Properties();
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
      "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer",
      LiAvroSerializer.class.getName());
  props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYED_CONFIG, "true");
  String keyField = "field1";
  props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYFIELD_CONFIG, keyField);


  // set up mock schema registry

  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX
          + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
      ConfigDrivenMd5SchemaRegistry.class.getCanonicalName());

  Kafka09DataWriter<String, GenericRecord> kafka09DataWriter = new Kafka09DataWriter<>(props);
  WriteCallback callback = mock(WriteCallback.class);

  GenericRecord record = TestUtils.generateRandomAvroRecord();
  try {
    kafka09DataWriter.write(record, callback);
  } finally {
    kafka09DataWriter.close();
  }

  verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
  verify(callback, never()).onFailure(isA(Exception.class));
  MessageAndMetadata<byte[], byte[]> value = _kafkaTestHelper.getIteratorForTopic(topic).next();
  byte[] key = value.key();
  byte[] message = value.message();
  ConfigDrivenMd5SchemaRegistry schemaReg = new ConfigDrivenMd5SchemaRegistry(topic, record.getSchema());
  LiAvroDeserializer deser = new LiAvroDeserializer(schemaReg);
  GenericRecord receivedRecord = deser.deserialize(topic, message);
  Assert.assertEquals(record.toString(), receivedRecord.toString());
  Assert.assertEquals(new String(key), record.get(keyField));
}
 
Example 17
Source File: Kafka09DataWriterTest.java    From incubator-gobblin with Apache License 2.0
@Test
public void testValueSerialization()
    throws IOException, InterruptedException, SchemaRegistryException {
  String topic = "testAvroSerialization09";
  _kafkaTestHelper.provisionTopic(topic);
  Properties props = new Properties();
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
      "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer",
  "org.apache.kafka.common.serialization.StringSerializer");
  props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYED_CONFIG, "true");
  String keyField = "field1";
  props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYFIELD_CONFIG, keyField);
  props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_VALUEFIELD_CONFIG, keyField);


  // set up mock schema registry

  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX
          + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
      ConfigDrivenMd5SchemaRegistry.class.getCanonicalName());

  Kafka09DataWriter<String, GenericRecord> kafka09DataWriter = new Kafka09DataWriter<>(props);
  WriteCallback callback = mock(WriteCallback.class);

  GenericRecord record = TestUtils.generateRandomAvroRecord();
  try {
    kafka09DataWriter.write(record, callback);
  } finally {
    kafka09DataWriter.close();
  }

  verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
  verify(callback, never()).onFailure(isA(Exception.class));
  MessageAndMetadata<byte[], byte[]> value = _kafkaTestHelper.getIteratorForTopic(topic).next();
  byte[] key = value.key();
  byte[] message = value.message();
  Assert.assertEquals(new String(message), record.get(keyField));
  Assert.assertEquals(new String(key), record.get(keyField));
}
 
Example 18
Source File: ThrottlingManagerEstimatorConsumerFactory.java    From warp10-platform with Apache License 2.0
@Override
public Runnable getConsumer(final KafkaSynchronizedConsumerPool pool, final KafkaStream<byte[], byte[]> stream) {
  
  return new Runnable() {          
    @Override
    public void run() {
      ConsumerIterator<byte[],byte[]> iter = stream.iterator();

      // Iterate on the messages
      TDeserializer deserializer = new TDeserializer(new TCompactProtocol.Factory());

      KafkaOffsetCounters counters = pool.getCounters();
      
      try {
        while (iter.hasNext()) {
          //
          // Since the call to 'next' may block, we need to first
          // check that there is a message available
          //
          
          boolean nonEmpty = iter.nonEmpty();
          
          if (nonEmpty) {
            MessageAndMetadata<byte[], byte[]> msg = iter.next();
            counters.count(msg.partition(), msg.offset());
            
            byte[] data = msg.message();

            Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_KAFKA_THROTTLING_IN_MESSAGES, Sensision.EMPTY_LABELS, 1);
            Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_KAFKA_THROTTLING_IN_BYTES, Sensision.EMPTY_LABELS, data.length);
            
            if (null != macKey) {
              data = CryptoUtils.removeMAC(macKey, data);
            }
            
            // Skip data whose MAC was not verified successfully
            if (null == data) {
              Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_KAFKA_THROTTLING_IN_INVALIDMACS, Sensision.EMPTY_LABELS, 1);
              continue;
            }

            //
            // Update throttling manager
            //
            
            try {
              ThrottlingManager.fuse(HyperLogLogPlus.fromBytes(data));
              Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_THROTLLING_FUSIONS, Sensision.EMPTY_LABELS, 1);
            } catch (Exception e) {
              Sensision.update(SensisionConstants.CLASS_WARP_INGRESS_THROTLLING_FUSIONS_FAILED, Sensision.EMPTY_LABELS, 1);
            }
          }
        }        
      } catch (Throwable t) {
        t.printStackTrace(System.err);
      } finally {
        // Set abort to true in case we exit the 'run' method
        pool.getAbort().set(true);
      }
    }
  };
}
 
Example 19
Source File: InjectorSupport.java    From wisp with Apache License 2.0
@Override
public void run() {

    LOGGER.info("start to run Injector{} for Topic{}", WispKafkaInjector.class.toString(), topic);

    while (it.hasNext()) {

        try {

            MessageAndMetadata<byte[], byte[]> mm = it.next();
            String message = new String(mm.message());

            // partition && offset
            long partition = mm.partition();
            long offset = mm.offset();

            MysqlEntry entry = gson.fromJson(message, MysqlEntry.class);

            // wrap
            MysqlEntryWrap mysqlEntryWrap = new MysqlEntryWrap(topic, entry);

            LOGGER.debug(message);

            // compute the delay times
            long now = System.currentTimeMillis();
            long elapsedSinceMysql = (now - entry.getTime()) / 1000;
            long elapsedSinceCanal = (now - entry.getCanalTime()) / 1000;

            String originTableName = entry.getTable();

            if (injectorEventProcessTemplate != null) {
                injectorEventProcessTemplate.processEntry(mysqlEntryWrap);
            }

            LOGGER.info(
                    "Topic({}) Succeed to do Event{} inject from Table{}, mysql_delay={}, "
                            + "canal_delay={}, partition={}, offset={}",
                    topic,
                    entry.getEvent(),
                    originTableName, elapsedSinceMysql, elapsedSinceCanal, partition, offset);

        } catch (Throwable e) {

            LOGGER.error(e.toString());
        }
    }

}
 
Example 20
Source File: KafkaMessageAdapter.java    From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: messageAdapter</p>
 * <p>Description: message adaptation method</p>
 *
 * @param messageAndMetadata the message and metadata
 * @throws MQException the mq exception
 */
public void messageAdapter(MessageAndMetadata<?, ?> messageAndMetadata) throws MQException {

    byte[] keyBytes = (byte[]) messageAndMetadata.key();

    byte[] valBytes = (byte[]) messageAndMetadata.message();

    K k = decoder.decodeKey(keyBytes);

    V v = decoder.decodeVal(valBytes);

    messageListener.onMessage(k, v);
}
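
A hedged sketch of how messageAdapter might be driven from a consumer loop; the stream and adapter variables, and the error handling, are assumptions for illustration rather than code from the framework above.

// 'stream' is a KafkaStream<byte[], byte[]> and 'adapter' is an instance of the
// KafkaMessageAdapter shown above; framework imports are omitted here.
for (MessageAndMetadata<byte[], byte[]> messageAndMetadata : stream) {
    try {
        adapter.messageAdapter(messageAndMetadata);
    } catch (MQException e) {
        // how failures are handled (retry, skip, stop) is up to the caller
        e.printStackTrace();
    }
}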