kafka.consumer.KafkaStream Java Examples

The following examples show how to use kafka.consumer.KafkaStream, the per-partition message stream returned by Kafka's old high-level consumer API. Each example is drawn from an open-source project; the source file and originating project are noted above it.
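
Most of the examples below share the same skeleton: build a ConsumerConfig from Properties, create a ConsumerConnector, request one or more KafkaStreams per topic via createMessageStreams, and drain each stream through its ConsumerIterator. The following is a minimal sketch of that pattern, not taken from any of the projects below; the ZooKeeper address, group id, and topic name are placeholders.

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class MinimalConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder
        props.put("group.id", "example-group");           // placeholder

        ConsumerConnector connector =
                Consumer.createJavaConsumerConnector(new ConsumerConfig(props));

        // Request a single stream for the topic.
        Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
        topicCountMap.put("example-topic", 1);
        Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                connector.createMessageStreams(topicCountMap);

        // Drain the stream; hasNext() blocks unless consumer.timeout.ms is set.
        ConsumerIterator<byte[], byte[]> it =
                streams.get("example-topic").get(0).iterator();
        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> record = it.next();
            System.out.println(new String(record.message()));
        }
        connector.shutdown();
    }
}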
Example #1
Source File: Kafka.java    From jlogstash-input-plugin with Apache License 2.0
public void addNewConsumer(String topic, Integer threads){
	ConsumerConnector consumer = consumerConnMap.get(topic);

	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(topic, threads);
	Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
			consumer.createMessageStreams(topicCountMap);

	List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
	ExecutorService executor = Executors.newFixedThreadPool(threads);

	// One worker thread per stream.
	for (final KafkaStream<byte[], byte[]> stream : streams) {
		executor.submit(new Consumer(stream, this));
	}

	executorMap.put(topic, executor);
}
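
The Consumer runnable submitted above is defined elsewhere in the jlogstash project and is not shown on this page. A hypothetical worker for this executor-per-stream pattern (class name and constructor are illustrative) just drains the stream's iterator:

import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.message.MessageAndMetadata;

// Hypothetical worker; the project's actual Consumer class is not shown here.
class StreamWorker implements Runnable {
    private final KafkaStream<byte[], byte[]> stream;

    StreamWorker(KafkaStream<byte[], byte[]> stream) {
        this.stream = stream;
    }

    @Override
    public void run() {
        ConsumerIterator<byte[], byte[]> it = stream.iterator();
        while (it.hasNext()) {
            MessageAndMetadata<byte[], byte[]> record = it.next();
            // Decode and handle the payload; a real worker would hand it to the pipeline.
            System.out.println(record.topic() + "@" + record.offset() + ": "
                    + new String(record.message()));
        }
    }
}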
 
Example #2
Source File: FastKafkaSource.java    From fraud-detection-tutorial with Apache License 2.0
public synchronized void start() {
  log.info("Starting {}...", this);

  try {
    this.consumer = KafkaSourceUtil.getConsumer(this.kafkaProps);
  } catch (Exception e) {
    throw new FlumeException("Unable to create consumer. Check whether the ZooKeeper server is up and that the Flume agent can connect to it.", e);
  }

  Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
  topicCountMap.put(this.topic, Integer.valueOf(1));

  try {
    Map<String, List<KafkaStream<byte[], byte[]>>> streamMap =
        this.consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = streamMap.get(this.topic).get(0);
    this.it = stream.iterator();
  } catch (Exception e) {
    throw new FlumeException("Unable to get message iterator from Kafka", e);
  }

  log.info("Kafka source {} started.", this.getName());
  this.counter.start();
  super.start();
}
 
Example #3
Source File: KafkaMqCollect.java    From light_drtc with Apache License 2.0
public void collectMq(){
	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(Constants.kfTopic, Integer.valueOf(1));

	StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
	StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

	Map<String, List<KafkaStream<String, String>>> consumerMap =
			consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);

	KafkaStream<String, String> stream = consumerMap.get(Constants.kfTopic).get(0);
	ConsumerIterator<String, String> it = stream.iterator();
	MessageAndMetadata<String, String> msgMeta;
	while (it.hasNext()){
		msgMeta = it.next();
		super.mqTimer.parseMqText(msgMeta.key(), msgMeta.message());
		//System.out.println(msgMeta.key()+"\t"+msgMeta.message());
	}
}
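
Examples like this one pass explicit key and value decoders so the stream yields Strings instead of byte arrays. A custom decoder is a small class implementing kafka.serializer.Decoder; this sketch is illustrative (StringDecoder in the example above works the same way):

import java.nio.charset.StandardCharsets;
import kafka.serializer.Decoder;

// Illustrative custom decoder, not part of the project above.
public class UpperCaseDecoder implements Decoder<String> {
    @Override
    public String fromBytes(byte[] bytes) {
        return new String(bytes, StandardCharsets.UTF_8).toUpperCase();
    }
}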
 
Example #4
Source File: ConsumerGroup.java    From yuzhouwan with Apache License 2.0
private void run(int threadNum) {
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(topic, threadNum);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

    executor = Executors.newFixedThreadPool(threadNum);

    int threadNumber = 0;
    _log.info("the streams size is {}", streams.size());
    for (final KafkaStream<byte[], byte[]> stream : streams) {
        executor.submit(new ConsumerWorker(stream, threadNumber));
        consumer.commitOffsets();
        threadNumber++;
    }
}
 
Example #5
Source File: KafkaWorker.java    From elasticsearch-river-kafka with Apache License 2.0
@Override
public void run() {

    logger.debug("Index: {}: Kafka worker started...", riverConfig.getIndexName());

    if (consume) {
        logger.debug("Index: {}: Consumer is already running, new one will not be started...", riverConfig.getIndexName());
        return;
    }

    consume = true;
    try {
        logger.debug("Index: {}: Kafka consumer started...", riverConfig.getIndexName());

        while (consume) {
            KafkaStream stream = chooseRandomStream(kafkaConsumer.getStreams());
            consumeMessagesAndAddToBulkProcessor(stream);
        }
    } finally {
        logger.debug("Index: {}: Kafka consumer has stopped...", riverConfig.getIndexName());
        consume = false;
    }
}
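
chooseRandomStream is a private helper of this worker and is not shown on this page. A plausible reconstruction, assuming it merely picks one of the connector's streams at random:

// Hypothetical reconstruction of the helper used above.
private KafkaStream<byte[], byte[]> chooseRandomStream(List<KafkaStream<byte[], byte[]>> streams) {
    int index = java.util.concurrent.ThreadLocalRandom.current().nextInt(streams.size());
    return streams.get(index);
}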
 
Example #6
Source File: KafkaReceiver.java    From koper with Apache License 2.0
private void processStreamsByTopic(String topicKeys, List<KafkaStream<byte[], byte[]>> streamList) {
    // init stream thread pool
    ExecutorService streamPool = Executors.newFixedThreadPool(partitions);
    String[] topics = StringUtils.split(topicKeys, ",");
    if (log.isDebugEnabled())
        log.debug("Preparing to process KafkaStream list, topic count={}, topics={}, partitions/topic={}", topics.length, topicKeys, partitions);

    // iterate over the streams
    AtomicInteger index = new AtomicInteger(0);
    for (KafkaStream<byte[], byte[]> stream : streamList) {
        Thread streamThread = new Thread() {

            @Override
            public void run() {
                int i = index.getAndAdd(1);
                if (log.isDebugEnabled())
                    log.debug("Processing KafkaStream -- No.={}, partitions={}", i, partitions + ":" + i);

                ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();

                processStreamByConsumer(topicKeys, consumerIterator);
            }
        };
        streamPool.execute(streamThread);
    }
}
 
Example #7
Source File: KafkaConsumer.java    From sqoop-on-spark with Apache License 2.0
public MessageAndMetadata getNextMessage(String topic) {
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
  // there is only a single stream, because there is only one consumer
  KafkaStream stream = streams.get(0);
  final ConsumerIterator<byte[], byte[]> it = stream.iterator();
  try {
    if (it.hasNext()) {
      return it.next();
    } else {
      return null;
    }
  } catch (ConsumerTimeoutException e) {
    logger.error("0 messages available to fetch for the topic " + topic);
    return null;
  }
}
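
Note that ConsumerTimeoutException is only thrown when the consumer was created with consumer.timeout.ms set; otherwise hasNext() blocks indefinitely. A minimal sketch of that setting (the 10-second value mirrors the test configuration in Example #24):

Properties props = new Properties();
// With this set, ConsumerIterator.hasNext() throws ConsumerTimeoutException
// after 10s with no messages instead of blocking forever.
props.put("consumer.timeout.ms", "10000");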
 
Example #8
Source File: NativeConsumer.java    From spring-kafka-demo with Apache License 2.0
public void run(int a_numThreads) {
	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(topic, Integer.valueOf(a_numThreads));
	Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
	List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

	// now launch all the threads
	//
	executor = Executors.newFixedThreadPool(a_numThreads);

	// now create an object to consume the messages
	//
	int threadNumber = 0;
	for (final KafkaStream stream : streams) {
		executor.submit(new ConsumerTest(stream, threadNumber));
		threadNumber++;
	}
}
 
Example #9
Source File: KafkaOffsetGetter.java    From Kafka-Insight with Apache License 2.0
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * The general contract of the method <code>run</code> is that it may
 * take any action whatsoever.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    ConsumerConnector consumerConnector = KafkaUtils.createConsumerConnector(zkAddr, group);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(CONSUMER_OFFSET_TOPIC, Integer.valueOf(1));
    KafkaStream<byte[], byte[]> offsetMsgStream = consumerConnector.createMessageStreams(topicCountMap).get(CONSUMER_OFFSET_TOPIC).get(0);

    ConsumerIterator<byte[], byte[]> it = offsetMsgStream.iterator();
    while (true) {

        MessageAndMetadata<byte[], byte[]> offsetMsg = it.next();
        // Key versions 0 and 1 are offset commit entries; version 2 holds group metadata.
        if (ByteBuffer.wrap(offsetMsg.key()).getShort() < 2) {
            try {
                GroupTopicPartition commitKey = readMessageKey(ByteBuffer.wrap(offsetMsg.key()));
                if (offsetMsg.message() == null) {
                    continue;
                }
                kafka.common.OffsetAndMetadata commitValue = readMessageValue(ByteBuffer.wrap(offsetMsg.message()));
                kafkaConsumerOffsets.put(commitKey, commitValue);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
}
 
Example #10
Source File: KafkaReceiver.java    From koper with Apache License 2.0
/**
 * Start the MessageReceiver and begin listening for topic messages.
 */
@Override
public void start() {

    if (consumer == null) {
        //sync init
        synchronized (lock) {
            init();
        }
    }

    String topicString = buildTopicsString();

    Whitelist topicFilter = new Whitelist(topicString);
    List<KafkaStream<byte[], byte[]>> streamList = consumer.createMessageStreamsByFilter(topicFilter, partitions);

    if (org.apache.commons.collections.CollectionUtils.isEmpty(streamList))
        try {
            TimeUnit.MILLISECONDS.sleep(1);
        } catch (InterruptedException e) {
            log.warn(e.getMessage(), e);
        }
    processStreamsByTopic(topicString, streamList);

}
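
buildTopicsString() is not shown on this page. Kafka's Whitelist treats a comma-separated list as alternative topic patterns, so a plausible (hypothetical) implementation simply joins the receiver's configured topic names:

// Hypothetical reconstruction; Whitelist treats commas as pattern separators.
private String buildTopicsString() {
    return String.join(",", topics); // 'topics' assumed to hold the configured topic names
}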
 
Example #11
Source File: KafkaSourceOp.java    From PoseidonX with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
public void initialize()
    throws StreamingException
{
    ConsumerConfig consumerConfig = new ConsumerConfig(kafkaProperties);
    consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);

    Map<String, Integer> topicCountMap = Maps.newHashMap();
    topicCountMap.put(topic, TOPIC_COUNT);

    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
        consumerConnector.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
    consumerIterator = stream.iterator();
}
 
Example #12
Source File: KafkaTestBase.java    From incubator-gobblin with Apache License 2.0
KafkaConsumerSuite(String zkConnectString, String topic)
{
  _topic = topic;
  Properties consumeProps = new Properties();
  consumeProps.put("zookeeper.connect", zkConnectString);
  consumeProps.put("group.id", _topic+"-"+System.nanoTime());
  consumeProps.put("zookeeper.session.timeout.ms", "10000");
  consumeProps.put("zookeeper.sync.time.ms", "10000");
  consumeProps.put("auto.commit.interval.ms", "10000");
  consumeProps.put("_consumer.timeout.ms", "10000");

  _consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));

  Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
      _consumer.createMessageStreams(ImmutableMap.of(this._topic, 1));
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this._topic);
  _stream = streams.get(0);
  _iterator = _stream.iterator();
}
 
Example #13
Source File: KafkaConsumer.java    From incubator-iotdb with Apache License 2.0
private void consume() {
  /**
   * Specify the number of consumer threads
   */
  Map<String, Integer> topicCountMap = new HashMap<>();
  topicCountMap.put(Constant.TOPIC, Constant.CONSUMER_THREAD_NUM);

  /**
   * Specify the data decoders
   */
  StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
  StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

  Map<String, List<KafkaStream<String, String>>> consumerMap = consumer
      .createMessageStreams(topicCountMap, keyDecoder,
          valueDecoder);

  List<KafkaStream<String, String>> streams = consumerMap.get(Constant.TOPIC);
  ExecutorService executor = Executors.newFixedThreadPool(Constant.CONSUMER_THREAD_NUM);
  for (final KafkaStream<String, String> stream : streams) {
    executor.submit(new KafkaConsumerThread(stream));
  }
}
 
Example #14
Source File: KafkaWorker.java    From elasticsearch-river-kafka with Apache License 2.0
/**
 * Consumes the messages from the partition via specified stream.
 */
private void consumeMessagesAndAddToBulkProcessor(final KafkaStream stream) {

    try {
        // By default the iterator blocks forever waiting for a message, but a consumer timeout is configured here.
        final ConsumerIterator<byte[], byte[]> consumerIterator = stream.iterator();

        // Consume all the messages of the stream (partition)
        while (consumerIterator.hasNext() && consume) {

            final MessageAndMetadata messageAndMetadata = consumerIterator.next();
            logMessage(messageAndMetadata);

            elasticsearchProducer.addMessagesToBulkProcessor(messageAndMetadata);

            // StatsD reporting
            stats.messagesReceived.incrementAndGet();
            stats.lastCommitOffsetByPartitionId.put(messageAndMetadata.partition(), messageAndMetadata.offset());
        }
    } catch (ConsumerTimeoutException ex) {
        logger.debug("Nothing to be consumed for now. Consume flag is: {}", consume);
    }
}
 
Example #15
Source File: KafkaConsumer.java    From flume-ng-kafka-sink with Apache License 2.0
public MessageAndMetadata getNextMessage(String topic){
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    KafkaStream stream = streams.get(0); // it has only a single stream, because there is only one consumer
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();
    int counter = 0;
    while (!it.hasNext()){
        // After waiting a total of 10s (5 polls x 2s), give up and return null.
        if(counter == 5){
            logger.error("0 messages available to fetch for the topic " + topic);
            return null;
        }
        // Wait until a message is published, polling every two seconds.
        try {
            Thread.sleep(2 * 1000);
        } catch (InterruptedException e) {
            // ignore
        }
        counter++;
    }
    return it.next();
}
 
Example #16
Source File: KafkaProducerServiceIntegrationTest.java    From vertx-kafka-service with Apache License 2.0
private void consumeMessages() {
    final Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC, 1);
    final StringDecoder decoder =
            new StringDecoder(new VerifiableProperties());
    final Map<String, List<KafkaStream<String, String>>> consumerMap =
            consumer.createMessageStreams(topicCountMap, decoder, decoder);
    final KafkaStream<String, String> stream =
            consumerMap.get(TOPIC).get(0);
    final ConsumerIterator<String, String> iterator = stream.iterator();

    Thread kafkaMessageReceiverThread = new Thread(
            () -> {
                while (iterator.hasNext()) {
                    String msg = iterator.next().message();
                    msg = msg == null ? "<null>" : msg;
                    System.out.println("got message: " + msg);
                    messagesReceived.add(msg);
                }
            },
            "kafkaMessageReceiverThread"
    );
    kafkaMessageReceiverThread.start();

}
 
Example #17
Source File: KafkaClient.java    From opensoc-streaming with Apache License 2.0
public void run(int a_numThreads) {
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, Integer.valueOf(a_numThreads));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
 
    logger.debug( "streams.size = " + streams.size() );
    
    // now launch all the threads
    //
    executor = Executors.newFixedThreadPool(a_numThreads);

    // now create an object to consume the messages
    //
    int threadNumber = 0;
    for (final KafkaStream stream : streams) {
        executor.submit(new KafkaConsumer(this.remote, stream, threadNumber));
        threadNumber++;
    }
}
 
Example #18
Source File: LegacyKafkaMessageIterator.java    From secor with Apache License 2.0
@Override
public void init(SecorConfig config) throws UnknownHostException {
    this.mConfig = config;

    mConsumerConnector = Consumer.createJavaConsumerConnector(createConsumerConfig());

    if (!mConfig.getKafkaTopicBlacklist().isEmpty() && !mConfig.getKafkaTopicFilter().isEmpty()) {
        throw new RuntimeException("Topic filter and blacklist cannot be both specified.");
    }
    TopicFilter topicFilter = !mConfig.getKafkaTopicBlacklist().isEmpty() ? new Blacklist(mConfig.getKafkaTopicBlacklist()) :
            new Whitelist(mConfig.getKafkaTopicFilter());
    LOG.debug("Use TopicFilter {}({})", topicFilter.getClass(), topicFilter);
    List<KafkaStream<byte[], byte[]>> streams =
            mConsumerConnector.createMessageStreamsByFilter(topicFilter);
    KafkaStream<byte[], byte[]> stream = streams.get(0);
    mIterator = stream.iterator();
    mKafkaMessageTimestampFactory = new KafkaMessageTimestampFactory(mConfig.getKafkaMessageTimestampClass());
}
 
Example #19
Source File: WhenSpec.java    From Decision with Apache License 2.0
@When("^I listen to a stream with name '(.*?)'$")
public void streamListen(
        @Transform(NullableStringConverter.class) String streamName) {
    commonspec.getLogger().info("Listening to stream {}", streamName);

    try {
        KafkaStream<String, StratioStreamingMessage> listener = commonspec
                .getStratioStreamingAPI().listenStream(streamName);
        commonspec.setStreamListener(listener);
    } catch (Exception e) {
        commonspec.getExceptions().add(e);
        commonspec
                .getLogger()
                .info("Caught an exception whilst listening to the stream {} : {}",
                        streamName, e);
    }
}
 
Example #20
Source File: KafkaDataProvider.java    From linden with Apache License 2.0
public KafkaDataProvider(String zookeeper, String topic, String groupId) {
  super(MessageAndMetadata.class);
  Properties props = new Properties();
  props.put("zookeeper.connect", zookeeper);
  props.put("group.id", groupId);
  props.put("zookeeper.session.timeout.ms", "30000");
  props.put("auto.commit.interval.ms", "1000");
  props.put("fetch.message.max.bytes", "4194304");
  consumer = kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
  Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
  topicCountMap.put(topic, 1);
  Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
  KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);

  iter = stream.iterator();
}
 
Example #21
Source File: SimpleKafkaConsumer.java    From incubator-gobblin with Apache License 2.0
public SimpleKafkaConsumer(Properties props, KafkaCheckpoint checkpoint)
{
  Config config = ConfigFactory.parseProperties(props);
  topic = config.getString("topic");
  String zkConnect = config.getString("zookeeper.connect");

  schemaRegistry = KafkaSchemaRegistryFactory.getSchemaRegistry(props);
  deserializer = new LiAvroDeserializer(schemaRegistry);
  /** TODO: Make Confluent schema registry integration configurable
   * HashMap<String, String> avroSerDeConfig = new HashMap<>();
   * avroSerDeConfig.put("schema.registry.url", "http://localhost:8081");
   * deserializer = new io.confluent.kafka.serializers.KafkaAvroDeserializer();
   * deserializer.configure(avroSerDeConfig, false);
   *
   **/

  Properties consumeProps = new Properties();
  consumeProps.put("zookeeper.connect", zkConnect);
  consumeProps.put("group.id", "gobblin-tool-" + System.nanoTime());
  consumeProps.put("zookeeper.session.timeout.ms", "10000");
  consumeProps.put("zookeeper.sync.time.ms", "10000");
  consumeProps.put("auto.commit.interval.ms", "10000");
  consumeProps.put("auto.offset.reset", "smallest");
  consumeProps.put("auto.commit.enable", "false");
  //consumeProps.put("consumer.timeout.ms", "10000");

  consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));

  Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(ImmutableMap.of(topic, 1));
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this.topic);
  stream = streams.get(0);

  iterator = stream.iterator();
}
 
Example #22
Source File: KafkaConsumer.java    From pentaho-kafka-consumer with Apache License 2.0
public boolean init(StepMetaInterface smi, StepDataInterface sdi) {
    super.init(smi, sdi);

    KafkaConsumerMeta meta = (KafkaConsumerMeta) smi;
    KafkaConsumerData data = (KafkaConsumerData) sdi;

    Properties properties = meta.getKafkaProperties();
    Properties substProperties = new Properties();
    for (Entry<Object, Object> e : properties.entrySet()) {
        substProperties.put(e.getKey(), environmentSubstitute(e.getValue().toString()));
    }
    if (meta.isStopOnEmptyTopic()) {

        // If there isn't already a provided value, set a default of 1s
        if (!substProperties.containsKey(CONSUMER_TIMEOUT_KEY)) {
            substProperties.put(CONSUMER_TIMEOUT_KEY, "1000");
        }
    } else {
        if (substProperties.containsKey(CONSUMER_TIMEOUT_KEY)) {
            logError(Messages.getString("KafkaConsumer.WarnConsumerTimeout"));
        }
    }
    ConsumerConfig consumerConfig = new ConsumerConfig(substProperties);

    logBasic(Messages.getString("KafkaConsumer.CreateKafkaConsumer.Message", consumerConfig.zkConnect()));
    data.consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    String topic = environmentSubstitute(meta.getTopic());
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streamsMap = data.consumer.createMessageStreams(topicCountMap);
    logDebug("Received streams map: " + streamsMap);
    data.streamIterator = streamsMap.get(topic).get(0).iterator();

    return true;
}
 
Example #23
Source File: KafkaSpout.java    From monasca-thresh with Apache License 2.0
@Override
public void activate() {
  logger.info("Activated");
  if (streams == null) {
    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(kafkaSpoutConfig.kafkaConsumerConfiguration.getTopic(), Integer.valueOf(1));
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
        consumerConnector.createMessageStreams(topicCountMap);
    streams = consumerMap.get(kafkaSpoutConfig.kafkaConsumerConfiguration.getTopic());
  }
}
 
Example #24
Source File: KafkaTestBase.java    From incubator-gobblin with Apache License 2.0
public KafkaTestBase(String topic) throws InterruptedException, RuntimeException {

    startServer();

    this.topic = topic;

    AdminUtils.createTopic(zkClient, topic, 1, 1, new Properties());

    List<KafkaServer> servers = new ArrayList<>();
    servers.add(kafkaServer);
    TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);

    Properties consumeProps = new Properties();
    consumeProps.put("zookeeper.connect", zkConnect);
    consumeProps.put("group.id", "testConsumer");
    consumeProps.put("zookeeper.session.timeout.ms", "10000");
    consumeProps.put("zookeeper.sync.time.ms", "10000");
    consumeProps.put("auto.commit.interval.ms", "10000");
    consumeProps.put("consumer.timeout.ms", "10000");

    consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));

    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(this.topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this.topic);
    stream = streams.get(0);

    iterator = stream.iterator();
}
 
Example #25
Source File: Store.java    From warp10-platform with Apache License 2.0
public StoreConsumer(Table table, Store store, KafkaStream<byte[], byte[]> stream, KafkaOffsetCounters counters) {
  this.store = store;
  this.stream = stream;
  this.puts = new ArrayList<Put>();
  this.counters = counters;
  this.table = table;
  this.hbaseAESKey = store.keystore.getKey(KeyStore.AES_HBASE_DATA);
}
 
Example #26
Source File: IngestFromKafkaDriver.java    From geowave with Apache License 2.0
public <T> void consumeFromTopic(
    final String formatPluginName,
    final GeoWaveAvroFormatPlugin<T, ?> avroFormatPlugin,
    final KafkaIngestRunData ingestRunData,
    final List<String> queue) {

  final ConsumerConnector consumer = buildKafkaConsumer();
  if (consumer == null) {
    throw new RuntimeException(
        "Kafka consumer connector is null, unable to create message streams");
  }
  try {
    LOGGER.debug(
        "Kafka consumer setup for format ["
            + formatPluginName
            + "] against topic ["
            + formatPluginName
            + "]");
    final Map<String, Integer> topicCount = new HashMap<>();
    topicCount.put(formatPluginName, 1);

    final Map<String, List<KafkaStream<byte[], byte[]>>> consumerStreams =
        consumer.createMessageStreams(topicCount);
    final List<KafkaStream<byte[], byte[]>> streams = consumerStreams.get(formatPluginName);

    queue.remove(formatPluginName);
    consumeMessages(formatPluginName, avroFormatPlugin, ingestRunData, streams.get(0));
  } finally {
    consumer.shutdown();
  }
}
 
Example #27
Source File: HighLevelConsumerExample.java    From pulsar with Apache License 2.0
private static void consumeMessage(Arguments arguments) {

    Properties properties = new Properties();
    properties.put("zookeeper.connect", arguments.serviceUrl);
    properties.put("group.id", arguments.groupName);
    properties.put("consumer.id", "cons1");
    properties.put("auto.commit.enable", Boolean.toString(!arguments.autoCommitDisable));
    properties.put("auto.commit.interval.ms", "100");
    properties.put("queued.max.message.chunks", "100");

    ConsumerConfig consumerConfig = new ConsumerConfig(properties);
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = Collections.singletonMap(arguments.topicName, 2);
    Map<String, List<KafkaStream<String, Tweet>>> streams = connector.createMessageStreams(topicCountMap,
            new StringDecoder(null), new Tweet.TestDecoder());

    int count = 0;
    while (count < arguments.totalMessages || arguments.totalMessages == -1) {
        for (int i = 0; i < streams.size(); i++) {
            List<KafkaStream<String, Tweet>> kafkaStreams = streams.get(arguments.topicName);
            for (KafkaStream<String, Tweet> kafkaStream : kafkaStreams) {
                for (MessageAndMetadata<String, Tweet> record : kafkaStream) {
                    log.info("Received tweet: {}-{}", record.message().userName, record.message().message);
                    count++;
                }
            }
        }
    }

    connector.shutdown();

    log.info("successfully consumed message {}", count);
}
 
Example #28
Source File: DemoHighLevelConsumer.java    From KafkaExample with Apache License 2.0
public static void main(String[] args) {
	// Note: the demo hardcodes its arguments, overriding anything passed on the command line.
	args = new String[] { "zookeeper0:2181/kafka", "topic1", "group2", "consumer1" };
	if (args == null || args.length != 4) {
		System.err.println("Usage:\n\tjava -jar kafka_consumer.jar ${zookeeper_list} ${topic_name} ${group_name} ${consumer_id}");
		System.exit(1);
	}
	String zk = args[0];
	String topic = args[1];
	String groupid = args[2];
	String consumerid = args[3];
	Properties props = new Properties();
	props.put("zookeeper.connect", zk);
	props.put("group.id", groupid);
	props.put("client.id", "test");
	props.put("consumer.id", consumerid);
	props.put("auto.offset.reset", "largest");
	props.put("auto.commit.enable", "false");
	props.put("auto.commit.interval.ms", "60000");

	ConsumerConfig consumerConfig = new ConsumerConfig(props);
	ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);

	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(topic, 1);
	Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector.createMessageStreams(topicCountMap);

	KafkaStream<byte[], byte[]> stream1 = consumerMap.get(topic).get(0);
	ConsumerIterator<byte[], byte[]> iterator = stream1.iterator();
	while (iterator.hasNext()) {
		MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();
		String message = String.format(
				"Topic:%s, GroupID:%s, Consumer ID:%s, PartitionID:%s, Offset:%s, Message Key:%s, Message Payload: %s",
				messageAndMetadata.topic(), groupid, consumerid, messageAndMetadata.partition(),
				messageAndMetadata.offset(), new String(messageAndMetadata.key()),
				new String(messageAndMetadata.message()));
		System.out.println(message);
		consumerConnector.commitOffsets();
	}
}
 
Example #29
Source File: JavaKafkaConsumerHighAPIHdfsImpl.java    From dk-fitting with Apache License 2.0
public void run() {
    // 1. Specify the topic
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(this.topic, this.numThreads);

    // 2. Specify the data decoders
    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

    // 3. Get the stream collections for the connection
    /**
     * Key: topic name
     * Value: list of stream readers for that topic, sized as specified in topicCountMap
     */
    Map<String, List<KafkaStream<String, String>>> consumerMap = this.consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);

    // 4. From the returned result, get the stream processors for this topic
    List<KafkaStream<String, String>> streams = consumerMap.get(this.topic);

    // 5. Create the thread pool
    this.executorPool = Executors.newFixedThreadPool(this.numThreads);

    // 6. Submit one processor per stream
    int threadNumber = 0;
    for (final KafkaStream<String, String> stream : streams) {
        this.executorPool.submit(new ConsumerKafkaStreamProcesser(stream, threadNumber));
        threadNumber++;
    }
}
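
All of these examples rely on explicit shutdown of the old high-level consumer (Example #26 calls consumer.shutdown() in a finally block; Example #27 calls connector.shutdown() when done). A minimal sketch of an orderly stop for the executor-per-stream pattern above; the method and parameter names are illustrative:

// Illustrative shutdown for the executor-per-stream pattern; names are hypothetical.
public void stop(ConsumerConnector connector, ExecutorService executor) {
    // Shutting down the connector ends the blocking iterators,
    // letting worker threads fall out of their hasNext() loops.
    connector.shutdown();
    executor.shutdown();
    try {
        if (!executor.awaitTermination(5, java.util.concurrent.TimeUnit.SECONDS)) {
            executor.shutdownNow();
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        executor.shutdownNow();
    }
}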