Java Code Examples for kafka.consumer.Consumer#createJavaConsumerConnector()

The following examples show how to use kafka.consumer.Consumer#createJavaConsumerConnector(), the entry point of Kafka's old high-level (ZooKeeper-based) consumer API. Each example is drawn from an open-source project; the source file, project, and license are noted above the code.
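
Before the project examples, here is a minimal, self-contained sketch of the typical call pattern. The ZooKeeper address, group ID, and topic name are placeholders, and consumer.timeout.ms is optional; it is set here only so the loop can terminate on an idle topic.

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.ConsumerTimeoutException;
import kafka.consumer.KafkaStream;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

public class MinimalHighLevelConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "localhost:2181"); // placeholder
        props.put("group.id", "example-group");           // placeholder
        props.put("consumer.timeout.ms", "5000");         // let hasNext() time out

        ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
        try {
            // The Integer value is the number of streams to create, not the partition count.
            Map<String, List<KafkaStream<byte[], byte[]>>> streams =
                    connector.createMessageStreams(Collections.singletonMap("example-topic", 1));
            ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();
            while (it.hasNext()) {
                MessageAndMetadata<byte[], byte[]> record = it.next();
                System.out.println(new String(record.message()));
            }
        } catch (ConsumerTimeoutException e) {
            // No message arrived within consumer.timeout.ms; fall through to shutdown.
        } finally {
            connector.shutdown();
        }
    }
}
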
Example 1
Source File: KafkaTestBase.java    From incubator-gobblin with Apache License 2.0
KafkaConsumerSuite(String zkConnectString, String topic)
{
  _topic = topic;
  Properties consumeProps = new Properties();
  consumeProps.put("zookeeper.connect", zkConnectString);
  consumeProps.put("group.id", _topic+"-"+System.nanoTime());
  consumeProps.put("zookeeper.session.timeout.ms", "10000");
  consumeProps.put("zookeeper.sync.time.ms", "10000");
  consumeProps.put("auto.commit.interval.ms", "10000");
  consumeProps.put("_consumer.timeout.ms", "10000");

  _consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));

  Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap =
      _consumer.createMessageStreams(ImmutableMap.of(this._topic, 1));
  List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this._topic);
  _stream = streams.get(0);
  _iterator = _stream.iterator();
}
 
Example 2
Source File: KafkaExample.java    From pragmatic-java-engineer with GNU General Public License v3.0
public static void consumer() {
    Properties props = new Properties();
    props.put("zookeeper.connect", "zk1.dmp.com:2181,zk2.dmp.com:2181,zk3.dmp.com:2181");
    props.put("zookeeper.session.timeout.ms", "3000");
    props.put("zookeeper.sync.time.ms", "200");
    props.put("group.id", "test_group");
    props.put("auto.commit.interval.ms", "600");

    String topic = "test_topic";
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    Map<String, Integer> topics = new HashMap<String, Integer>();
    int partitionNum = 3; // number of streams, typically set to the topic's partition count
    topics.put(topic, partitionNum);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topics);
    List<KafkaStream<byte[], byte[]>> partitions = streams.get(topic);
    Executor threadPool = Executors.newFixedThreadPool(partitionNum);
    for (final KafkaStream<byte[], byte[]> partition : partitions) {
        threadPool.execute(
                new Runnable() {
                    @Override
                    public void run() {
                        ConsumerIterator<byte[], byte[]> it = partition.iterator();
                        while (it.hasNext()) {
                            MessageAndMetadata<byte[], byte[]> item = it.next();
                            byte[] messageBody = item.message();
                        }
                    }
                });
    }
}
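
The example above never releases its resources: the connector keeps fetcher threads alive and the pool threads block in it.hasNext() forever. A cleanup sketch, assuming the connector and pool are kept as fields (with the pool declared as an ExecutorService) so a shutdown path can reach them:

public static void shutdown(ConsumerConnector connector, ExecutorService threadPool) {
    // Shutting down the connector stops the fetchers; blocked iterators then finish.
    connector.shutdown();
    threadPool.shutdown();
    try {
        if (!threadPool.awaitTermination(5, TimeUnit.SECONDS)) {
            threadPool.shutdownNow();
        }
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
}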
 
Example 3
Source File: KafkaSourceUtil.java    From flume-ng-extends-source with MIT License
public static ConsumerConnector getConsumer(Properties kafkaProps) {
  ConsumerConfig consumerConfig =
          new ConsumerConfig(kafkaProps);
  ConsumerConnector consumer =
          Consumer.createJavaConsumerConnector(consumerConfig);
  return consumer;
}
 
Example 4
Source File: MessageResource.java    From dropwizard-kafka-http with Apache License 2.0
@GET
@Timed
public Response consume(
        @QueryParam("topic") String topic,
        @QueryParam("timeout") Integer timeout
) {
    if (Strings.isNullOrEmpty(topic))
        return Response.status(400)
                .entity(new String[]{"Undefined topic"})
                .build();

    Properties props = (Properties) consumerCfg.clone();
    if (timeout != null) props.put("consumer.timeout.ms", "" + timeout);

    ConsumerConfig config = new ConsumerConfig(props);
    ConsumerConnector connector = Consumer.createJavaConsumerConnector(config);

    Map<String, Integer> streamCounts = Collections.singletonMap(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(streamCounts);
    KafkaStream<byte[], byte[]> stream = streams.get(topic).get(0);

    List<Message> messages = new ArrayList<>();
    try {
        for (MessageAndMetadata<byte[], byte[]> messageAndMetadata : stream)
            messages.add(new Message(messageAndMetadata));
    } catch (ConsumerTimeoutException ignore) {
    } finally {
        connector.commitOffsets();
        connector.shutdown();
    }

    return Response.ok(messages).build();
}
 
Example 5
Source File: DemoHighLevelConsumer.java    From KafkaExample with Apache License 2.0
public static void main(String[] args) {
	// Hardcoded for the demo: this overrides any real command-line arguments,
	// so the usage check below can never trigger.
	args = new String[] { "zookeeper0:2181/kafka", "topic1", "group2", "consumer1" };
	if (args == null || args.length != 4) {
		System.err.println("Usage:\n\tjava -jar kafka_consumer.jar ${zookeeper_list} ${topic_name} ${group_name} ${consumer_id}");
		System.exit(1);
	}
	String zk = args[0];
	String topic = args[1];
	String groupid = args[2];
	String consumerid = args[3];
	Properties props = new Properties();
	props.put("zookeeper.connect", zk);
	props.put("group.id", groupid);
	props.put("client.id", "test");
	props.put("consumer.id", consumerid);
	props.put("auto.offset.reset", "largest");
	props.put("auto.commit.enable", "false");
	props.put("auto.commit.interval.ms", "60000");

	ConsumerConfig consumerConfig = new ConsumerConfig(props);
	ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(consumerConfig);

	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(topic, 1);
	Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumerConnector.createMessageStreams(topicCountMap);

	KafkaStream<byte[], byte[]> stream1 = consumerMap.get(topic).get(0);
	ConsumerIterator<byte[], byte[]> interator = stream1.iterator();
	while (interator.hasNext()) {
		MessageAndMetadata<byte[], byte[]> messageAndMetadata = interator.next();
		String message = String.format(
				"Topic:%s, GroupID:%s, Consumer ID:%s, PartitionID:%s, Offset:%s, Message Key:%s, Message Payload: %s",
				messageAndMetadata.topic(), groupid, consumerid, messageAndMetadata.partition(),
				messageAndMetadata.offset(), new String(messageAndMetadata.key()),
				new String(messageAndMetadata.message()));
		System.out.println(message);
		consumerConnector.commitOffsets();
	}
}
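
With auto.commit.enable set to false, the loop above calls commitOffsets() once per message, which writes the consumed offsets to ZooKeeper on every iteration. A variant sketch that commits once per batch instead (process() and the batch size are illustrative placeholders):

	int uncommitted = 0;
	final int commitBatchSize = 100; // hypothetical batch size
	while (iterator.hasNext()) {
		MessageAndMetadata<byte[], byte[]> messageAndMetadata = iterator.next();
		process(messageAndMetadata); // stand-in for the printing logic above
		if (++uncommitted >= commitBatchSize) {
			// Commits the current offsets of all partitions owned by this connector.
			consumerConnector.commitOffsets();
			uncommitted = 0;
		}
	}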
 
Example 6
Source File: KafkaTestBase.java    From incubator-gobblin with Apache License 2.0
public KafkaTestBase(String topic) throws InterruptedException, RuntimeException {

    startServer();

    this.topic = topic;

    AdminUtils.createTopic(zkClient, topic, 1, 1, new Properties());

    List<KafkaServer> servers = new ArrayList<>();
    servers.add(kafkaServer);
    TestUtils.waitUntilMetadataIsPropagated(scala.collection.JavaConversions.asScalaBuffer(servers), topic, 0, 5000);

    Properties consumeProps = new Properties();
    consumeProps.put("zookeeper.connect", zkConnect);
    consumeProps.put("group.id", "testConsumer");
    consumeProps.put("zookeeper.session.timeout.ms", "10000");
    consumeProps.put("zookeeper.sync.time.ms", "10000");
    consumeProps.put("auto.commit.interval.ms", "10000");
    consumeProps.put("consumer.timeout.ms", "10000");

    consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumeProps));

    Map<String, Integer> topicCountMap = new HashMap<>();
    topicCountMap.put(this.topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(this.topic);
    stream = streams.get(0);

    iterator = stream.iterator();
}
 
Example 7
Source File: KafkaChannel.java    From monasca-persister with Apache License 2.0
@Inject
public KafkaChannel(PersisterConfig configuration, @Assisted PipelineConfig pipelineConfig,
    @Assisted String threadId) {

  this.topic = pipelineConfig.getTopic();
  this.threadId = threadId;
  this.commitBatchtimeInMills = pipelineConfig.getCommitBatchTime();
  nextCommitTime = System.currentTimeMillis() + commitBatchtimeInMills;
  Properties kafkaProperties = createKafkaProperties(configuration.getKafkaConfig(), pipelineConfig);
  consumerConnector = Consumer.createJavaConsumerConnector(createConsumerConfig(kafkaProperties));
}
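
The createKafkaProperties and createConsumerConfig helpers are not shown in this excerpt. A plausible shape, with the accessor names assumed rather than taken from monasca-persister:

private Properties createKafkaProperties(KafkaConfig kafkaConfig, PipelineConfig pipelineConfig) {
    Properties props = new Properties();
    props.put("zookeeper.connect", kafkaConfig.getZookeeperConnect()); // assumed accessor
    props.put("group.id", pipelineConfig.getGroupId());                // assumed accessor
    props.put("consumer.id", threadId);
    return props;
}

private ConsumerConfig createConsumerConfig(Properties kafkaProperties) {
    return new ConsumerConfig(kafkaProperties);
}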
 
Example 8
Source File: KafkaConsumer.java    From blog_demos with Apache License 2.0
/**
 * Start a consumer for the given topic
 * @param topic
 */
public void startConsume(String topic){
    Properties props = new Properties();
    props.put("zookeeper.connect", zkConnect);
    props.put("group.id", groupId);
    props.put("zookeeper.session.timeout.ms", "40000");
    props.put("zookeeper.sync.time.ms", "200");
    props.put("auto.commit.interval.ms", "1000");
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(new ConsumerConfig(props));


    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    KafkaStream<byte[], byte[]> stream = consumerMap.get(topic).get(0);
    final ConsumerIterator<byte[], byte[]> it = stream.iterator();

    Runnable executor = new Runnable() {
        @Override
        public void run() {
            while (it.hasNext()) {
                System.out.println("************** receive:" + new String(it.next().message()));
                try {
                    Thread.sleep(3000);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                }
            }
        }
    };

    new Thread(executor).start();
}
 
Example 9
Source File: KafkaConsumerTest.java    From pentaho-kafka-consumer with Apache License 2.0
@Test
public void withStopOnEmptyTopic() throws KettleException {

    meta.setStopOnEmptyTopic(true);
    TransMeta tm = TransTestFactory.generateTestTransformation(new Variables(), meta, STEP_NAME);

    TransTestFactory.executeTestTransformation(tm, TransTestFactory.INJECTOR_STEPNAME,
            STEP_NAME, TransTestFactory.DUMMY_STEPNAME, new ArrayList<RowMetaAndData>());

    PowerMockito.verifyStatic();
    ArgumentCaptor<ConsumerConfig> consumerConfig = ArgumentCaptor.forClass(ConsumerConfig.class);
    Consumer.createJavaConsumerConnector(consumerConfig.capture());

    assertEquals(1000, consumerConfig.getValue().consumerTimeoutMs());
}
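
For verifyStatic() and the captor to work, the test class has to mock the static factory up front. A sketch of the setup such a test typically relies on (assumed here, not shown in the project):

@RunWith(PowerMockRunner.class)
@PrepareForTest(Consumer.class)
public class KafkaConsumerTest {

    @Before
    public void setUp() {
        PowerMockito.mockStatic(Consumer.class);
        ConsumerConnector connector = Mockito.mock(ConsumerConnector.class);
        PowerMockito.when(Consumer.createJavaConsumerConnector(Mockito.any(ConsumerConfig.class)))
                .thenReturn(connector);
        // createMessageStreams(...) on the mocked connector would also need stubbing
        // before the step's init() can run to completion.
    }
}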
 
Example 10
Source File: KafkaUtils.java    From Kafka-Insight with Apache License 2.0
public static ConsumerConnector createConsumerConnector(String zkAddr, String group) {
    Properties props = new Properties();
    props.put(ConsumerConfig.GROUP_ID_CONFIG, group);
    props.put(ConsumerConfig.EXCLUDE_INTERNAL_TOPICS_CONFIG, "false");
    props.put(KafkaConfig.ZkConnectProp(), zkAddr);
    ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(props));
    return consumerConnector;
}
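
Note the mixed imports here: GROUP_ID_CONFIG and EXCLUDE_INTERNAL_TOPICS_CONFIG come from the new client's org.apache.kafka.clients.consumer.ConsumerConfig, and ZkConnectProp() from the broker-side kafka.server.KafkaConfig, yet the connector itself still requires the old kafka.consumer.ConsumerConfig. Since those constants resolve to plain strings ("group.id", "exclude.internal.topics", "zookeeper.connect"), an equivalent sketch without the extra dependencies:

public static ConsumerConnector createConsumerConnector(String zkAddr, String group) {
    Properties props = new Properties();
    props.put("group.id", group);
    props.put("exclude.internal.topics", "false"); // also expose internal topics such as offsets
    props.put("zookeeper.connect", zkAddr);
    return Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(props));
}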
 
Example 11
Source File: KafkaConsumer.java    From pentaho-kafka-consumer with Apache License 2.0
public boolean init(StepMetaInterface smi, StepDataInterface sdi) {
    super.init(smi, sdi);

    KafkaConsumerMeta meta = (KafkaConsumerMeta) smi;
    KafkaConsumerData data = (KafkaConsumerData) sdi;

    Properties properties = meta.getKafkaProperties();
    Properties substProperties = new Properties();
    for (Entry<Object, Object> e : properties.entrySet()) {
        substProperties.put(e.getKey(), environmentSubstitute(e.getValue().toString()));
    }
    if (meta.isStopOnEmptyTopic()) {

        // If there isn't already a provided value, set a default of 1s
        if (!substProperties.containsKey(CONSUMER_TIMEOUT_KEY)) {
            substProperties.put(CONSUMER_TIMEOUT_KEY, "1000");
        }
    } else {
        if (substProperties.containsKey(CONSUMER_TIMEOUT_KEY)) {
            logError(Messages.getString("KafkaConsumer.WarnConsumerTimeout"));
        }
    }
    ConsumerConfig consumerConfig = new ConsumerConfig(substProperties);

    logBasic(Messages.getString("KafkaConsumer.CreateKafkaConsumer.Message", consumerConfig.zkConnect()));
    data.consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    String topic = environmentSubstitute(meta.getTopic());
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streamsMap = data.consumer.createMessageStreams(topicCountMap);
    logDebug("Received streams map: " + streamsMap);
    data.streamIterator = streamsMap.get(topic).get(0).iterator();

    return true;
}
 
Example 12
Source File: KafkaPublisherTest.java    From nifi with Apache License 2.0
private ConsumerIterator<byte[], byte[]> buildConsumer(String topic) {
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:" + kafkaLocal.getZookeeperPort());
    props.put("group.id", "test");
    props.put("consumer.timeout.ms", "5000");
    props.put("auto.offset.reset", "smallest");
    ConsumerConfig consumerConfig = new ConsumerConfig(props);
    ConsumerConnector consumer = Consumer.createJavaConsumerConnector(consumerConfig);
    Map<String, Integer> topicCountMap = new HashMap<>(1);
    topicCountMap.put(topic, 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);
    ConsumerIterator<byte[], byte[]> iter = streams.get(0).iterator();
    return iter;
}
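
Because the properties set consumer.timeout.ms to 5000, hasNext() throws ConsumerTimeoutException once the topic stays quiet for five seconds. A sketch of how a test might drain such an iterator (the topic name is a placeholder):

List<String> received = new ArrayList<>();
ConsumerIterator<byte[], byte[]> iter = buildConsumer("test-topic"); // placeholder topic
try {
    while (iter.hasNext()) {
        received.add(new String(iter.next().message(), StandardCharsets.UTF_8));
    }
} catch (ConsumerTimeoutException e) {
    // No message for 5000 ms: treat the list as fully drained.
}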
 
Example 13
Source File: MessageServiceImpl.java    From blog_demos with Apache License 2.0
@PostConstruct
public void init(){
    logger.info("start init kafka consumer service");
    // 1. Create the Kafka consumer connector
    consumer = Consumer.createJavaConsumerConnector(createConsumerConfig(ZK, GROUP_ID));

    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put(TOPIC, THREAD_NUM);

    // 2. Specify the key and value decoders
    StringDecoder keyDecoder = new StringDecoder(new VerifiableProperties());
    StringDecoder valueDecoder = new StringDecoder(new VerifiableProperties());

    // 3. Get the collection of stream iterators
    /**
     * Key: topic name
     * Value: streams reading that topic's data; the list size equals the
     * count requested for the topic in topicCountMap
     */
    Map<String, List<KafkaStream<String, String>>> consumerMap = this.consumer.createMessageStreams(topicCountMap, keyDecoder, valueDecoder);

    // 4. Get the streams for the target topic from the result
    List<KafkaStream<String, String>> streams = consumerMap.get(TOPIC);

    logger.info("streams size {}", streams.size());

    // 5. Create the thread pool
    this.executorPool = new ThreadPoolExecutor(THREAD_NUM, THREAD_NUM,
            0,
            TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(),
            new CustomThreadFactory(),
            new ThreadPoolExecutor.AbortPolicy());

    // 6. Build the output processors, one per stream
    int threadNumber = 0;
    for (final KafkaStream<String, String> stream : streams) {
        this.executorPool.submit(new Processer(stream, threadNumber));
        threadNumber++;
    }

    logger.info("end init kafka consumer service");
}
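
The Processer class submitted to the pool is not shown. A hypothetical shape consistent with how it is used above (the real class in blog_demos may differ):

class Processer implements Runnable {
    private final KafkaStream<String, String> stream;
    private final int threadNumber;

    Processer(KafkaStream<String, String> stream, int threadNumber) {
        this.stream = stream;
        this.threadNumber = threadNumber;
    }

    @Override
    public void run() {
        // KafkaStream is Iterable; this loop blocks until the connector shuts down.
        for (MessageAndMetadata<String, String> record : stream) {
            System.out.println("thread " + threadNumber + " received: " + record.message());
        }
    }
}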
 
Example 14
Source File: NativeKafkaWithStringDecoderTest.java    From hermes with Apache License 2.0
@Test
public void testNative() throws IOException, InterruptedException, ExecutionException {
	String topic = "kafka.SimpleTextTopic";
	int msgNum = 200;
	final CountDownLatch countDown = new CountDownLatch(msgNum);

	Properties producerProps = new Properties();
	// Producer
	producerProps.put("bootstrap.servers", "");
	producerProps.put("value.serializer", StringSerializer.class.getCanonicalName());
	producerProps.put("key.serializer", StringSerializer.class.getCanonicalName());
	// Consumer
	Properties consumerProps = new Properties();
	consumerProps.put("zookeeper.connect", "");
	consumerProps.put("group.id", "GROUP_" + topic);

	final List<String> actualResult = new ArrayList<String>();
	final List<String> expectedResult = new ArrayList<String>();

	ConsumerConnector consumerConnector = Consumer.createJavaConsumerConnector(new ConsumerConfig(consumerProps));
	Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
	topicCountMap.put(topic, 1);
	final List<KafkaStream<String, String>> streams = consumerConnector.createMessageStreams(topicCountMap,
	      new StringDecoder(null), new StringDecoder(null)).get(topic);
	for (final KafkaStream<String, String> stream : streams) {
		new Thread() {
			public void run() {
				for (MessageAndMetadata<String, String> msgAndMetadata : stream) {
					try {
						System.out.println("received: " + msgAndMetadata.message());
						actualResult.add(msgAndMetadata.message());
						countDown.countDown();
					} catch (Exception e) {
						e.printStackTrace();
					}
				}
			}
		}.start();
	}

	KafkaProducer<String, String> producer = new KafkaProducer<String, String>(producerProps);
	int i = 0;
	while (i < msgNum) {
		ProducerRecord<String, String> data = new ProducerRecord<String, String>(topic, "test-message" + i++);
		Future<RecordMetadata> send = producer.send(data);
		send.get();
		if (send.isDone()) {
			System.out.println("sending: " + data.value());
			expectedResult.add(data.value());
		}
	}

	countDown.await();

	Assert.assertArrayEquals(expectedResult.toArray(), actualResult.toArray());

	consumerConnector.shutdown();
	producer.close();
}
 
Example 15
Source File: GetKafka.java    From nifi with Apache License 2.0
public void createConsumers(final ProcessContext context) {
    final String topic = context.getProperty(TOPIC).evaluateAttributeExpressions().getValue();

    final Properties props = new Properties();
    props.setProperty("zookeeper.connect", context.getProperty(ZOOKEEPER_CONNECTION_STRING).evaluateAttributeExpressions().getValue());
    props.setProperty("group.id", context.getProperty(GROUP_ID).evaluateAttributeExpressions().getValue());
    props.setProperty("client.id", context.getProperty(CLIENT_NAME).getValue());
    props.setProperty("auto.commit.interval.ms", String.valueOf(context.getProperty(ZOOKEEPER_COMMIT_DELAY).asTimePeriod(TimeUnit.MILLISECONDS)));
    props.setProperty("auto.offset.reset", context.getProperty(AUTO_OFFSET_RESET).getValue());
    props.setProperty("zookeeper.connection.timeout.ms", context.getProperty(ZOOKEEPER_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).toString());
    props.setProperty("socket.timeout.ms", context.getProperty(KAFKA_TIMEOUT).asTimePeriod(TimeUnit.MILLISECONDS).toString());

    for (final Entry<PropertyDescriptor, String> entry : context.getProperties().entrySet()) {
        PropertyDescriptor descriptor = entry.getKey();
        if (descriptor.isDynamic()) {
            if (props.containsKey(descriptor.getName())) {
                this.getLogger().warn("Overriding existing property '" + descriptor.getName() + "' which had value of '"
                    + props.getProperty(descriptor.getName()) + "' with dynamically set value '" + entry.getValue() + "'.");
            }
            props.setProperty(descriptor.getName(), entry.getValue());
        }
    }

    /*
     * Unless user sets it to some explicit value we are setting it to the
     * lowest possible value of 1 millisecond to ensure the
     * consumerStream.hasNext() doesn't block. See
     * http://kafka.apache.org/documentation.html#configuration as well as
     * comment in 'catch ConsumerTimeoutException' in onTrigger() for more
     * explanation as to the reasoning behind it.
     */
    if (!props.containsKey("consumer.timeout.ms")) {
        this.getLogger().info("Setting 'consumer.timeout.ms' to 1 milliseconds to avoid consumer"
                        + " block in the event when no events are present in Kafka topic. If you wish to change this value "
                        + " set it as dynamic property. If you wish to explicitly enable consumer block (at your own risk)"
                        + " set its value to -1.");
        props.setProperty("consumer.timeout.ms", "1");
    }

    int partitionCount = KafkaUtils.retrievePartitionCountForTopic(
            context.getProperty(ZOOKEEPER_CONNECTION_STRING).evaluateAttributeExpressions().getValue(), context.getProperty(TOPIC).evaluateAttributeExpressions().getValue());

    final ConsumerConfig consumerConfig = new ConsumerConfig(props);
    consumer = Consumer.createJavaConsumerConnector(consumerConfig);

    final Map<String, Integer> topicCountMap = new HashMap<>(1);

    int concurrentTaskToUse = context.getMaxConcurrentTasks();
    if (context.getMaxConcurrentTasks() < partitionCount){
        this.getLogger().warn("The amount of concurrent tasks '" + context.getMaxConcurrentTasks() + "' configured for "
                + "this processor is less than the amount of partitions '" + partitionCount + "' for topic '" + context.getProperty(TOPIC).evaluateAttributeExpressions().getValue() + "'. "
            + "Consider making it equal to the amount of partition count for most efficient event consumption.");
    } else if (context.getMaxConcurrentTasks() > partitionCount){
        concurrentTaskToUse = partitionCount;
        this.getLogger().warn("The amount of concurrent tasks '" + context.getMaxConcurrentTasks() + "' configured for "
                + "this processor is greater than the amount of partitions '" + partitionCount + "' for topic '" + context.getProperty(TOPIC).evaluateAttributeExpressions().getValue() + "'. "
            + "Therefore those tasks would never see a message. To avoid that the '" + partitionCount + "'(partition count) will be used to consume events");
    }

    topicCountMap.put(topic, concurrentTaskToUse);

    final Map<String, List<KafkaStream<byte[], byte[]>>> consumerMap = consumer.createMessageStreams(topicCountMap);
    final List<KafkaStream<byte[], byte[]>> streams = consumerMap.get(topic);

    this.streamIterators.clear();

    for (final KafkaStream<byte[], byte[]> stream : streams) {
        streamIterators.add(stream.iterator());
    }
    this.consumerStreamsReady.set(true);
}
 
Example 16
Source File: IngestFromKafkaDriver.java    From geowave with Apache License 2.0
private ConsumerConnector buildKafkaConsumer() {

    final Properties kafkaProperties = kafkaOptions.getProperties();

    final ConsumerConnector consumer =
        Consumer.createJavaConsumerConnector(new ConsumerConfig(kafkaProperties));

    return consumer;
}
 
Example 17
Source File: JavaKafkaConsumerHighAPIHdfsImpl.java    From dk-fitting with Apache License 2.0
/**
 * Constructor
 *
 * @param topic      Kafka topic to consume
 * @param numThreads number of processing threads, typically the topic's partition count
 * @param zookeeper  ZooKeeper connection string for the Kafka cluster
 * @param groupId    group ID this consumer belongs to
 */
public JavaKafkaConsumerHighAPIHdfsImpl(String topic, int numThreads, String zookeeper, String groupId) {
    // 1. Create the Kafka consumer connector
    this.consumer = Consumer.createJavaConsumerConnector(createConsumerConfig(zookeeper, groupId));
    // 2. Store the parameters
    this.topic = topic;
    this.numThreads = numThreads;
}
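
The createConsumerConfig helper referenced above is not part of the excerpt. A plausible implementation, with the timeout and commit-interval values assumed rather than taken from dk-fitting:

private static ConsumerConfig createConsumerConfig(String zookeeper, String groupId) {
    Properties props = new Properties();
    props.put("zookeeper.connect", zookeeper);
    props.put("group.id", groupId);
    props.put("zookeeper.session.timeout.ms", "4000");  // assumed value
    props.put("zookeeper.sync.time.ms", "200");         // assumed value
    props.put("auto.commit.interval.ms", "1000");       // assumed value
    return new ConsumerConfig(props);
}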