org.apache.kafka.clients.consumer.KafkaConsumer Java Examples

The following examples show how to use org.apache.kafka.clients.consumer.KafkaConsumer. Each example is drawn from an open-source project; the source file and license are noted above each listing.
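For reference, the canonical usage is a configure/subscribe/poll loop. Below is a minimal, self-contained sketch; the broker address, group id, and topic name are placeholders, not taken from the projects that follow.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class MinimalConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic"));   // placeholder topic
            while (true) {
                // poll(Duration) is the non-deprecated form; several older examples below use poll(long)
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.printf("offset = %d, key = %s, value = %s%n",
                            record.offset(), record.key(), record.value());
                }
            }
        }
    }
}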
Example #1
Source File: ConsumerExample.java    From pulsar with Apache License 2.0
public static void main(String[] args) {
    String topic = "persistent://public/default/test";

    Properties props = new Properties();
    props.put("bootstrap.servers", "pulsar://localhost:6650");
    props.put("group.id", "my-subscription-name");
    props.put("enable.auto.commit", "false");
    props.put("key.deserializer", IntegerDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    @SuppressWarnings("resource")
    Consumer<Integer, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));

    while (true) {
        ConsumerRecords<Integer, String> records = consumer.poll(100);
        records.forEach(record -> {
            log.info("Received record: {}", record);
        });

        // Commit last offset
        consumer.commitSync();
    }
}
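Note that this example consumes from Apache Pulsar, not Kafka: Pulsar ships a Kafka compatibility wrapper (the pulsar-client-kafka artifact) that implements the Kafka consumer API, which is why bootstrap.servers is a pulsar:// service URL.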
 
Example #2
Source File: SynapseKafkaAutoConfiguration.java    From synapse with Apache License 2.0
@Bean
@ConditionalOnMissingBean(name = "kafkaMessageLogReceiverEndpointFactory")
public MessageLogReceiverEndpointFactory kafkaMessageLogReceiverEndpointFactory(final KafkaProperties kafkaProperties,
                                                                                final MessageInterceptorRegistry interceptorRegistry,
                                                                                final ApplicationEventPublisher eventPublisher,
                                                                                final ConsumerFactory<String, String> kafkaConsumerFactory) {
    LOG.info("Auto-configuring Kafka MessageLogReceiverEndpointFactory");
    final ExecutorService executorService = newCachedThreadPool(
            new ThreadFactoryBuilder().setNameFormat("kafka-message-log-%d").build()
    );

    final KafkaConsumer<String, String> kafkaConsumer = (KafkaConsumer<String, String>)kafkaConsumerFactory.createConsumer();

    return new KafkaMessageLogReceiverEndpointFactory(
            interceptorRegistry,
            kafkaConsumer,
            executorService,
            eventPublisher);
}
 
Example #3
Source File: Example.java    From kafka-serializer-example with MIT License
public static void runConsumer(Properties properties, String topic) throws Exception {
    properties.put("group.id", "test");
    properties.put("enable.auto.commit", "true");
    properties.put("auto.commit.interval.ms", "1000");
    properties.put("session.timeout.ms", "30000");
    properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

    System.out.printf("Running consumer with serializer %s on topic %s\n", properties.getProperty("value.deserializer"), topic);

    KafkaConsumer<String, SensorReading> consumer = new KafkaConsumer<>(properties);
    consumer.subscribe(Arrays.asList(topic));
    while (true) {
        ConsumerRecords<String, SensorReading> records = consumer.poll(100);
        for (ConsumerRecord<String, SensorReading> record : records)
            System.out.printf("offset = %d, key = %s, value = %s\n", record.offset(), record.key(), record.value());
    }
}
 
Example #4
Source File: ConsumerAOC.java    From javatech with Creative Commons Attribution Share Alike 4.0 International
public static void main(String[] args) {
	// 1. Specify the consumer configuration
	final Properties props = new Properties();
	props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, HOST);
	props.put(ConsumerConfig.GROUP_ID_CONFIG, "test");
	props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
	props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
	props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
		"org.apache.kafka.common.serialization.StringDeserializer");
	props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
		"org.apache.kafka.common.serialization.StringDeserializer");

	// 2. Initialize the Kafka consumer with the configuration
	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

	// 3. Subscribe the consumer to the topic
	consumer.subscribe(Arrays.asList("t1"));
	while (true) {
		// 4. Consume messages
		ConsumerRecords<String, String> records = consumer.poll(100);
		for (ConsumerRecord<String, String> record : records) {
			System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
		}
	}
}
 
Example #5
Source File: KafkaClient.java    From kylin-on-parquet-v2 with Apache License 2.0
public static Map<Integer, Long> getLatestOffsets(final CubeInstance cubeInstance) {
    final KafkaConfig kafkaConfig = KafkaConfigManager.getInstance(KylinConfig.getInstanceFromEnv()).getKafkaConfig(cubeInstance.getRootFactTable());

    final String brokers = KafkaClient.getKafkaBrokers(kafkaConfig);
    final String topic = kafkaConfig.getTopic();

    Map<Integer, Long> startOffsets = Maps.newHashMap();
    try (final KafkaConsumer consumer = KafkaClient.getKafkaConsumer(brokers, cubeInstance.getName())) {
        final List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
        for (PartitionInfo partitionInfo : partitionInfos) {
            long latest = getLatestOffset(consumer, topic, partitionInfo.partition());
            startOffsets.put(partitionInfo.partition(), latest);
        }
    }
    return startOffsets;
}
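getLatestOffset(...) is a KafkaClient helper not shown here. Mirroring getEarliestOffset from Example #28 (same project family), a plausible sketch assigns the single partition, seeks to its end, and reads back the position (usual Kafka client imports assumed; not necessarily the project's actual code):

public static long getLatestOffset(KafkaConsumer consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToEnd(Arrays.asList(topicPartition));
    // position() resolves the pending seek and returns the offset of the next record to fetch
    return consumer.position(topicPartition);
}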
 
Example #6
Source File: KafkaTestUtils.java    From kafka-junit with BSD 3-Clause "New" or "Revised" License
/**
 * Return a Kafka consumer configured to consume from the internal Kafka server.
 * @param <K> Type of message key
 * @param <V> Type of message value
 * @param keyDeserializer Class of deserializer to be used for keys.
 * @param valueDeserializer Class of deserializer to be used for values.
 * @param config Additional consumer configuration options to be set.
 * @return KafkaConsumer configured to consume from the test server.
 */
public <K, V> KafkaConsumer<K, V> getKafkaConsumer(
    final Class<? extends Deserializer<K>> keyDeserializer,
    final Class<? extends Deserializer<V>> valueDeserializer,
    final Properties config
) {

    // Build config
    final Map<String, Object> kafkaConsumerConfig = buildDefaultClientConfig();
    kafkaConsumerConfig.put("key.deserializer", keyDeserializer);
    kafkaConsumerConfig.put("value.deserializer", valueDeserializer);
    kafkaConsumerConfig.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.RoundRobinAssignor");

    // Override config
    if (config != null) {
        for (final Map.Entry<Object, Object> entry: config.entrySet()) {
            kafkaConsumerConfig.put(entry.getKey().toString(), entry.getValue());
        }
    }

    // Create and return Consumer.
    return new KafkaConsumer<>(kafkaConsumerConfig);
}
 
Example #7
Source File: CheckBeginingOffset.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> kafkaConsumer = createNewConsumer();
    List<PartitionInfo> partitions = kafkaConsumer.partitionsFor("topic-monitor");
    List<TopicPartition> tpList = partitions.stream()
            .map(pInfo -> new TopicPartition(pInfo.topic(), pInfo.partition()))
            .collect(toList());
    Map<TopicPartition, Long> beginningOffsets =
            kafkaConsumer.beginningOffsets(tpList);
    System.out.println(beginningOffsets);
}
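createNewConsumer() is defined elsewhere in the project. A minimal sketch that would support this example (broker address and group id are assumptions; usual Kafka client imports assumed):

private static KafkaConsumer<String, String> createNewConsumer() {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "group.monitor");           // assumed group id
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    return new KafkaConsumer<>(props);
}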
 
Example #8
Source File: Launcher.java    From SkyEye with GNU General Public License v3.0
public static void main(String[] args) {
    SpringApplicationBuilder builder = new SpringApplicationBuilder(Launcher.class);
    Set<ApplicationListener<?>> listeners = builder.application().getListeners();
    for (Iterator<ApplicationListener<?>> it = listeners.iterator(); it.hasNext();) {
        ApplicationListener<?> listener = it.next();
        if (listener instanceof LoggingApplicationListener) {
            it.remove();
        }
    }
    builder.application().setListeners(listeners);
    ConfigurableApplicationContext context = builder.run(args);
    LOGGER.info("collector metrics start successfully");

    KafkaConsumer kafkaConsumer = (KafkaConsumer<byte[], String>) context.getBean("kafkaConsumer");
    Task task = (Task) context.getBean("metricsTask");

    // Shut down gracefully via a shutdown hook
    Runtime.getRuntime().addShutdownHook(new ShutdownHookRunner(kafkaConsumer, task));
    task.doTask();
}
 
Example #9
Source File: KafkaAdminClientTest.java    From common-kafka with Apache License 2.0
@Test
public void getConsumerGroupSummary() {
    client.createTopic(testName.getMethodName(), 1, 1);

    Properties properties = new Properties();
    properties.putAll(KafkaTests.getProps());
    properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class.getName());
    properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, BytesDeserializer.class.getName());
    properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testName.getMethodName());
    properties.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testName.getMethodName() + "-client-id");

    try (Consumer<Object, Object> consumer = new KafkaConsumer<>(properties)) {
        consumer.subscribe(Arrays.asList(testName.getMethodName()));
        consumer.poll(Duration.ofSeconds(5L));

        AdminClient.ConsumerGroupSummary summary = client.getConsumerGroupSummary(testName.getMethodName());
        assertThat("Expected only 1 consumer summary when getConsumerGroupSummaries(" + testName.getMethodName() + ")",
                convertToJavaSet(summary.consumers().get().iterator()).size(), is(1));

        assertThat(summary.state(), is(notNullValue()));
        assertThat(summary.coordinator(), is(notNullValue()));
        assertThat(summary.assignmentStrategy(), is(notNullValue()));
    }
}
 
Example #10
Source File: KafkaTopicBroadcaster.java    From cqrs-eventsourcing-kafka with Apache License 2.0
public KafkaTopicBroadcaster(String name, ObjectMapper objectMapper, String zookeeper) {
    super();
    Properties props = new Properties();
    try {
        props.put("client.id", InetAddress.getLocalHost().getHostName());
    } catch (UnknownHostException e) {
        throw new RuntimeException(e); // preserve the cause instead of swallowing it
    }
    props.put("bootstrap.servers", zookeeper);
    props.put("group.id", name);
    props.put("key.deserializer", StringDeserializer.class);
    props.put("value.deserializer", StringDeserializer.class);
    props.put("enable.auto.commit", "false");
    props.put("auto.offset.reset", "earliest");

    this.consumer = new KafkaConsumer(props);
    this.objectMapper = objectMapper;
}
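Despite the constructor parameter being named zookeeper, its value is used as bootstrap.servers: the modern Java consumer connects directly to Kafka brokers, not to ZooKeeper, so a broker list must be passed here.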
 
Example #11
Source File: OffsetCommitSyncBatch.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();
    final int minBatchSize = 200;
    List<ConsumerRecord> buffer = new ArrayList<>();
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            buffer.add(record);
            System.out.println(record.offset() + " : " + record.value());
        }
        if (buffer.size() >= minBatchSize) {
            //do some logical processing with buffer.
            consumer.commitSync();
            buffer.clear();
        }
    }
}
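ConsumerFactory here is a project-local helper, not a Kafka or Spring class. A plausible sketch of its create() method, with every configuration value an assumption: it would build a consumer with auto-commit disabled (the loop above calls commitSync() itself) and subscribe it before returning.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerFactory<K, V> {
    public KafkaConsumer<K, V> create() {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "group.demo");              // assumed group id
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");         // caller commits manually
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        KafkaConsumer<K, V> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList("topic-demo"));          // assumed topic
        return consumer;
    }
}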
 
Example #12
Source File: KafkaConsumeOrderWorkaround.java    From flowing-retail with Apache License 2.0
@PostConstruct
public void startConsuming() {
  consumerThread = new Thread("kafka-workaround-consumer") {
    public void run( ) {
      
      final Properties props = new Properties();
      props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
      props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
      props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
      props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

      consumer = new KafkaConsumer<>(props);
      consumer.subscribe(Collections.singletonList(topicName));
      while (running) {
        consumer.poll(pollingInterval);
        consumer.commitAsync();
      }        
      consumer.close();
    }
  };
  consumerThread.start();
}
 
Example #13
Source File: AtlasNotificationServerEmulator.java    From nifi with Apache License 2.0
public void consume(Consumer<HookNotification> c) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");
    props.put("group.id", "test");
    props.put("enable.auto.commit", "true");
    props.put("auto.commit.interval.ms", "1000");
    props.put("session.timeout.ms", "30000");
    props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList("ATLAS_HOOK"));

    isStopped = false;
    while (!isStopped) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        for (ConsumerRecord<String, String> record : records) {
            final MessageDeserializer deserializer = NotificationInterface.NotificationType.HOOK.getDeserializer();
            final HookNotification m
                    = (HookNotification) deserializer.deserialize(record.value());
            c.accept(m);
        }
    }

    consumer.close();
}
 
Example #14
Source File: KafkaConsumerFromOffset.java    From post-kafka-rewind-consumer-offset with MIT License
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = createConsumer();
    consumer.subscribe(Arrays.asList(TOPIC));

    boolean flag = true;


    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);
        if (flag) {
            Set<TopicPartition> assignments = consumer.assignment();
            assignments.forEach(topicPartition ->
                    consumer.seek(
                            topicPartition,
                            90));
            flag = false;
        }


        for (ConsumerRecord<String, String> record : records)
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
    }


}
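Two details are worth noting. First, seek() can only target partitions the consumer actually owns, and assignment() stays empty until the group rebalance triggered by the first poll() completes, which is why the seek is deferred behind the flag (records returned by that first poll still come from the old position). Second, createConsumer() is project-local; a minimal sketch under assumed configuration (usual Kafka client imports assumed):

private static KafkaConsumer<String, String> createConsumer() {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "rewind-demo");             // assumed group id
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    return new KafkaConsumer<>(props);
}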
 
Example #15
Source File: KafkaEasyTransMsgConsumerImpl.java    From EasyTransaction with Apache License 2.0
public KafkaEasyTransMsgConsumerImpl(ConsumerConfig cfg, ObjectSerializer serializer,
		KafkaEasyTransMsgPublisherImpl retryQueueMsgProducer) {
	this.serializer = serializer;
	this.cfg = cfg;
	consumer = new KafkaConsumer<>(cfg.getNativeCfg());
	reconsumer = new KafkaConsumer<>(cfg.getNativeCfg());
	this.retryQueueMsgProducer = retryQueueMsgProducer;
	threadPool = Executors.newFixedThreadPool(cfg.getConsumerThread(), new NamedThreadFactory("KafkaMsgHandler"));

	// Compute the retry-delay level threshold for each retry count
	List<List<Integer>> reconsumeCfg = cfg.getReconsume();
	initRetryThreshold(reconsumeCfg);
	initRetryRecordsMap();
	initRetryQueueSubscribe(reconsumeCfg);
	initRetryQueuePartitionCountMap();
}
 
Example #16
Source File: EarliestNativeTest.java    From vertx-kafka-client with Apache License 2.0
public static void main(String[] args) {

    Map<String, Object> config = new HashMap<>();
    config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    config.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
    config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(config);
    consumer.subscribe(Collections.singleton("my-topic"));

    while (true) {
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
      for (ConsumerRecord<String, String> record: records) {
        System.out.println(record);
      }
    }
  }
 
Example #17
Source File: ParallelWebKafkaConsumer.java    From kafka-webview with MIT License
private List<KafkaResult> consume(final KafkaConsumer kafkaConsumer) {
    final List<KafkaResult> kafkaResultList = new ArrayList<>();
    final ConsumerRecords<?,?> consumerRecords = kafkaConsumer.poll(pollTimeoutDuration);

    logger.info("Consumed {} records", consumerRecords.count());
    for (final ConsumerRecord consumerRecord : consumerRecords) {
        // Get next record
        // Convert to KafkaResult.
        final KafkaResult kafkaResult = new KafkaResult(
            consumerRecord.partition(),
            consumerRecord.offset(),
            consumerRecord.timestamp(),
            consumerRecord.key(),
            consumerRecord.value()
        );

        // Add to list.
        kafkaResultList.add(kafkaResult);
    }

    // Commit offsets
    commit(kafkaConsumer);
    return kafkaResultList;
}
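The commit(kafkaConsumer) helper is not shown. Given the synchronous flow, it is presumably a thin wrapper over commitSync() (a sketch, not kafka-webview's actual code):

private void commit(final KafkaConsumer kafkaConsumer) {
    // Synchronously commit the offsets of the records returned by the last poll()
    kafkaConsumer.commitSync();
}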
 
Example #18
Source File: MaasAppenderEvent.java    From DBus with Apache License 2.0
public MaasAppenderEvent(String topic, String dataTopic) {
    super(01);
    this.topic = topic;
    this.dataTopic = dataTopic;
    dao = new DbusDataDaoImpl();
    Properties props = HeartBeatConfigContainer.getInstance().getKafkaConsumerConfig();
    Properties producerProps = HeartBeatConfigContainer.getInstance().getmaasConf().getProducerProp();
    try {
        LoggerFactory.getLogger().info("[topic]   ...." + topic);
        LoggerFactory.getLogger().info("[maas-appender-event]  initial.........................");
        dataConsumer = new KafkaConsumer<>(props);
        partition0 = new TopicPartition(this.topic, 0);
        dataConsumer.assign(Arrays.asList(partition0));
        dataConsumer.seekToEnd(Arrays.asList(partition0));

        statProducer = new KafkaProducer<>(producerProps);

    } catch (Exception e) {
        e.printStackTrace();
    }
}
 
Example #19
Source File: StreamUtilsTest.java    From kafka-graphs with Apache License 2.0
private static <K, V> List<KeyValue<K, V>> consumeData(
    String topic,
    Class keyDeserializer,
    Class valueDeserializer,
    int expectedNumMessages,
    long resultsPollMaxTimeMs) {

    List<KeyValue<K, V>> result = new ArrayList<>();

    Properties consumerConfig = ClientUtils.consumerConfig(CLUSTER.bootstrapServers(), "testgroup",
        keyDeserializer, valueDeserializer, new Properties());
    try (KafkaConsumer<K, V> consumer = new KafkaConsumer<>(consumerConfig)) {

        consumer.subscribe(Collections.singleton(topic));
        long pollStart = System.currentTimeMillis();
        long pollEnd = pollStart + resultsPollMaxTimeMs;
        while (System.currentTimeMillis() < pollEnd &&
            continueConsuming(result.size(), expectedNumMessages)) {
            for (ConsumerRecord<K, V> record :
                consumer.poll(Duration.ofMillis(Math.max(1, pollEnd - System.currentTimeMillis())))) {
                if (record.value() != null) {
                    result.add(new KeyValue<>(record.key(), record.value()));
                }
            }
        }
    }
    return result;
}
 
Example #20
Source File: PartitionAssignmentWatchdog.java    From ja-micro with Apache License 2.0
public synchronized void subscriberInitialized(KafkaConsumer<String, String> realConsumer) {
    logger.debug("Adding subscriber");
    consumers.add(realConsumer);
    if (consumers.size() == 1) {
        startWatchdog();
    }
}
 
Example #21
Source File: KafkaSource.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
public String getMessageTemplate(StreamingSourceConfig streamingSourceConfig) {
    String template = null;
    KafkaConsumer<byte[], byte[]> consumer = null;
    try {
        String topicName = getTopicName(streamingSourceConfig.getProperties());
        Map<String, Object> config = getKafkaConf(streamingSourceConfig.getProperties());
        consumer = new KafkaConsumer<>(config);
        Set<TopicPartition> partitions = Sets.newHashSet(FluentIterable.from(consumer.partitionsFor(topicName))
                .transform(new Function<PartitionInfo, TopicPartition>() {
                    @Override
                    public TopicPartition apply(PartitionInfo input) {
                        return new TopicPartition(input.topic(), input.partition());
                    }
                }));
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        ConsumerRecords<byte[], byte[]> records = consumer.poll(500);
        if (records == null) {
            return null;
        }
        Iterator<ConsumerRecord<byte[], byte[]>> iterator = records.iterator();
        if (iterator == null || !iterator.hasNext()) {
            return null;
        }
        ConsumerRecord<byte[], byte[]> record = iterator.next();
        template = new String(record.value(), "UTF8");
    } catch (Exception e) {
        logger.error("error when fetch one record from kafka, stream:" + streamingSourceConfig.getName(), e);
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return template;
}
 
Example #22
Source File: RoadEndpointsIntegrationTest.java    From data-highway with Apache License 2.0
private static KafkaConsumer<String, String> createPatchConsumer() {
  Properties properties = new Properties();
  properties.setProperty("bootstrap.servers", kafkaCluster.bootstrapServers());
  properties.setProperty("group.id", UUID.randomUUID().toString());
  properties.setProperty("auto.offset.reset", "earliest");
  properties.setProperty("enable.auto.commit", "false");
  KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties, new StringDeserializer(),
      new StringDeserializer());
  kafkaConsumer.subscribe(Lists.newArrayList(patchTopic));
  return kafkaConsumer;
}
 
Example #23
Source File: KafkaEventSource.java    From mewbase with MIT License
@Override
public CompletableFuture<Subscription> subscribeAll(String channelName, EventHandler eventHandler) {
    TopicPartition partition0 = new TopicPartition(channelName, partitionZeroOnly);
    KafkaConsumer<String, byte[]> kafkaConsumer = createAndAssignConsumer(partition0);
    kafkaConsumer.seekToBeginning(Arrays.asList(partition0));
    return CompletableFuture.completedFuture(createAndRegisterSubscription(kafkaConsumer,eventHandler));
}
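createAndAssignConsumer(...) is project-local. Since the source reads one known partition, it presumably uses assign() (no group rebalancing) rather than subscribe(). A sketch with assumed configuration (usual Kafka client imports assumed):

private KafkaConsumer<String, byte[]> createAndAssignConsumer(TopicPartition partition) {
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
    consumer.assign(Arrays.asList(partition)); // manual assignment: no group management needed
    return consumer;
}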
 
Example #24
Source File: KafkaMessageLogReceiverEndpointFactory.java    From synapse with Apache License 2.0
public KafkaMessageLogReceiverEndpointFactory(final MessageInterceptorRegistry interceptorRegistry,
                                              final KafkaConsumer<String, String> kafkaConsumer,
                                              final ExecutorService kinesisMessageLogExecutorService,
                                              final ApplicationEventPublisher eventPublisher) {
    this.interceptorRegistry = interceptorRegistry;
    this.kafkaConsumer = kafkaConsumer;
    this.executorService = kinesisMessageLogExecutorService;
    this.eventPublisher = eventPublisher;
}
 
Example #25
Source File: KafkaConsumerTest.java    From springBoot-study with Apache License 2.0
private void init() {
    Properties props = new Properties();
    // Kafka broker addresses to consume from
    props.put("bootstrap.servers", "master:9092,slave1:9092,slave2:9092");
    // Consumer group name; different groups can consume the same data independently
    props.put("group.id", GROUPID);
    // Whether offsets are committed automatically
    props.put("enable.auto.commit", "false");
    // Session timeout
    props.put("session.timeout.ms", "30000");
    // Maximum number of records returned by a single poll
    props.put("max.poll.records", 10);
    // auto.offset.reset semantics:
    //   earliest: if the partition has a committed offset, resume from it; otherwise consume from the beginning
    //   latest:   if the partition has a committed offset, resume from it; otherwise consume only newly produced records
    //   none:     if every partition has a committed offset, resume from them; if any partition lacks one, throw an exception
    props.put("auto.offset.reset", "earliest");
    // Deserializers
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());
    this.consumer = new KafkaConsumer<String, String>(props);
    // Subscribe to the topic list
    this.consumer.subscribe(Arrays.asList(topic));

    System.out.println("Initialized!");
}
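Because enable.auto.commit is false here, the consuming loop that accompanies this init() must commit offsets itself. A sketch of a matching loop (the method name and poll timeout are assumptions; poll(Duration) assumes a recent client):

private void consumeLoop() {
    while (true) {
        // max.poll.records=10 caps each batch at 10 records
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        for (ConsumerRecord<String, String> record : records) {
            System.out.printf("offset = %d, key = %s, value = %s%n",
                    record.offset(), record.key(), record.value());
        }
        // Manual commit after the batch is processed
        consumer.commitSync();
    }
}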
 
Example #26
Source File: NewApiTopicConsumer.java    From jeesuite-libs with Apache License 2.0
private void commitOffsets(ConsumerWorker worker) {
	
	KafkaConsumer<String, Serializable> consumer = worker.consumer;
	if(worker.isCommiting())return;
	worker.setCommiting(true);
	try {

		if(worker.uncommittedOffsetMap.isEmpty())return ;
		
		logger.debug("committing the offsets : {}", worker.uncommittedOffsetMap);
		consumer.commitAsync(worker.uncommittedOffsetMap, new OffsetCommitCallback() {
			@Override
			public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
				//
				worker.setCommiting(false);
				if(exception == null){
					worker.resetUncommittedOffsetMap();
					logger.debug("committed the offsets : {}",offsets);
				}else{
					logger.error("committ the offsets error",exception);
				}
			}
		});
	} finally {
		
	}
}
 
Example #27
Source File: AConsumerHandler.java    From SO with BSD 2-Clause "Simplified" License
/**
 * constructor.<BR/>
 */
public AConsumerHandler(int id) {
    this.id = id;

    Properties properties = initProperties(getPropertiesMap());
    consumer = new KafkaConsumer<K, V>(properties);
}
 
Example #28
Source File: KafkaClient.java    From kylin with Apache License 2.0
public static long getEarliestOffset(KafkaConsumer consumer, String topic, int partitionId) {

        TopicPartition topicPartition = new TopicPartition(topic, partitionId);
        consumer.assign(Arrays.asList(topicPartition));
        consumer.seekToBeginning(Arrays.asList(topicPartition));

        return consumer.position(topicPartition);
    }
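On client versions 0.10.1 and later, the same value can be read without touching the consumer's assignment by using beginningOffsets() (a sketch, not Kylin's actual code):

public static long getEarliestOffsetViaMetadata(KafkaConsumer<?, ?> consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    // beginningOffsets() asks the broker for the log start offset; no assign()/seek() needed
    return consumer.beginningOffsets(Arrays.asList(topicPartition)).get(topicPartition);
}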
 
Example #29
Source File: KafkaAdminFactory.java    From kafka-webview with MIT License
/**
 * Create a new KafkaConsumer instance.
 * @param clusterConfig What cluster to connect to.
 * @param clientId What clientId to associate the connection with.
 * @return KafkaConsumer instance.
 */
public KafkaConsumer<String, String> createConsumer(final ClusterConfig clusterConfig, final String clientId) {
    // Create a map
    final Map<String, Object> config = configUtil.applyCommonSettings(clusterConfig, clientId);

    // Set required deserializer classes.
    config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

    // Create consumer
    return new KafkaConsumer<>(config);
}
 
Example #30
Source File: KafkaSampleStore.java    From cruise-control with BSD 2-Clause "Simplified" License
MetricLoader(KafkaConsumer<byte[], byte[]> consumer,
             SampleLoader sampleLoader,
             AtomicLong numLoadedSamples,
             AtomicLong numPartitionMetricSamples,
             AtomicLong numBrokerMetricSamples,
             AtomicLong totalSamples) {
  _consumer = consumer;
  _sampleLoader = sampleLoader;
  _numLoadedSamples = numLoadedSamples;
  _numPartitionMetricSamples = numPartitionMetricSamples;
  _numBrokerMetricSamples = numBrokerMetricSamples;
  _totalSamples = totalSamples;
}