Java Code Examples for org.apache.kafka.clients.consumer.KafkaConsumer#seekToEnd()

The following examples show how to use org.apache.kafka.clients.consumer.KafkaConsumer#seekToEnd(). Follow the links above each example to go to the original project or source file.
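
Before the project examples, here is a minimal, self-contained sketch of the pattern most of them follow: assign the partition(s), call seekToEnd(), then call position() to read the log end offset. Two points worth knowing: seekToEnd() only accepts partitions that are currently assigned to the consumer, and the seek is evaluated lazily, taking effect on the next poll() or position() call. The broker address, topic name, and group id below are placeholders, not values taken from any of the projects.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

public class SeekToEndSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "offset-probe");            // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("my-topic", 0); // placeholder topic
            consumer.assign(Collections.singletonList(tp));    // seekToEnd() requires assigned partitions
            consumer.seekToEnd(Collections.singletonList(tp)); // lazy: recorded, not yet executed
            long endOffset = consumer.position(tp);            // triggers the seek; returns the log end offset
            System.out.println(tp + " log end offset = " + endOffset);
        }
    }
}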
Example 1
Source File: KafkaUtils.java    From spliceengine with GNU Affero General Public License v3.0
public static long messageCount(String bootstrapServers, String topicName, int partition) {
    Properties props = new Properties();
    String consumerId = UUID.randomUUID().toString();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "spark-consumer-group-"+consumerId);
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "spark-consumer-"+consumerId);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ExternalizableDeserializer.class.getName());

    KafkaConsumer<Integer, Externalizable> consumer = new KafkaConsumer<Integer, Externalizable>(props);

    TopicPartition topicPartition = new TopicPartition(topicName, partition);
    List<TopicPartition> partitionList = Arrays.asList(topicPartition);
    consumer.assign(partitionList);
    consumer.seekToEnd(partitionList);
    long nextOffset = consumer.position(topicPartition);

    consumer.seekToBeginning(partitionList);
    long firstOffset = consumer.position(topicPartition);

    consumer.close();

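    // the offset difference approximates the message count; compaction or transactional markers can leave gaps, making this an overestimate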
    return nextOffset - firstOffset;
}
 
Example 2
Source File: KafkaDispatcherImpl.java    From arcusplatform with Apache License 2.0
private void seekAndAssign(Collection<TopicPartition> partitions, KafkaConsumer<PlatformPartition, byte[]> consumer) {
	consumer.assign(partitions);
	if(config.isTransientOffsets()) {
		logger.info("Transient offsets enabled, seeking to latest");
		consumer.seekToEnd(partitions);
	}
	else {
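		// collect the partitions that have no committed offset for this consumer group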
		Set<TopicPartition> unknownPartitions = new HashSet<>();
		for(TopicPartition tp: partitions) {
			OffsetAndMetadata om = consumer.committed(tp);
			if(om == null) {
				unknownPartitions.add(tp);
			}
		}
	}
}
 
Example 3
Source File: CheckFlowLineHandler.java    From DBus with Apache License 2.0
private List<Object> initConsumer(String topic, String step) {
    Properties props = obtainKafkaConsumerProps();
    props.put("group.id", "auto-check-allinone-consumer-groupid-ss-" + step);
    props.put("client.id", "auto-check-allinone-consumer-clientid-ss-" + step);
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
    List<TopicPartition> assignTopics = new ArrayList<>();
    assignTopics(consumer.partitionsFor(topic), assignTopics);
    consumer.assign(assignTopics);
    consumer.seekToEnd(assignTopics);
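    // note: only the first assigned partition's end position is recorded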
    long position = consumer.position(assignTopics.get(0));

    List<Object> list = new ArrayList<>();
    list.add(consumer);
    list.add(position);
    return list;
}
 
Example 4
Source File: FlowLineCheckService.java    From DBus with Apache License 2.0
private List<Object> initConsumer(String topic, String step) throws Exception {
    Properties props = obtainKafkaConsumerProps();
    props.put("group.id", "auto-check-table-consumer-groupid-ss-" + step);
    props.put("client.id", "auto-check-table-consumer-clientid-ss-" + step);
    props.put("enable.auto.commit", false);
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
    List<TopicPartition> assignTopics = new ArrayList<>();
    assignTopics(consumer.partitionsFor(topic), assignTopics);
    consumer.assign(assignTopics);
    consumer.seekToEnd(assignTopics);
    long position = consumer.position(assignTopics.get(0));

    logger.info("topic: {}, end position: {}", topic, position);

    List<Object> list = new ArrayList<>();
    list.add(consumer);
    list.add(position);
    return list;
}
 
Example 5
Source File: KafkaTestHelper.java    From nakadi with MIT License
public List<Cursor> getNextOffsets(final String topic) {
    final KafkaConsumer<String, String> consumer = createConsumer();
    final List<TopicPartition> partitions = consumer
            .partitionsFor(topic)
            .stream()
            .map(pInfo -> new TopicPartition(topic, pInfo.partition()))
            .collect(Collectors.toList());

    consumer.assign(partitions);
    consumer.seekToEnd(partitions);

    return partitions
            .stream()
            .map(partition -> new Cursor(Integer.toString(partition.partition()),
                    Long.toString(consumer.position(partition))))
            .collect(Collectors.toList());
}
 
Example 6
Source File: KafkaClient.java    From kylin with Apache License 2.0
public static long getLatestOffset(KafkaConsumer consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToEnd(Arrays.asList(topicPartition));

    return consumer.position(topicPartition);
}
 
Example 7
Source File: KafkaClient.java    From kylin-on-parquet-v2 with Apache License 2.0
public static long getLatestOffset(KafkaConsumer consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToEnd(Arrays.asList(topicPartition));

    return consumer.position(topicPartition);
}
 
Example 8
Source File: SinkerKafkaSource.java    From DBus with Apache License 2.0
public SinkerKafkaSource() throws IOException, PropertyException {
    Properties config = ConfUtils.getProps(CONFIG_PROPERTIES);
    topic = config.getProperty(Constants.SINKER_HEARTBEAT_TOPIC);
    if (topic == null) {
        throw new PropertyException("[sinker] 配置参数文件内容不能为空! " + Constants.SINKER_HEARTBEAT_TOPIC);
    }

    topicPartition = new TopicPartition(topic, 0);

    Properties statProps = ConfUtils.getProps(CONSUMER_PROPERTIES);
    statProps.setProperty("enable.auto.commit", "true");
    statProps.setProperty("client.id", "heartbeat_consumer_sinker_client");
    List<TopicPartition> topics = Arrays.asList(topicPartition);
    //security
    if (KafkaUtil.checkSecurity()) {
        statProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    }

    LOG.info("[sinker] SinkerKafkaSource message: set max.poll.records=1000");
    statProps.setProperty("max.poll.records", "1000");

    consumer = new KafkaConsumer(statProps);
    consumer.assign(topics);

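    // position() blocks until an offset is available: the group's committed offset or the auto.offset.reset fallback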
    long beforeOffset = consumer.position(topicPartition);
    String offset = config.getProperty("sinker.kafka.offset");
    if (StringUtils.isBlank(offset) || offset.equalsIgnoreCase("none")) {
        // do nothing
    } else if (offset.equalsIgnoreCase("begin")) {
        consumer.seekToBeginning(Lists.newArrayList(topicPartition));
    } else if (offset.equalsIgnoreCase("end")) {
        consumer.seekToEnd(Lists.newArrayList(topicPartition));
    } else {
        long nOffset = Long.parseLong(offset);
        consumer.seek(topicPartition, nOffset);
    }
    long afterOffset = consumer.position(topicPartition);
    LOG.info("[sinker] SinkerKafkaSource init OK. beforeOffset={}, afterOffset={}", beforeOffset, afterOffset);
}
 
Example 9
Source File: KafkaOffsetGetter.java    From Kafka-Insight with Apache License 2.0
/**
 * Continuously polls the log end offset of every partition of every topic
 * visible to the consumer and records it in <code>logEndOffsetMap</code>.
 * Sleeps between rounds; on any error the consumer is closed and recreated
 * on the next iteration.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    String group = "kafka-insight-logOffsetListener";
    int sleepTime = 60000;
    KafkaConsumer<Array<Byte>, Array<Byte>> kafkaConsumer = null;

    while (true) {

        try {
            if (null == kafkaConsumer) {
                kafkaConsumer = KafkaUtils.createNewKafkaConsumer(brokersInfo, group);
            }

            Map<String, List<PartitionInfo>> topicPartitionsMap = kafkaConsumer.listTopics();
            for (List<PartitionInfo> partitionInfoList : topicPartitionsMap.values()) {
                for (PartitionInfo partitionInfo : partitionInfoList) {
                    TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
                    Collection<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
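                    // assign() replaces any previous assignment, so each partition is handled one at a time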
                    kafkaConsumer.assign(topicPartitions);
                    kafkaConsumer.seekToEnd(topicPartitions);
                    Long logEndOffset = kafkaConsumer.position(topicPartition);
                    logEndOffsetMap.put(topicPartition, logEndOffset);
                }
            }

            Thread.sleep(sleepTime);

        } catch (Exception e) {
            e.printStackTrace();
            if (null != kafkaConsumer) {
                kafkaConsumer.close();
                kafkaConsumer = null;
            }
        }
    }

}
 
Example 10
Source File: KafkaConsumerCommand.java    From azeroth with Apache License 2.0
protected long getLogSize(KafkaConsumer<String, Serializable> kafkaConsumer, String topic,
                          int partition) {
    TopicPartition topicPartition = new TopicPartition(topic, partition);
    List<TopicPartition> asList = Arrays.asList(topicPartition);
    kafkaConsumer.assign(asList);
    kafkaConsumer.seekToEnd(asList);
    long logEndOffset = kafkaConsumer.position(topicPartition);
    return logEndOffset;
}
 
Example 11
Source File: KafkaConsumerCommand.java    From jeesuite-libs with Apache License 2.0
protected long getLogSize(KafkaConsumer<String, Serializable> kafkaConsumer,String topic, int partition) {
	TopicPartition topicPartition = new TopicPartition(topic, partition);
	List<TopicPartition> asList = Arrays.asList(topicPartition);
	kafkaConsumer.assign(asList);
	kafkaConsumer.seekToEnd(asList);
	long logEndOffset = kafkaConsumer.position(topicPartition);
	return logEndOffset;
}
 
Example 12
Source File: KafkaEventSource.java    From mewbase with MIT License
@Override
public CompletableFuture<Subscription> subscribeFromMostRecent(String channelName, EventHandler eventHandler) {
    TopicPartition partition0 = new TopicPartition(channelName, partitionZeroOnly);
    KafkaConsumer<String, byte[]> kafkaConsumer = createAndAssignConsumer(partition0);
    kafkaConsumer.seekToEnd(Arrays.asList(partition0));
    final long offset = kafkaConsumer.position(partition0);
    // step back one event so the subscriber starts from the most recent event
    kafkaConsumer.seek(partition0, offset - 1);
    return CompletableFuture.completedFuture(createAndRegisterSubscription(kafkaConsumer, eventHandler));
}
 
Example 13
Source File: KafkaEventSource.java    From mewbase with MIT License
@Override
public CompletableFuture<Subscription> subscribe(String channelName, EventHandler eventHandler) {
    TopicPartition partition0 = new TopicPartition(channelName, partitionZeroOnly);
    KafkaConsumer<String, byte[]> kafkaConsumer = createAndAssignConsumer(partition0);
    kafkaConsumer.seekToEnd(Arrays.asList(partition0));
    return CompletableFuture.completedFuture(createAndRegisterSubscription(kafkaConsumer, eventHandler));
}
 
Example 14
Source File: KafkaService.java    From cerberus-source with GNU General Public License v3.0
@SuppressWarnings("unchecked")
@Override
public AnswerItem<Map<TopicPartition, Long>> seekEvent(String topic, String bootstrapServers,
        List<AppServiceHeader> serviceHeader) throws InterruptedException, ExecutionException {

    MessageEvent message = new MessageEvent(MessageEventEnum.ACTION_SUCCESS_CALLSERVICE_SEARCHKAFKA);
    AnswerItem<Map<TopicPartition, Long>> result = new AnswerItem<>();

    KafkaConsumer consumer = null;

    try {

        Properties props = new Properties();
        serviceHeader.add(factoryAppServiceHeader.create(null, "bootstrap.servers", bootstrapServers, "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "enable.auto.commit", "false", "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "max.poll.records", "10", "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer", "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer", "Y", 0, "", "", null, "", null));

        for (AppServiceHeader object : serviceHeader) {
            if (StringUtil.parseBoolean(object.getActive())) {
                props.put(object.getKey(), object.getValue());
            }
        }

        LOG.info("Open Consumer : " + getKafkaConsumerKey(topic, bootstrapServers));
        consumer = new KafkaConsumer<>(props);

        //Get a list of the topic's partitions
        List<PartitionInfo> partitionList = consumer.partitionsFor(topic);

        if (partitionList == null) {

            message = new MessageEvent(MessageEventEnum.ACTION_FAILED_CALLSERVICE_SEEKKAFKA);
            message.setDescription(message.getDescription().replace("%EX%", "Maybe Topic does not exist.").replace("%TOPIC%", topic).replace("%HOSTS%", bootstrapServers));

        } else {

            List<TopicPartition> topicPartitionList = partitionList.stream().map(info -> new TopicPartition(topic, info.partition())).collect(Collectors.toList());
            //Assign all the partitions to this consumer
            consumer.assign(topicPartitionList);
            consumer.seekToEnd(topicPartitionList); // default to the latest offset for all partitions

            // endOffsets() returns the log end offset of every partition in a single call
            Map<TopicPartition, Long> partitionOffset = consumer.endOffsets(topicPartitionList);

            result.setItem(partitionOffset);

        }

    } catch (Exception ex) {
        message = new MessageEvent(MessageEventEnum.ACTION_FAILED_CALLSERVICE_SEEKKAFKA);
        message.setDescription(message.getDescription().replace("%EX%", ex.toString()).replace("%TOPIC%", topic).replace("%HOSTS%", bootstrapServers));
        LOG.debug(ex, ex);
    } finally {
        if (consumer != null) {
            consumer.close();
            LOG.info("Closed Consumer : " + getKafkaConsumerKey(topic, bootstrapServers));
        } else {
            LOG.info("Consumer not opened : " + getKafkaConsumerKey(topic, bootstrapServers));
        }
    }
    result.setResultMessage(message);
    return result;
}
 
Example 15
Source File: KafkaConsumerCallBridge09.java    From Flink-CEPplus with Apache License 2.0
public void seekPartitionToEnd(KafkaConsumer<?, ?> consumer, TopicPartition partition) {
	// the 0.9.x consumer API declares seekToEnd(TopicPartition...) as varargs; later clients take a Collection
	consumer.seekToEnd(partition);
}
 
Example 16
Source File: DuplicatePublishingDetector.java    From light-eventuate-4j with Apache License 2.0
private Optional<BinlogFileOffset> fetchMaxOffsetFor(String destinationTopic) {
  String subscriberId = "duplicate-checker-" + destinationTopic + "-" + System.currentTimeMillis();
  Properties consumerProperties = ConsumerPropertiesFactory.makeConsumerProperties(config, subscriberId);
  KafkaConsumer<String, String> consumer = new KafkaConsumer<>(consumerProperties);

  List<PartitionInfo> partitions = EventuateKafkaConsumer.verifyTopicExistsBeforeSubscribing(consumer, destinationTopic);

  List<TopicPartition> topicPartitionList = partitions.stream().map(p -> new TopicPartition(destinationTopic, p.partition())).collect(toList());
  consumer.assign(topicPartitionList);
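  // poll(0) forces the consumer to fetch metadata and initialize its positions before seeking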
  consumer.poll(0);

  logger.info("Seeking to end");

  try {
    consumer.seekToEnd(topicPartitionList);
  } catch (IllegalStateException e) {
    logger.error("Error seeking " + destinationTopic, e);
    return Optional.empty();
  }
  List<PartitionOffset> positions = topicPartitionList.stream()
          .map(tp -> new PartitionOffset(tp.partition(), consumer.position(tp) - 1))
          .filter(po -> po.offset >= 0)
          .collect(toList());

  logger.info("Seeking to positions=" + positions);

  positions.forEach(po -> {
    consumer.seek(new TopicPartition(destinationTopic, po.partition), po.offset);
  });

  logger.info("Polling for records");

  List<ConsumerRecord<String, String>> records = new ArrayList<>();
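  // keep polling until we have read at least as many records as there are seeked positions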
  while (records.size()<positions.size()) {
    ConsumerRecords<String, String> consumerRecords = consumer.poll(1000);
    consumerRecords.forEach(records::add);
  }

  logger.info("Got records: {}", records.size());
  Optional<BinlogFileOffset> max = StreamSupport.stream(records.spliterator(), false).map(record -> {
    logger.info(String.format("got record: %s %s %s", record.partition(), record.offset(), record.value()));
    return JSonMapper.fromJson(record.value(), PublishedEvent.class).getBinlogFileOffset();
  }).filter(binlogFileOffset -> binlogFileOffset!=null).max((blfo1, blfo2) -> blfo1.isSameOrAfter(blfo2) ? 1 : -1);
  consumer.close();
  return max;
}
 
Example 17
Source File: ProjectTableService.java    From DBus with Apache License 2.0
public List<Map<String, String>> getTopicOffsets(String topic) {
    KafkaConsumer<String, String> consumer = null;
    try {
        Properties consumerProps = zkService.getProperties(KeeperConstants.KEEPER_CONSUMER_CONF);
        consumerProps.setProperty("client.id", "");
        consumerProps.setProperty("group.id", "topic.offsets.reader.temp");
        Properties globalConf = zkService.getProperties(KeeperConstants.GLOBAL_CONF);
        consumerProps.setProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS, globalConf.getProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS));
        if (StringUtils.equals(SecurityConfProvider.getSecurityConf(zkService), Constants.SECURITY_CONFIG_TRUE_VALUE)) {
            consumerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        }
        List<Map<String, String>> topicMsg = new ArrayList<>();
        // create the consumer
        consumer = new KafkaConsumer<String, String>(consumerProps);
        /*// subscribe to the topic (must subscribe to all of its partitions, otherwise position() throws
        // "You can only check the position for partitions assigned to this consumer.")
        consumer.subscribe(Arrays.asList(topic));*/
        // get the topic's partition list
        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);

        // gather the info of each partition
        for (PartitionInfo partitionInfo : partitionInfos) {
            int partition = partitionInfo.partition();
            TopicPartition topicPartition = new TopicPartition(topic, partition);
            consumer.assign(Arrays.asList(topicPartition));

            consumer.seekToEnd(consumer.assignment());
            // next fetch position (log end offset)
            long nextFetchOffset = consumer.position(topicPartition);

            consumer.seekToBeginning(consumer.assignment());
            long headOffset = consumer.position(topicPartition);

            Map<String, String> partitionMsg = new HashedMap();
            partitionMsg.put("topic", topic);
            partitionMsg.put("partition", String.valueOf(partition));
            partitionMsg.put("latestOffset", String.valueOf(nextFetchOffset));
            partitionMsg.put("headOffset", String.valueOf(headOffset));
            topicMsg.add(partitionMsg);

        }

        return topicMsg;
    } catch (Exception e) {
        logger.error("[table topic offset] Error encountered while getting topic messages. topic:{}", topic);
        return null;
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
 
Example 18
Source File: KafkaConsumerCallBridge010.java    From flink with Apache License 2.0
@Override
public void seekPartitionToEnd(KafkaConsumer<?, ?> consumer, TopicPartition partition) {
	consumer.seekToEnd(Collections.singletonList(partition));
}
 
Example 19
Source File: KafkaConsumerCallBridge09.java    From flink with Apache License 2.0
public void seekPartitionToEnd(KafkaConsumer<?, ?> consumer, TopicPartition partition) {
	consumer.seekToEnd(partition);
}
 
Example 20
Source File: KafkaConsumerCallBridge010.java    From Flink-CEPplus with Apache License 2.0
@Override
public void seekPartitionToEnd(KafkaConsumer<?, ?> consumer, TopicPartition partition) {
	consumer.seekToEnd(Collections.singletonList(partition));
}