Java Code Examples for org.apache.kafka.clients.consumer.KafkaConsumer#position()

The following examples show how to use org.apache.kafka.clients.consumer.KafkaConsumer#position(). They are drawn from open-source projects; the source file, project, and license are noted above each example.
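Most of the examples below share one pattern: assign the consumer to the partitions of interest, seek, then read position(). Here is a minimal, self-contained sketch of that pattern; the class name PositionSketch, the broker address localhost:9092, and the topic name my-topic are placeholders rather than values taken from any project below.

import java.util.Collections;
import java.util.List;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class PositionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            // position() may only be called for partitions assigned to this consumer
            TopicPartition tp = new TopicPartition("my-topic", 0); // placeholder topic
            List<TopicPartition> tps = Collections.singletonList(tp);
            consumer.assign(tps);

            consumer.seekToBeginning(tps);
            long earliest = consumer.position(tp); // first available offset

            consumer.seekToEnd(tps);
            long logEnd = consumer.position(tp); // offset of the next record to be written

            System.out.println("messages currently in partition: " + (logEnd - earliest));
        }
    }
}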
Example 1
Source File: KafkaUtils.java    From spliceengine with GNU Affero General Public License v3.0
public static long messageCount(String bootstrapServers, String topicName, int partition) {
    Properties props = new Properties();
    String consumerId = UUID.randomUUID().toString();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "spark-consumer-group-"+consumerId);
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, "spark-consumer-"+consumerId);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ExternalizableDeserializer.class.getName());

    KafkaConsumer<Integer, Externalizable> consumer = new KafkaConsumer<Integer, Externalizable>(props);

    TopicPartition topicPartition = new TopicPartition(topicName, partition);
    List<TopicPartition> partitionList = Arrays.asList(topicPartition);
    consumer.assign(partitionList);
    consumer.seekToEnd(partitionList);
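    // after seekToEnd, position() returns the end offset (one past the last available record)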
    long nextOffset = consumer.position(topicPartition);

    consumer.seekToBeginning(partitionList);
    long firstOffset = consumer.position(topicPartition);

    consumer.close();

    return nextOffset - firstOffset;
}
 
Example 2
Source File: CheckFlowLineHandler.java    From DBus with Apache License 2.0
private List<Object> initConsumer(String topic, String step) {
    Properties props = obtainKafkaConsumerProps();
    props.put("group.id", "auto-check-allinone-consumer-groupid-ss-" + step);
    props.put("client.id", "auto-check-allinone-consumer-clientid-ss-" + step);
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
    List<TopicPartition> assignTopics = new ArrayList<>();
    assignTopics(consumer.partitionsFor(topic), assignTopics);
    consumer.assign(assignTopics);
    consumer.seekToEnd(assignTopics);
    long position = consumer.position(assignTopics.get(0));

    List<Object> list = new ArrayList<>();
    list.add(consumer);
    list.add(position);
    return list;
}
 
Example 3
Source File: FlowLineCheckService.java    From DBus with Apache License 2.0
private List<Object> initConsumer(String topic, String step) throws Exception {
    Properties props = obtainKafkaConsumerProps();
    props.put("group.id", "auto-check-table-consumer-groupid-ss-" + step);
    props.put("client.id", "auto-check-table-consumer-clientid-ss-" + step);
    props.put("enable.auto.commit", false);
    KafkaConsumer<String, byte[]> consumer = new KafkaConsumer<>(props);
    List<TopicPartition> assignTopics = new ArrayList<>();
    assignTopics(consumer.partitionsFor(topic), assignTopics);
    consumer.assign(assignTopics);
    consumer.seekToEnd(assignTopics);
    long position = consumer.position(assignTopics.get(0));

    logger.info("topic: {}, end position: {}", topic, position);

    List<Object> list = new ArrayList<>();
    list.add(consumer);
    list.add(position);
    return list;
}
 
Example 4
Source File: TestKafkaServiceImpl.java    From kafka-eagle with Apache License 2.0
public Map<TopicPartition, Long> getKafkaLogSize(String topic, Set<Integer> partitionids) {
	Properties props = new Properties();
	props.put(ConsumerConfig.GROUP_ID_CONFIG, Kafka.KAFKA_EAGLE_SYSTEM_GROUP);
	props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "127.0.0.1:9092");
	props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getCanonicalName());
	KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
	Set<TopicPartition> tps = new HashSet<>();
	for (int partitionid : partitionids) {
		tps.add(new TopicPartition(topic, partitionid));
	}
	// position() may only be called on partitions assigned to this consumer
	consumer.assign(tps);
	consumer.seekToEnd(tps);

	Map<TopicPartition, Long> partitionOffset = new HashMap<>();
	for (TopicPartition tp : tps) {
		partitionOffset.put(tp, consumer.position(tp));
	}

	System.out.println(partitionOffset.toString());

	consumer.close();
	return partitionOffset;
}
 
Example 5
Source File: KafkaConsumerCommand.java    From azeroth with Apache License 2.0
protected long getLogSize(KafkaConsumer<String, Serializable> kafkaConsumer, String topic,
                          int partition) {
    TopicPartition topicPartition = new TopicPartition(topic, partition);
    List<TopicPartition> asList = Arrays.asList(topicPartition);
    kafkaConsumer.assign(asList);
    kafkaConsumer.seekToEnd(asList);
    long logEndOffset = kafkaConsumer.position(topicPartition);
    return logEndOffset;
}
 
Example 6
Source File: KafkaClient.java    From kylin with Apache License 2.0
public static long getLatestOffset(KafkaConsumer consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToEnd(Arrays.asList(topicPartition));

    return consumer.position(topicPartition);
}
 
Example 7
Source File: KafkaClient.java    From kylin with Apache License 2.0
public static long getEarliestOffset(KafkaConsumer consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToBeginning(Arrays.asList(topicPartition));

    return consumer.position(topicPartition);
}
 
Example 8
Source File: KafkaEventSource.java    From mewbase with MIT License
@Override
public CompletableFuture<Subscription> subscribeFromMostRecent(String channelName, EventHandler eventHandler) {
    TopicPartition partition0 = new TopicPartition(channelName, partitionZeroOnly);
    KafkaConsumer<String, byte[]> kafkaConsumer = createAndAssignConsumer(partition0);
    kafkaConsumer.seekToEnd(Arrays.asList(partition0));
    final long offset = kafkaConsumer.position(partition0);
    // step back one so the most recent event is redelivered (assumes the partition is non-empty)
    kafkaConsumer.seek(partition0, offset - 1);
    return CompletableFuture.completedFuture(createAndRegisterSubscription(kafkaConsumer,eventHandler));
}
 
Example 9
Source File: KafkaConsumerCommand.java    From jeesuite-libs with Apache License 2.0
protected long getLogSize(KafkaConsumer<String, Serializable> kafkaConsumer,String topic, int partition) {
	TopicPartition topicPartition = new TopicPartition(topic, partition);
	List<TopicPartition> asList = Arrays.asList(topicPartition);
	kafkaConsumer.assign(asList);
	kafkaConsumer.seekToEnd(asList);
	long logEndOffset = kafkaConsumer.position(topicPartition);
	return logEndOffset;
}
 
Example 10
Source File: KafkaClient.java    From kylin-on-parquet-v2 with Apache License 2.0
public static long getEarliestOffset(KafkaConsumer consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToBeginning(Arrays.asList(topicPartition));

    return consumer.position(topicPartition);
}
 
Example 11
Source File: ParallelWebKafkaConsumer.java    From kafka-webview with MIT License
private ConsumerState getConsumerState(final KafkaConsumer kafkaConsumer) {
    final List<PartitionOffset> offsets = new ArrayList<>();

    for (final TopicPartition topicPartition: getAllPartitions(kafkaConsumer)) {
        final long offset = kafkaConsumer.position(topicPartition);
        offsets.add(new PartitionOffset(topicPartition.partition(), offset));
    }

    return new ConsumerState(clientConfig.getTopicConfig().getTopicName(), offsets);
}
 
Example 12
Source File: KafkaOffsetGetter.java    From Kafka-Insight with Apache License 2.0
/**
 * When an object implementing interface <code>Runnable</code> is used
 * to create a thread, starting the thread causes the object's
 * <code>run</code> method to be called in that separately executing
 * thread.
 * <p>
 * The general contract of the method <code>run</code> is that it may
 * take any action whatsoever.
 *
 * @see Thread#run()
 */
@Override
public void run() {
    String group = "kafka-insight-logOffsetListener";
    int sleepTime = 60000;
    KafkaConsumer<byte[], byte[]> kafkaConsumer = null;

    while (true) {

        try {
            if (null == kafkaConsumer) {
                kafkaConsumer = KafkaUtils.createNewKafkaConsumer(brokersInfo, group);
            }

            Map<String, List<PartitionInfo>> topicPartitionsMap = kafkaConsumer.listTopics();
            for (List<PartitionInfo> partitionInfoList : topicPartitionsMap.values()) {
                for (PartitionInfo partitionInfo : partitionInfoList) {
                    TopicPartition topicPartition = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
                    Collection<TopicPartition> topicPartitions = Arrays.asList(topicPartition);
                    kafkaConsumer.assign(topicPartitions);
                    kafkaConsumer.seekToEnd(topicPartitions);
                    Long logEndOffset = kafkaConsumer.position(topicPartition);
                    logEndOffsetMap.put(topicPartition, logEndOffset);
                }
            }

            Thread.sleep(sleepTime);

        } catch (Exception e) {
            e.printStackTrace();
            if (null != kafkaConsumer) {
                kafkaConsumer.close();
                kafkaConsumer = null;
            }
        }
    }

}
 
Example 13
Source File: SinkerKafkaSource.java    From DBus with Apache License 2.0
public SinkerKafkaSource() throws IOException, PropertyException {
    Properties config = ConfUtils.getProps(CONFIG_PROPERTIES);
    topic = config.getProperty(Constants.SINKER_HEARTBEAT_TOPIC);
    if (topic == null) {
        throw new PropertyException("[sinker] Required config property is missing or empty: " + Constants.SINKER_HEARTBEAT_TOPIC);
    }

    topicPartition = new TopicPartition(topic, 0);

    Properties statProps = ConfUtils.getProps(CONSUMER_PROPERTIES);
    statProps.setProperty("enable.auto.commit", "true");
    statProps.setProperty("client.id", "heartbeat_consumer_sinker_client");
    List<TopicPartition> topics = Arrays.asList(topicPartition);
    //security
    if (KafkaUtil.checkSecurity()) {
        statProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    }

    LOG.info("[sinker] SinkerKafkaSource message: set max.poll.records=1000");
    statProps.setProperty("max.poll.records", "1000");

    consumer = new KafkaConsumer(statProps);
    consumer.assign(topics);
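    // no seek has happened yet, so position() reflects the group's committed offset (or the auto.offset.reset policy)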

    long beforeOffset = consumer.position(topicPartition);
    String offset = config.getProperty("sinker.kafka.offset");
    if (StringUtils.isBlank(offset) || offset.equalsIgnoreCase("none")) {
        // do nothing
    } else if (offset.equalsIgnoreCase("begin")) {
        consumer.seekToBeginning(Lists.newArrayList(topicPartition));
    } else if (offset.equalsIgnoreCase("end")) {
        consumer.seekToEnd(Lists.newArrayList(topicPartition));
    } else {
        long nOffset = Long.parseLong(offset);
        consumer.seek(topicPartition, nOffset);
    }
    long afterOffset = consumer.position(topicPartition);
    LOG.info("[sinker] SinkerKafkaSource init OK. beforeOffset={}, afterOffset={}", beforeOffset, afterOffset);
}
 
Example 14
Source File: KafkaClient.java    From kylin-on-parquet-v2 with Apache License 2.0
public static long getLatestOffset(KafkaConsumer consumer, String topic, int partitionId) {
    TopicPartition topicPartition = new TopicPartition(topic, partitionId);
    consumer.assign(Arrays.asList(topicPartition));
    consumer.seekToEnd(Arrays.asList(topicPartition));

    return consumer.position(topicPartition);
}
 
Example 15
Source File: BrokerStatsFilter.java    From doctorkafka with Apache License 2.0
public static List<BrokerStats> processOnePartition(String zkUrl, TopicPartition topicPartition,
                                                    long startOffset, long endOffset,
                                                    Set<String> brokerNames) {
  KafkaConsumer<byte[], byte[]> kafkaConsumer = null;
  List<BrokerStats> result = new ArrayList<>();
  try {
    String brokers = KafkaUtils.getBrokers(zkUrl, SecurityProtocol.PLAINTEXT);
    LOG.info("ZkUrl: {}, Brokers: {}", zkUrl, brokers);
    Properties props = new Properties();
    props.put(KafkaUtils.BOOTSTRAP_SERVERS, brokers);
    props.put(KafkaUtils.ENABLE_AUTO_COMMIT, "false");
    props.put(KafkaUtils.GROUP_ID, "kafka_operator" + topicPartition);
    props.put(KafkaUtils.KEY_DESERIALIZER,
        "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put(KafkaUtils.VALUE_DESERIALIZER,
        "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put(KafkaUtils.MAX_POLL_RECORDS, 2000);
    props.put("max.partition.fetch.bytes", 1048576 * 4);

    kafkaConsumer = new KafkaConsumer<>(props);
    Set<TopicPartition> topicPartitions = new HashSet<>();
    topicPartitions.add(topicPartition);
    kafkaConsumer.assign(topicPartitions);
    kafkaConsumer.seek(topicPartition, startOffset);

    ConsumerRecords<byte[], byte[]> records = null;
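    // position() advances as records are fetched; loop until it reaches endOffset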
    while (kafkaConsumer.position(topicPartition) < endOffset) {
      records = kafkaConsumer.poll(100);
      for (ConsumerRecord<byte[], byte[]> record : records) {
        BrokerStats brokerStats = OperatorUtil.deserializeBrokerStats(record);
        if (brokerStats == null || brokerStats.getName() == null) {
          continue;
        }
        if (brokerNames.contains(brokerStats.getName())) {
          result.add(brokerStats);
        }
      }
    }
  } catch (Exception e) {
    LOG.error("Exception in processing brokerstats", e);
  } finally {
    if (kafkaConsumer != null) {
      kafkaConsumer.close();
    }
  }
  return result;
}
 
Example 16
Source File: PastReplicaStatsProcessor.java    From doctorkafka with Apache License 2.0
public void run() {
  KafkaConsumer<byte[], byte[]> kafkaConsumer = null;
  try {
    String brokers = KafkaUtils.getBrokers(zkUrl, securityProtocol);
    LOG.info("ZkUrl: {}, Brokers: {}", zkUrl, brokers);
    Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "doctorkafka_" + topicPartition);
    props.put(KafkaUtils.KEY_DESERIALIZER,
        "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put(KafkaUtils.VALUE_DESERIALIZER,
        "org.apache.kafka.common.serialization.ByteArrayDeserializer");
    props.put(KafkaUtils.MAX_POLL_RECORDS, 2000);
    props.put("max.partition.fetch.bytes", 1048576 * 4);

    kafkaConsumer = new KafkaConsumer<>(props);
    Set<TopicPartition> topicPartitions = new HashSet<>();
    topicPartitions.add(topicPartition);
    kafkaConsumer.assign(topicPartitions);
    kafkaConsumer.seek(topicPartition, startOffset);

    ConsumerRecords<byte[], byte[]> records = null;
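    // position() advances as records are fetched; loop until it reaches endOffset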
    while (kafkaConsumer.position(topicPartition) < endOffset) {
      records = kafkaConsumer.poll(100);
      for (ConsumerRecord<byte[], byte[]> record : records) {
        BrokerStats brokerStats = OperatorUtil.deserializeBrokerStats(record);
        if (brokerStats == null || brokerStats.getName() == null) {
          OpenTsdbMetricConverter.incr(DoctorKafkaMetrics.MESSAGE_DESERIALIZE_ERROR, 1);
          continue;
        }
        replicaStatsManager.update(brokerStats);
      }
    }
  } catch (Exception e) {
    LOG.error("Exception in processing brokerstats", e);
  } finally {
    if (kafkaConsumer != null) {
      kafkaConsumer.close();
    }
  }
}
 
Example 17
Source File: ProjectTableService.java    From DBus with Apache License 2.0
public List<Map<String, String>> getTopicOffsets(String topic) {
    KafkaConsumer<String, String> consumer = null;
    try {
        Properties consumerProps = zkService.getProperties(KeeperConstants.KEEPER_CONSUMER_CONF);
        consumerProps.setProperty("client.id", "");
        consumerProps.setProperty("group.id", "topic.offsets.reader.temp");
        Properties globalConf = zkService.getProperties(KeeperConstants.GLOBAL_CONF);
        consumerProps.setProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS, globalConf.getProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS));
        if (StringUtils.equals(SecurityConfProvider.getSecurityConf(zkService), Constants.SECURITY_CONFIG_TRUE_VALUE)) {
            consumerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        }
        List<Map<String, String>> topicMsg = new ArrayList<>();
        // create the consumer
        consumer = new KafkaConsumer<String, String>(consumerProps);
        /*// subscribing to the topic must cover all partitions, otherwise position() throws "You can only check the position for partitions assigned to this consumer."
        consumer.subscribe(Arrays.asList(topic));*/
        // fetch the topic's partition list
        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);

        // collect info for each partition
        for (PartitionInfo partitionInfo : partitionInfos) {
            int partition = partitionInfo.partition();
            TopicPartition topicPartition = new TopicPartition(topic, partition);
            consumer.assign(Arrays.asList(topicPartition));

            consumer.seekToEnd(consumer.assignment());
            // next fetch position
            long nextFetchOffset = consumer.position(topicPartition);

            consumer.seekToBeginning(consumer.assignment());
            long headOffset = consumer.position(topicPartition);

            Map<String, String> partitionMsg = new HashedMap();
            partitionMsg.put("topic", topic);
            partitionMsg.put("partition", String.valueOf(partition));
            partitionMsg.put("latestOffset", String.valueOf(nextFetchOffset));
            partitionMsg.put("headOffset", String.valueOf(headOffset));
            topicMsg.add(partitionMsg);

        }

        return topicMsg;
    } catch (Exception e) {
        logger.error("[table topic offset] Error encountered while getting topic messages. topic:{}", topic);
        return null;
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
 
Example 18
Source File: KafkaAppenderIT.java    From logback-kafka-appender with Apache License 2.0
@Test
public void testLogging() {

    final int messageCount = 2048;
    final int messageSize = 1024;

    final Logger logger = loggerContext.getLogger("ROOT");

    unit.start();

    assertTrue("appender is started", unit.isStarted());

    final BitSet messages = new BitSet(messageCount);

    for (int i = 0; i < messageCount; ++i) {
        final String prefix = Integer.toString(i)+ ";";
        final StringBuilder sb = new StringBuilder();
        sb.append(prefix);
        byte[] b = new byte[messageSize-prefix.length()];
        ThreadLocalRandom.current().nextBytes(b);
        for (byte bb : b) {
            // cast after masking so a 7-bit char is appended, not an int
            sb.append((char) (bb & 0x7F));
        }

        final LoggingEvent loggingEvent = new LoggingEvent("a.b.c.d", logger, Level.INFO, sb.toString(), null, new Object[0]);
        unit.append(loggingEvent);
        messages.set(i);
    }

    unit.stop();
    assertFalse("appender is stopped", unit.isStarted());

    final KafkaConsumer<byte[], byte[]> javaConsumerConnector = kafka.createClient();
    javaConsumerConnector.assign(Collections.singletonList(new TopicPartition("logs", 0)));
    javaConsumerConnector.seekToBeginning(Collections.singletonList(new TopicPartition("logs", 0)));
    final long position = javaConsumerConnector.position(new TopicPartition("logs", 0));
    assertEquals(0, position);

    ConsumerRecords<byte[], byte[]> poll = javaConsumerConnector.poll(10000);
    int readMessages = 0;
    while (!poll.isEmpty()) {
        for (ConsumerRecord<byte[], byte[]> aPoll : poll) {
            byte[] msg = aPoll.value();
            byte[] msgPrefix = new byte[32];
            System.arraycopy(msg, 0, msgPrefix, 0, 32);
            final String messageFromKafka = new String(msgPrefix, UTF8);
            int delimiter = messageFromKafka.indexOf(';');
            final int msgNo = Integer.parseInt(messageFromKafka.substring(0, delimiter));
            messages.set(msgNo, false);
            readMessages++;
        }
        poll = javaConsumerConnector.poll(1000);
    }

    assertEquals(messageCount, readMessages);
    assertThat(fallbackLoggingEvents, empty());
    assertEquals("all messages should have been read", BitSet.valueOf(new byte[0]), messages);

}