Java Code Examples for org.apache.kafka.clients.consumer.KafkaConsumer#partitionsFor()

The following examples show how to use org.apache.kafka.clients.consumer.KafkaConsumer#partitionsFor(). Each snippet is taken from an open-source project; the source file, project, and license are noted above each example.
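Before the project examples, a minimal, self-contained sketch of the call itself may help (it is not taken from any of the projects below). It assumes a broker at localhost:9092 and a topic named my-topic, both placeholders. partitionsFor() is a pure metadata lookup, so no group.id or subscription is required; depending on client version and broker settings, a missing topic may yield null (note the null checks in several examples below) or a TimeoutException, so real code should handle both.

import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.serialization.StringDeserializer;

public class PartitionsForSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker address
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        // No group.id or subscription needed: partitionsFor() only fetches topic metadata.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            List<PartitionInfo> partitions = consumer.partitionsFor("my-topic"); // placeholder topic
            if (partitions == null) {
                System.out.println("Topic not found");
            } else {
                for (PartitionInfo info : partitions) {
                    System.out.println(info.partition() + " -> leader " + info.leader());
                }
            }
        }
    }
}
 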
Example 1
Source File: SparkDataSetProcessor.java    From spliceengine with GNU Affero General Public License v3.0
public <V> DataSet<ExecRow> readKafkaTopic(String topicName, OperationContext context) throws StandardException {
    Properties props = new Properties();
    String consumerGroupId = "spark-consumer-dss-sdsp";
    String bootstrapServers = SIDriver.driver().getConfiguration().getKafkaBootstrapServers();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
    props.put(ConsumerConfig.CLIENT_ID_CONFIG, consumerGroupId+"-"+UUID.randomUUID());
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ExternalizableDeserializer.class.getName());

    KafkaConsumer<Integer, Externalizable> consumer = new KafkaConsumer<Integer, Externalizable>(props);
    List<PartitionInfo> ps = consumer.partitionsFor(topicName);
    List<Integer> partitions = new ArrayList<>(ps.size());
    for (int i = 0; i < ps.size(); ++i) {
        partitions.add(i); // assumes partition ids are the contiguous range 0..n-1
    }
    consumer.close();

    SparkDataSet rdd = new SparkDataSet(SpliceSpark.getContext().parallelize(partitions, partitions.size()));
    return rdd.flatMap(new KafkaReadFunction(context, topicName, bootstrapServers));
}
 
Example 2
Source File: KafkaSource.java    From kylin-on-parquet-v2 with Apache License 2.0
@Override
public StreamingTableSourceInfo load(String cubeName) {
    KylinConfig kylinConf = KylinConfig.getInstanceFromEnv();
    CubeInstance cube = CubeManager.getInstance(kylinConf).getCube(cubeName);
    String streamingTableName = cube.getRootFactTable();
    StreamingSourceConfig streamingSourceConfig = StreamingSourceConfigManager.getInstance(kylinConf)
            .getConfig(streamingTableName);

    String topicName = getTopicName(streamingSourceConfig.getProperties());
    Map<String, Object> conf = getKafkaConf(streamingSourceConfig.getProperties(), cube.getConfig());

    KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(conf);
    try {
        List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(topicName);
        List<Partition> kafkaPartitions = Lists.transform(partitionInfos, new Function<PartitionInfo, Partition>() {
            @Nullable
            @Override
            public Partition apply(@Nullable PartitionInfo input) {
                return new Partition(input.partition());
            }
        });
        return new StreamingTableSourceInfo(kafkaPartitions);
    } finally {
        kafkaConsumer.close();
    }
}
 
Example 3
Source File: KafkaDispatcherImpl.java    From arcusplatform with Apache License 2.0
private Collection<TopicPartition> toKafkaPartitions(
		String topic, 
		Set<PlatformPartition> newPartitions,
		KafkaConsumer<?, ?> consumer
) {
	List<PartitionInfo> kafkaPartitions = consumer.partitionsFor(topic);
	int partitionRatio = platformPartitions / kafkaPartitions.size(); 
	logger.info("Discovered [{}] kafka partitions and [{}] platform partitions: [{}] platform partitions per kafka partition", kafkaPartitions.size(), platformPartitions, partitionRatio);
	Map<Integer, Integer> partitionMap = new LinkedHashMap<>();
	for(PlatformPartition pp: newPartitions) {
		int kafkaPartition = pp.getId() % kafkaPartitions.size();
		partitionMap.put(kafkaPartition, partitionMap.getOrDefault(kafkaPartition, 0) + 1);
	}
	List<TopicPartition> tp = new ArrayList<>(Math.max(1, partitionMap.size()));
	for(Map.Entry<Integer, Integer> entry: partitionMap.entrySet()) {
		Preconditions.checkState(entry.getValue() == partitionRatio, "Kafka partition %s is partially assigned to this node, which is not currently supported", entry.getKey());
		tp.add(new TopicPartition(topic, entry.getKey()));
	}
	logger.info("Assigning partitions [{}] to this node", partitionMap.keySet());
	return tp;
}
 
Example 4
Source File: KafkaConsumeTest.java    From joyqueue with Apache License 2.0
public static void main(String[] args) {
    Consumer consumer = new Consumer(KafkaConfigs.TOPIC);
    KafkaConsumer kafkaConsumer = consumer.getKafkaConsumer();

    List<String> topics = Lists.newLinkedList();
    for (int i = 0; i < KafkaConfigs.TOPIC_COUNT; i++) {
        topics.add(KafkaConfigs.TOPIC + "_" + i);
    }

    List<TopicPartition> topicPartitions = Lists.newLinkedList();
    List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(KafkaConfigs.TOPIC);
    for (PartitionInfo partitionInfo : partitionInfos) {
        topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
    }

    // kafkaConsumer.assign(topicPartitions);
    kafkaConsumer.subscribe(Lists.newArrayList(KafkaConfigs.TOPIC));

    System.out.println("kafka consumer is started");

    while (!Thread.interrupted()) {
        consumer.doWork();
    }
}
 
Example 5
Source File: CheckBeginingOffset.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> kafkaConsumer = createNewConsumer();
    List<PartitionInfo> partitions = kafkaConsumer.partitionsFor("topic-monitor");
    List<TopicPartition> tpList = partitions.stream()
            .map(pInfo -> new TopicPartition(pInfo.topic(), pInfo.partition()))
            .collect(toList());
    Map<TopicPartition, Long> beginningOffsets =
            kafkaConsumer.beginningOffsets(tpList);
    System.out.println(beginningOffsets);
}
 
Example 6
Source File: KafkaSource.java    From kylin with Apache License 2.0
@Override
public StreamingTableSourceInfo load(String cubeName) {
    KylinConfig kylinConf = KylinConfig.getInstanceFromEnv();
    CubeInstance cube = CubeManager.getInstance(kylinConf).getCube(cubeName);
    String streamingTableName = cube.getRootFactTable();
    StreamingSourceConfig streamingSourceConfig = StreamingSourceConfigManager.getInstance(kylinConf)
            .getConfig(streamingTableName);

    String topicName = getTopicName(streamingSourceConfig.getProperties());
    Map<String, Object> conf = getKafkaConf(streamingSourceConfig.getProperties(), cube.getConfig());

    KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(conf);
    try {
        List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(topicName);
        List<Partition> kafkaPartitions = Lists.transform(partitionInfos, new Function<PartitionInfo, Partition>() {
            @Nullable
            @Override
            public Partition apply(@Nullable PartitionInfo input) {
                return new Partition(input.partition());
            }
        });
        return new StreamingTableSourceInfo(kafkaPartitions);
    } finally {
        kafkaConsumer.close();
    }
}
 
Example 7
Source File: KafkaConsumerFactory.java    From kafka-webview with MIT License
/**
 * Create a new KafkaConsumer based on the passed in ClientConfig, and subscribe to the appropriate
 * partitions.
 * @deprecated To be removed.
 */
public KafkaConsumer createConsumerAndSubscribe(final ClientConfig clientConfig) {
    final KafkaConsumer kafkaConsumer = createConsumer(clientConfig);

    // Determine which partitions to subscribe to, for now do all
    final List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(clientConfig.getTopicConfig().getTopicName());

    // Pull out partitions, convert to topic partitions
    final Collection<TopicPartition> topicPartitions = new ArrayList<>();
    for (final PartitionInfo partitionInfo: partitionInfos) {
        // Skip filtered partitions
        if (!clientConfig.isPartitionFiltered(partitionInfo.partition())) {
            topicPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
        }
    }

    // Assign them.
    kafkaConsumer.assign(topicPartitions);

    // Return the kafka consumer.
    return kafkaConsumer;
}
 
Example 8
Source File: ParallelWebKafkaConsumer.java    From kafka-webview with MIT License
/**
 * Mark synchronized to prevent multi-threaded weirdness.
 */
private List<TopicPartition> getAllPartitions(final KafkaConsumer<?,?> kafkaConsumer) {
    // If we have not pulled this yet
    if (cachedTopicsAndPartitions == null) {
        // Attempt to prevent multi-threaded weirdness.
        synchronized (this) {
            if (cachedTopicsAndPartitions == null) {
                // Determine which partitions are available
                final List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(clientConfig.getTopicConfig().getTopicName());

                // Pull out partitions, convert to topic partitions
                final List<TopicPartition> tempHolder = new ArrayList<>();
                for (final PartitionInfo partitionInfo : partitionInfos) {
                    // Skip filtered partitions
                    if (!clientConfig.isPartitionFiltered(partitionInfo.partition())) {
                        tempHolder.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
                    }
                }
                cachedTopicsAndPartitions = Collections.unmodifiableList(tempHolder);
            }
        }
    }
    return cachedTopicsAndPartitions;
}
 
Example 9
Source File: CdcKafkaPublisherTest.java    From light-eventuate-4j with Apache License 2.0
@Test
public void shouldSendPublishedEventsToKafka() throws InterruptedException {
  CdcKafkaPublisher<PublishedEvent> cdcKafkaPublisher = createCdcKafkaPublisher();

  cdcKafkaPublisher.start();

  cdcProcessor.start(cdcKafkaPublisher::handleEvent);

  String accountCreatedEventData = generateAccountCreatedEvent();
  EntityIdVersionAndEventIds entityIdVersionAndEventIds = saveEvent(localAggregateCrud, accountCreatedEventData);

  KafkaConsumer<String, String> consumer = createConsumer(config.getBootstrapServers());
  consumer.partitionsFor(getEventTopicName()); // result unused: forces a topic-metadata fetch before subscribing
  consumer.subscribe(Collections.singletonList(getEventTopicName()));

  waitForEventInKafka(consumer, entityIdVersionAndEventIds.getEntityId(), LocalDateTime.now().plusSeconds(20));
  cdcKafkaPublisher.stop();
}
 
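The bare partitionsFor() call before subscribe() in the test above is a common trick to force a metadata fetch, so the topic is known to the consumer (and, when broker-side auto-creation is enabled, gets created) before consuming starts. Below is a minimal sketch of that wait-for-topic pattern; the helper name and the retry/backoff values are arbitrary, and newer clients may throw a TimeoutException from partitionsFor() instead of returning null.

import java.util.Collections;
import java.util.List;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;

public final class TopicReadiness {
    // Hypothetical helper: block until the topic's metadata is visible, then subscribe.
    // The 30-attempt / 1s backoff values are placeholders, not from any project above.
    public static void subscribeWhenReady(KafkaConsumer<String, String> consumer, String topic)
            throws InterruptedException {
        for (int attempt = 0; attempt < 30; attempt++) {
            List<PartitionInfo> partitions = consumer.partitionsFor(topic);
            if (partitions != null && !partitions.isEmpty()) {
                consumer.subscribe(Collections.singletonList(topic));
                return;
            }
            Thread.sleep(1000); // wait for the topic to appear
        }
        throw new IllegalStateException("Topic not available: " + topic);
    }
}
 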
Example 10
Source File: KafkaConfigUtil.java    From flink-learning with Apache License 2.0
private static Map<KafkaTopicPartition, Long> buildOffsetByTime(Properties props, ParameterTool parameterTool, Long time) {
    props.setProperty("group.id", "query_time_" + time);
    KafkaConsumer<?, ?> consumer = new KafkaConsumer<>(props);
    List<PartitionInfo> partitionsFor = consumer.partitionsFor(parameterTool.getRequired(PropertiesConstants.METRICS_TOPIC));
    Map<TopicPartition, Long> partitionInfoLongMap = new HashMap<>();
    for (PartitionInfo partitionInfo : partitionsFor) {
        partitionInfoLongMap.put(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()), time);
    }
    Map<TopicPartition, OffsetAndTimestamp> offsetResult = consumer.offsetsForTimes(partitionInfoLongMap);
    Map<KafkaTopicPartition, Long> partitionOffset = new HashMap<>();
    offsetResult.forEach((key, value) -> partitionOffset.put(new KafkaTopicPartition(key.topic(), key.partition()), value.offset()));

    consumer.close();
    return partitionOffset;
}
 
Example 11
Source File: Kafka0_10ConsumerLoader.java    From datacollector with Apache License 2.0
private void setOffsetsByTimestamp(String topic, KafkaConsumer kafkaAuxiliaryConsumer) {
  // Build map of topics partitions and timestamp to use when searching offset for that partition (same timestamp
  // for all the partitions)
  List<PartitionInfo> partitionInfoList = kafkaAuxiliaryConsumer.partitionsFor(topic);

  if (partitionInfoList != null) {
    Map<TopicPartition, Long> partitionsAndTimestampMap = partitionInfoList.stream().map(e -> new TopicPartition(
        topic,
        e.partition()
    )).collect(Collectors.toMap(e -> e, (e) -> timestampToSearchOffsets));

    // Get Offsets by timestamp using previously built map and commit them to corresponding partition
    if (!partitionsAndTimestampMap.isEmpty()) {
      Map<TopicPartition, OffsetAndTimestamp> partitionsOffsets = kafkaAuxiliaryConsumer.offsetsForTimes(
          partitionsAndTimestampMap);
      if (partitionsOffsets != null && !partitionsOffsets.isEmpty()) {
        Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = partitionsOffsets.entrySet().stream().filter(
            entry -> entry.getKey() != null && entry.getValue() != null).collect(
            Collectors.toMap(entry -> entry.getKey(), entry -> new OffsetAndMetadata(entry.getValue().offset())));

        if (!offsetsToCommit.isEmpty()) {
          kafkaAuxiliaryConsumer.commitSync(offsetsToCommit);
        }
      }
    }
  }
}
 
Example 12
Source File: Kafka0_10ConsumerLoader.java    From datacollector with Apache License 2.0
private boolean firstConnection(String topic, KafkaConsumer kafkaAuxiliaryConsumer) throws StageException {
  LOG.debug("Checking first connection for Topic {}", topic);
  if (topic != null && !topic.isEmpty()) {
    List<PartitionInfo> partitionInfoList = kafkaAuxiliaryConsumer.partitionsFor(topic);
    for (PartitionInfo partitionInfo : partitionInfoList) {
      if (partitionInfo != null) {
        TopicPartition topicPartition = new TopicPartition(topic, partitionInfo.partition());
        try {
          OffsetAndMetadata offsetAndMetadata = kafkaAuxiliaryConsumer.committed(topicPartition);
          if (offsetAndMetadata != null) {
            // Already defined offset for that partition
            LOG.debug("Offset defined for Topic {} , partition {}", topic, topicPartition.partition());
            kafkaAuxiliaryConsumer.close();
            return false;
          }
        } catch (Exception ex) {
          // Could not obtain committed offset for corresponding partition
          LOG.error(KafkaErrors.KAFKA_30.getMessage(), ex.toString(), ex);
          throw new StageException(KafkaErrors.KAFKA_30, ex.toString(), ex);
        }

      }
    }
  }

  // There was no offset already defined for any partition so it is the first connection
  return true;
}
 
Example 13
Source File: EventuateKafkaConsumer.java    From light-eventuate-4j with Apache License 2.0
public static List<PartitionInfo> verifyTopicExistsBeforeSubscribing(KafkaConsumer<String, String> consumer, String topic) {
  try {
    logger.debug("Verifying Topic {}", topic);
    List<PartitionInfo> partitions = consumer.partitionsFor(topic);
    logger.debug("Got these partitions {} for Topic {}", partitions, topic);
    return partitions;
  } catch (Throwable e) {
    logger.error("Got exception: ", e);
    throw new RuntimeException(e);
  }
}
 
Example 14
Source File: KafkaAvroSerDesWithKafkaServerTest.java    From registry with Apache License 2.0
private ConsumerRecords<String, Object> consumeMessage(String topicName, String bootstrapServers, String consumerGroup) {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.putAll(SCHEMA_REGISTRY_TEST_SERVER_CLIENT_WRAPPER.exportClientConf(true));
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroup);
    props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
    props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, KafkaAvroDeserializer.class.getName());

    KafkaConsumer<String, Object> consumer = new KafkaConsumer<>(props);

    List<PartitionInfo> partitionInfos = consumer.partitionsFor(topicName);
    Collection<TopicPartition> partitions = new ArrayList<>();
    for (PartitionInfo partitionInfo : partitionInfos) {
        partitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
    }
    LOG.info("partitions [{}]", partitions);
    LOG.info("subscribed topis: [{}] ", consumer.listTopics());

    consumer.assign(partitions);
    consumer.seekToBeginning(partitions);

    ConsumerRecords<String, Object> consumerRecords = null;
    int ct = 0;
    while (ct++ < 100 && (consumerRecords == null || consumerRecords.isEmpty())) {
        LOG.info("Polling for consuming messages");
        consumerRecords = consumer.poll(Duration.ofMillis(500));
    }
    consumer.commitSync();
    consumer.close();

    return consumerRecords;
}
 
Example 15
Source File: KafkaConsumerAPITest.java    From javabase with Apache License 2.0
private static void listPartion(KafkaConsumer<Integer, String> consumer) {
    List<PartitionInfo> partitionsFor = consumer.partitionsFor("mytopic2");
    for (PartitionInfo partitionInfo : partitionsFor) {
        System.out.println(""+partitionInfo);
    }
}
 
Example 16
Source File: KafkaClusterManager.java    From doctorkafka with Apache License 2.0
/**
 * Call the kafka api to get the list of under-replicated partitions.
 * When a topic partition loses all of its replicas, it will not have a leader broker.
 * We need to handle this special case in detecting under replicated topic partitions.
 */
public static List<PartitionInfo> getUnderReplicatedPartitions(
    String zkUrl, SecurityProtocol securityProtocol, Map<String, String> consumerConfigs,
    List<String> topics,
    scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>> partitionAssignments,
    Map<String, Integer> replicationFactors,
    Map<String, Integer> partitionCounts) {
  List<PartitionInfo> underReplicated = new ArrayList();
  KafkaConsumer kafkaConsumer = KafkaUtils.getKafkaConsumer(zkUrl, securityProtocol, consumerConfigs);
  for (String topic : topics) {
    List<PartitionInfo> partitionInfoList = kafkaConsumer.partitionsFor(topic);
    if (partitionInfoList == null) {
      LOG.error("Failed to get partition info for {}", topic);
      continue;
    }
    int numPartitions = partitionCounts.get(topic);

    // when a partition loses all replicas and does not have a live leader,
    // kafkaconsumer.partitionsFor(...) will not return info for that partition.
    // the noLeaderFlag array is used to detect partitions that have no leaders
    boolean[] noLeaderFlags = new boolean[numPartitions];
    for (int i = 0; i < numPartitions; i++) {
      noLeaderFlags[i] = true;
    }
    for (PartitionInfo info : partitionInfoList) {
      if (info.inSyncReplicas().length < info.replicas().length &&
          replicationFactors.get(info.topic()) > info.inSyncReplicas().length) {
        underReplicated.add(info);
      }
      noLeaderFlags[info.partition()] = false;
    }

    // deal with the partitions that do not have leaders
    for (int partitionId = 0; partitionId < numPartitions; partitionId++) {
      if (noLeaderFlags[partitionId]) {
        Seq<Object> seq = partitionAssignments.get(topic).get().get(partitionId).get();
        Node[] nodes = JavaConverters.seqAsJavaList(seq).stream()
            .map(val -> new Node((Integer) val, "", -1)).toArray(Node[]::new);
        PartitionInfo partitionInfo =
            new PartitionInfo(topic, partitionId, null, nodes, new Node[0]);
        underReplicated.add(partitionInfo);
      }
    }
  }
  return underReplicated;
}
 
Example 17
Source File: KafkaTestUtils.java    From brooklin with BSD 2-Clause "Simplified" License
/**
 * Get all topic-partition info of a given Kafka topic on a given set of Kafka brokers
 */
public static List<PartitionInfo> getPartitionInfo(String topic, String brokerList) {
  KafkaConsumer<byte[], byte[]> consumer = createConsumer(brokerList);
  return consumer.partitionsFor(topic);
}
 
Example 18
Source File: ProjectTableService.java    From DBus with Apache License 2.0
public List<Map<String, String>> getTopicOffsets(String topic) {
    KafkaConsumer<String, String> consumer = null;
    try {
        Properties consumerProps = zkService.getProperties(KeeperConstants.KEEPER_CONSUMER_CONF);
        consumerProps.setProperty("client.id", "");
        consumerProps.setProperty("group.id", "topic.offsets.reader.temp");
        Properties globalConf = zkService.getProperties(KeeperConstants.GLOBAL_CONF);
        consumerProps.setProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS, globalConf.getProperty(GLOBAL_CONF_KEY_BOOTSTRAP_SERVERS));
        if (StringUtils.equals(SecurityConfProvider.getSecurityConf(zkService), Constants.SECURITY_CONFIG_TRUE_VALUE)) {
            consumerProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
        }
        List<Map<String, String>> topicMsg = new ArrayList<>();
        // Create a new consumer
        consumer = new KafkaConsumer<String, String>(consumerProps);
        /*// Subscribe to the topic (must cover all partitions, otherwise "You can only check the position for partitions assigned to this consumer." is thrown)
        consumer.subscribe(Arrays.asList(topic));*/
        // Get the topic's partition list
        List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);

        // Get info for each partition
        for (PartitionInfo partitionInfo : partitionInfos) {
            int partition = partitionInfo.partition();
            TopicPartition topicPartition = new TopicPartition(topic, partition);
            consumer.assign(Arrays.asList(topicPartition));

            consumer.seekToEnd(consumer.assignment());
            // Offset of the next fetch
            long nextFetchOffset = consumer.position(topicPartition);

            consumer.seekToBeginning(consumer.assignment());
            long headOffset = consumer.position(topicPartition);

            Map<String, String> partitionMsg = new HashedMap();
            partitionMsg.put("topic", topic);
            partitionMsg.put("partition", String.valueOf(partition));
            partitionMsg.put("latestOffset", String.valueOf(nextFetchOffset));
            partitionMsg.put("headOffset", String.valueOf(headOffset));
            topicMsg.add(partitionMsg);

        }

        return topicMsg;
    } catch (Exception e) {
        logger.error("[table topic offset] Error encountered while getting topic messages. topic:{}", topic);
        return null;
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
 
Example 19
Source File: KafkaService.java    From cerberus-source with GNU General Public License v3.0
@SuppressWarnings("unchecked")
@Override
public AnswerItem<Map<TopicPartition, Long>> seekEvent(String topic, String bootstrapServers,
        List<AppServiceHeader> serviceHeader) throws InterruptedException, ExecutionException {

    MessageEvent message = new MessageEvent(MessageEventEnum.ACTION_SUCCESS_CALLSERVICE_SEARCHKAFKA);
    AnswerItem<Map<TopicPartition, Long>> result = new AnswerItem<>();

    KafkaConsumer consumer = null;

    try {

        Properties props = new Properties();
        serviceHeader.add(factoryAppServiceHeader.create(null, "bootstrap.servers", bootstrapServers, "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "enable.auto.commit", "false", "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "max.poll.records", "10", "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer", "Y", 0, "", "", null, "", null));
        serviceHeader.add(factoryAppServiceHeader.create(null, "value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer", "Y", 0, "", "", null, "", null));

        for (AppServiceHeader object : serviceHeader) {
            if (StringUtil.parseBoolean(object.getActive())) {
                props.put(object.getKey(), object.getValue());
            }
        }

        LOG.info("Open Consumer : " + getKafkaConsumerKey(topic, bootstrapServers));
        consumer = new KafkaConsumer<>(props);

        //Get a list of the topics' partitions
        List<PartitionInfo> partitionList = consumer.partitionsFor(topic);

        if (partitionList == null) {

            message = new MessageEvent(MessageEventEnum.ACTION_FAILED_CALLSERVICE_SEEKKAFKA);
            message.setDescription(message.getDescription().replace("%EX%", "Maybe Topic does not exist.").replace("%TOPIC%", topic).replace("%HOSTS%", bootstrapServers));

        } else {

            List<TopicPartition> topicPartitionList = partitionList.stream().map(info -> new TopicPartition(topic, info.partition())).collect(Collectors.toList());
            //Assign all the partitions to this consumer
            consumer.assign(topicPartitionList);
            consumer.seekToEnd(topicPartitionList); //default to latest offset for all partitions

            HashMap<TopicPartition, Long> valueResult = new HashMap<>();

            Map<TopicPartition, Long> partitionOffset = consumer.endOffsets(topicPartitionList);

            result.setItem(partitionOffset);

        }

    } catch (Exception ex) {
        message = new MessageEvent(MessageEventEnum.ACTION_FAILED_CALLSERVICE_SEEKKAFKA);
        message.setDescription(message.getDescription().replace("%EX%", ex.toString()).replace("%TOPIC%", topic).replace("%HOSTS%", bootstrapServers));
        LOG.debug(ex, ex);
    } finally {
        if (consumer != null) {
            consumer.close();
            LOG.info("Closed Consumer : " + getKafkaConsumerKey(topic, bootstrapServers));
        } else {
            LOG.info("Consumer not opened : " + getKafkaConsumerKey(topic, bootstrapServers));
        }
    }
    result.setResultMessage(message);
    return result;
}