Java Code Examples for org.apache.kafka.common.TopicPartition#topic()

The following examples show how to use org.apache.kafka.common.TopicPartition#topic(). Each example is drawn from an open-source project; the source file, project, and license are noted above it.
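
Before diving in, here is a minimal, self-contained sketch (not taken from any of the projects below) of how a TopicPartition is constructed and how topic() and partition() are read back. The topic name, partition number, and offset value are made up for illustration; the class also implements equals() and hashCode(), which is why the examples below freely use it as a map key:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

public class TopicPartitionDemo {
    public static void main(String[] args) {
        // An immutable (topic, partition) pair
        TopicPartition tp = new TopicPartition("orders", 3);
        System.out.println(tp.topic());     // orders
        System.out.println(tp.partition()); // 3

        // equals()/hashCode() are defined, so TopicPartition works as a map key
        Map<TopicPartition, Long> offsets = new HashMap<>();
        offsets.put(tp, 42L);
        System.out.println(offsets.get(new TopicPartition("orders", 3))); // 42
    }
}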
Example 1
Source File: SecorConsumerRebalanceListener.java    From secor with Apache License 2.0
private Map<TopicPartition, Long> getCommittedOffsets(Collection<TopicPartition> assignment) {
    Map<TopicPartition, Long> committedOffsets = new HashMap<>();

    for (TopicPartition topicPartition : assignment) {
        com.pinterest.secor.common.TopicPartition secorTopicPartition =
                new com.pinterest.secor.common.TopicPartition(topicPartition.topic(), topicPartition.partition());
        try {
            long committedOffset = mZookeeperConnector.getCommittedOffsetCount(secorTopicPartition);
            committedOffsets.put(topicPartition, committedOffset);
        } catch (Exception e) {
            LOG.trace("Unable to fetch committed offsets from zookeeper", e);
            throw new RuntimeException(e);
        }
    }

    return committedOffsets;
}
 
Example 2
Source File: ZookeeperCheckpointManager.java    From uReplicator with Apache License 2.0
public Long fetchOffset(TopicPartition topicPartition) {
  ZKGroupTopicDirs dirs = new ZKGroupTopicDirs(groupId, topicPartition.topic());
  String path = dirs.consumerOffsetDir() + "/" + topicPartition.partition();
  if (!commitZkClient.exists(path)) {
    return -1L;
  }
  String offset = commitZkClient.readData(path).toString();
  if (StringUtils.isEmpty(offset)) {
    return -1L;
  }
  try {
    return Long.parseLong(offset);
  } catch (Exception e) {
    LOGGER.warn("Failed to parse offset {} for topic partition, zk path: {}", offset, path);
    return -1L;
  }
}
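
For orientation: under the classic ZooKeeper offset layout used by the old high-level consumer, ZKGroupTopicDirs.consumerOffsetDir() resolves to /consumers/<group>/offsets/<topic>, so for a hypothetical group "g1" and TopicPartition("events", 0) the path read above would be:

/consumers/g1/offsets/events/0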
 
Example 3
Source File: KafkaMonitor.java    From mirus with BSD 3-Clause "New" or "Revised" License
private String applyRoutersToTopic(String topic) {
  TopicPartition topicPartition = new TopicPartition(topic, 0);
  Map<String, Object> sourcePartition = TopicPartitionSerDe.asMap(topicPartition);
  SourceRecord record =
      new SourceRecord(
          sourcePartition,
          null,
          topicPartition.topic(),
          topicPartition.partition(),
          Schema.BYTES_SCHEMA,
          null,
          Schema.OPTIONAL_BYTES_SCHEMA,
          null);
  for (Transformation<SourceRecord> transform : this.routers) {
    record = transform.apply(record);
  }
  return record.topic();
}
 
Example 4
Source File: TopicPartitionCounterTest.java    From connect-utils with Apache License 2.0
@Test
public void incrementSinkRecord() {
  final TopicPartition topicPartition = new TopicPartition("test", 1);
  final Map<TopicPartition, Long> expected = ImmutableMap.of(
      topicPartition, 123L
  );

  SinkRecord record = new SinkRecord(
      topicPartition.topic(),
      topicPartition.partition(),
      Schema.STRING_SCHEMA,
      "",
      Schema.STRING_SCHEMA,
      "",
      123L
  );
  this.counter.increment(record);
  assertEquals(expected, this.counter.data());
}
 
Example 5
Source File: AppenderConsumer.java    From DBus with Apache License 2.0
@Override
public void pauseTopic(TopicPartition tp, long offset, ControlMessage message) {

    String topic = message.payloadValue("topic", String.class);
    String tableName = message.payloadValue("TABLE_NAME", String.class);
    if (topic == null || topic.length() == 0) topic = tp.topic();
    if (!pausedTopics.containsKey(topic)) {
        consumer.pause(Arrays.asList(tp));
        TopicInfo topicInfo = TopicInfo.build(tp.topic(), tp.partition(), offset, tableName);
        pausedTopics.put(topic, topicInfo);

        try {
            zkNodeOperator.setData(pausedTopics, true);
        } catch (Exception e) {
            logger.error("Adding paused topics error", e);
        }
        logger.info("Topic [{}] was paused by command", tp.topic());
    } else {
        logger.info("Topic [{}] has been paused, the pause action was skipped", tp.topic());
    }
}
 
Example 6
Source File: GroupMetrics.java    From kafka-metrics with Apache License 2.0
private MetricName NewName(String group, TopicPartition tp) {
    return new MetricName(
            "kafka.groups",
            "Group",
            name,
            "",
            "kafka.consumer:type=Group,name=" + name
                    + ",group=" + group
                    + ",topic=" + tp.topic()
                    + ",partition=" + tp.partition());
}
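
As a hypothetical illustration of the string built above: with group "billing", topic "payments", and partition 2, the JMX portion of the metric name would be:

kafka.consumer:type=Group,name=<name>,group=billing,topic=payments,partition=2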
 
Example 7
Source File: ConsumerLease.java    From nifi with Apache License 2.0
private BundleTracker(final ConsumerRecord<byte[], byte[]> initialRecord, final TopicPartition topicPartition, final String keyEncoding, final RecordSetWriter recordWriter) {
    this.initialOffset = initialRecord.offset();
    this.initialTimestamp = initialRecord.timestamp();
    this.partition = topicPartition.partition();
    this.topic = topicPartition.topic();
    this.recordWriter = recordWriter;
    this.key = encodeKafkaKey(initialRecord.key(), keyEncoding);
}
 
Example 8
Source File: KafkaOffsetGetter.java    From Kafka-Insight with Apache License 2.0
public static List<OffsetInfo> getOffsetQuarz() {

    Map<String, Map<String, List<OffsetInfo>>> groupTopicPartitionListMap = new ConcurrentHashMap<>();

    for (Map.Entry<GroupTopicPartition, OffsetAndMetadata> entry : kafkaConsumerOffsets.entrySet()) {
        GroupTopicPartition groupTopicPartition = entry.getKey();
        OffsetAndMetadata offsetAndMetadata = entry.getValue();
        String group = groupTopicPartition.group();
        TopicPartition topicPartition = groupTopicPartition.topicPartition();
        String topic = topicPartition.topic();
        int partition = topicPartition.partition();
        Long committedOffset = offsetAndMetadata.offset();

        if (!logEndOffsetMap.containsKey(topicPartition)) {
            logger.error("The logEndOffsetMap does not contain " + topicPartition);
            return null;
        }
        long logSize = logEndOffsetMap.get(topicPartition);

        // The refresh thread may not have updated the log end offset yet,
        // so never report a log size smaller than the committed offset
        logSize = logSize >= committedOffset ? logSize : committedOffset;
        long lag = committedOffset == -1 ? 0 : (logSize - committedOffset);

        OffsetInfo offsetInfo = new OffsetInfo();
        offsetInfo.setGroup(group);
        offsetInfo.setTopic(topic);
        offsetInfo.setCommittedOffset(committedOffset);
        offsetInfo.setLogSize(logSize);
        offsetInfo.setLag(lag);
        offsetInfo.setTimestamp(offsetAndMetadata.commitTimestamp());

        if (!groupTopicPartitionListMap.containsKey(group)) {
            Map<String, List<OffsetInfo>> topicPartitionMap = new ConcurrentHashMap<>();
            groupTopicPartitionListMap.put(group, topicPartitionMap);
        }
        if (!groupTopicPartitionListMap.get(group).containsKey(topic)) {
            List<OffsetInfo> offsetInfos = new ArrayList<>();
            groupTopicPartitionListMap.get(group).put(topic, offsetInfos);
        }
        groupTopicPartitionListMap.get(group).get(topic).add(offsetInfo);
    }
    return flattenNestedMap(groupTopicPartitionListMap);
}
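
A few hypothetical values to illustrate the lag arithmetic above:

// logSize = 100, committedOffset = 80 -> lag = 100 - 80 = 20
// committedOffset = -1 (nothing committed yet) -> lag = 0
// logSize = 75, committedOffset = 80 -> logSize is raised to 80, so lag = 0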
 
Example 9
Source File: KafkaUtil.java    From samza with Apache License 2.0
public static SystemStreamPartition toSystemStreamPartition(String systemName, TopicPartition topicPartition) {
  Partition partition = new Partition(topicPartition.partition());
  return new SystemStreamPartition(systemName, topicPartition.topic(), partition);
}
 
Example 10
Source File: KafkaConsumerWrapper.java    From attic-apex-malhar with Apache License 2.0
/**
 * This method is called in the activate method of the operator
 */
public void start(boolean waitForReplay)
{
  this.waitForReplay = waitForReplay;
  isAlive.set(true);

  // thread to consume the kafka data
  // create thread pool for consumer threads
  kafkaConsumerExecutor = Executors.newCachedThreadPool(
    new ThreadFactoryBuilder().setNameFormat("kafka-consumer-%d").build());

  // group list of PartitionMeta by cluster
  Map<String, List<TopicPartition>> consumerAssignment = new HashMap<>();
  Set<AbstractKafkaPartitioner.PartitionMeta> assignments = ownerOperator.assignment();
  for (AbstractKafkaPartitioner.PartitionMeta partitionMeta : assignments) {
    String cluster = partitionMeta.getCluster();
    List<TopicPartition> cAssignment = consumerAssignment.get(cluster);
    if (cAssignment == null) {
      cAssignment = new LinkedList<>();
      consumerAssignment.put(cluster, cAssignment);
    }
    cAssignment.add(new TopicPartition(partitionMeta.getTopic(), partitionMeta.getPartitionId()));
  }

  Map<AbstractKafkaPartitioner.PartitionMeta, Long> currentOffset = ownerOperator.getOffsetTrack();

  //  create one thread for each cluster
  // each thread use one KafkaConsumer to consume from 1+ partition(s) of 1+ topic(s)
  for (Map.Entry<String, List<TopicPartition>> e : consumerAssignment.entrySet()) {

    Properties prop = new Properties();
    if (ownerOperator.getConsumerProps() != null) {
      prop.putAll(ownerOperator.getConsumerProps());
    }

    prop.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, e.getKey());
    prop.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none");
    // never auto commit the offsets
    prop.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
    prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    prop.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    AbstractKafkaInputOperator.InitialOffset initialOffset =
        AbstractKafkaInputOperator.InitialOffset.valueOf(ownerOperator.getInitialOffset());

    if (initialOffset == AbstractKafkaInputOperator.InitialOffset.APPLICATION_OR_EARLIEST ||
        initialOffset == AbstractKafkaInputOperator.InitialOffset.APPLICATION_OR_LATEST) {
      // commit offsets under the application name when initialOffset is set to an APPLICATION_* value
      prop.put(ConsumerConfig.GROUP_ID_CONFIG, ownerOperator.getApplicationName() + "_Consumer");
    }

    AbstractKafkaConsumer kc = ownerOperator.createConsumer(prop);
    kc.assignPartitions(e.getValue());
    if (logger.isInfoEnabled()) {
      logger.info("Create consumer with properties {} ", Joiner.on(";").withKeyValueSeparator("=").join(prop));
      logger.info("Assign consumer to {}", Joiner.on('#').join(e.getValue()));
    }
    if (currentOffset != null && !currentOffset.isEmpty()) {
      for (TopicPartition tp : e.getValue()) {
        AbstractKafkaPartitioner.PartitionMeta partitionKey =
            new AbstractKafkaPartitioner.PartitionMeta(e.getKey(), tp.topic(), tp.partition());
        if (currentOffset.containsKey(partitionKey)) {
          kc.seekToOffset(tp, currentOffset.get(partitionKey));
        }
      }
    }

    consumers.put(e.getKey(), kc);
    Future<?> future = kafkaConsumerExecutor.submit(new ConsumerThread(e.getKey(), kc, this));
    kafkaConsumerThreads.add(future);
  }
}
 
Example 11
Source File: FileUtils.java    From streamx with Apache License 2.0
public static String directoryName(String url, String topicsDir, TopicPartition topicPart) {
  String topic = topicPart.topic();
  int partition = topicPart.partition();
  return url + "/" + topicsDir + "/" + topic + "/" + partition;
}
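
A hypothetical call to illustrate the resulting layout:

// directoryName("hdfs://namenode:8020", "topics", new TopicPartition("events", 3))
// returns: hdfs://namenode:8020/topics/events/3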
 
Example 12
Source File: DefaultKafkaClusterProxy.java    From kafka-message-tool with MIT License
private List<TopicsOffsetInfo> getTopicOffsetsFor(String consumerGroupId,
                                                  Map<TopicPartition, Object> topicPartitionsCurrentOffset) {
    final Set<TopicPartition> topicPartitions = topicPartitionsCurrentOffset.keySet();
    final List<TopicsOffsetInfo> result = new ArrayList<>();

    final KafkaConsumer<String, String> consumer = createOffsetInfoConsumerFor(consumerGroupId);
    final Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(topicPartitions);
    final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(topicPartitions);

    for (Map.Entry<TopicPartition, Long> entry : beginningOffsets.entrySet()) {

        final TopicPartition topicPartition = entry.getKey();

        if (!endOffsets.containsKey(topicPartition)) {
            continue;
        }

        String currentOffset = NOT_FOUND_STRING;
        String lag = NOT_FOUND_STRING;

        final Optional<Long> optionalOffsetForPartition = getOptionalOffsetForPartition(topicPartitionsCurrentOffset,
                                                                                        topicPartition);

        final String topicName = topicPartition.topic();
        final String partition = String.valueOf(topicPartition.partition());
        final Long startOffsetLong = entry.getValue();
        final String beginningOffset = String.valueOf(startOffsetLong);
        final Long endOffsetLong = endOffsets.get(topicPartition);
        final String endOffset = String.valueOf(endOffsetLong);
        final String msgCount = String.valueOf(endOffsetLong - startOffsetLong);

        if (optionalOffsetForPartition.isPresent()) {
            final Long currentOffsetLong = optionalOffsetForPartition.get();
            currentOffset = String.valueOf(currentOffsetLong);
            lag = String.valueOf(endOffsetLong - currentOffsetLong);
        }

        final TopicsOffsetInfo topicsOffsetInfo = new TopicsOffsetInfo(topicName,
                                                                       beginningOffset,
                                                                       endOffset,
                                                                       consumerGroupId,
                                                                       partition,
                                                                       msgCount,
                                                                       currentOffset,
                                                                       lag);
        result.add(topicsOffsetInfo);
    }

    Logger.debug("Topic offsets: " + result);
    return result;
}
 
Example 13
Source File: ConsumerLease.java    From nifi with Apache License 2.0
private BundleTracker(final ConsumerRecord<byte[], byte[]> initialRecord, final TopicPartition topicPartition, final String keyEncoding) {
    this.initialOffset = initialRecord.offset();
    this.partition = topicPartition.partition();
    this.topic = topicPartition.topic();
    this.key = encodeKafkaKey(initialRecord.key(), keyEncoding);
}
 
Example 14
Source File: ConsumerLease.java    From localization_nifi with Apache License 2.0
private BundleTracker(final ConsumerRecord<byte[], byte[]> initialRecord, final TopicPartition topicPartition, final String keyEncoding) {
    this.initialOffset = initialRecord.offset();
    this.partition = topicPartition.partition();
    this.topic = topicPartition.topic();
    this.key = encodeKafkaKey(initialRecord.key(), keyEncoding);
}
 
Example 15
Source File: BackupSinkTask.java    From kafka-backup with Apache License 2.0
public void open(Collection<TopicPartition> partitions) {
    super.open(partitions);
    try {
        for (TopicPartition topicPartition : partitions) {
            Path topicDir = Paths.get(targetDir.toString(), topicPartition.topic());
            Files.createDirectories(topicDir);
            PartitionWriter partitionWriter = new PartitionWriter(topicPartition.topic(), topicPartition.partition(), topicDir, maxSegmentSizeBytes);
            long lastWrittenOffset = partitionWriter.lastWrittenOffset();

            // Note that we must *always* request that we seek to an offset here. Currently the
            // framework will still commit Kafka offsets even though we track our own (see KAFKA-3462),
            // which can result in accidentally using that offset if one was committed but no files
            // were written to disk. To protect against this, even if we
            // just want to start at offset 0 or reset to the earliest offset, we specify that
            // explicitly to forcibly override any committed offsets.

            if (lastWrittenOffset > 0) {
                context.offset(topicPartition, lastWrittenOffset + 1);
                log.debug("Initialized Topic {}, Partition {}. Last written offset: {}"
                        , topicPartition.topic(), topicPartition.partition(), lastWrittenOffset);
            } else {
                // The offset was not found, so rather than forcibly set the offset to 0 we let the
                // consumer decide where to start based upon standard consumer offsets (if available)
                // or the consumer's `auto.offset.reset` configuration

                // if we are in snapshot mode, then just start at zero.
                if (config.snapShotMode()) {
                    context.offset(topicPartition, 0);
                }

                log.info("Resetting offset for {} based upon existing consumer group offsets or, if "
                        + "there are none, the consumer's 'auto.offset.reset' value.", topicPartition);
            }

            this.partitionWriters.put(topicPartition, partitionWriter);
            this.currentOffsets.put(topicPartition, lastWrittenOffset);
        }
        if (config.snapShotMode()) {
            this.endOffsets = endOffsetReader.getEndOffsets(partitions);
            this.terminateIfCompleted();
        }
        if (partitions.isEmpty()) {
            log.info("No partitions assigned to BackupSinkTask");
        }
    } catch (IOException | SegmentIndex.IndexException | PartitionIndex.IndexException e) {
        throw new RuntimeException(e);
    }
}
 
Example 16
Source File: KafkaSystemConsumer.java    From samza with Apache License 2.0
protected static TopicAndPartition toTopicAndPartition(TopicPartition topicPartition) {
  return new TopicAndPartition(topicPartition.topic(), topicPartition.partition());
}
 
Example 17
Source File: KafkaDatastreamStatesResponse.java    From brooklin with BSD 2-Clause "Simplified" License
@Override
public Object deserializeKey(String key, DeserializationContext ctxt) throws IOException {
  TopicPartition tp = topicPartitionFromString(key);
  return new FlushlessEventProducerHandler.SourcePartition(tp.topic(), tp.partition());
}
 
Example 18
Source File: TracingConsumerInterceptor.java    From brave-kafka-interceptor with Apache License 2.0
@Override public ConsumerRecords<K, V> onConsume(ConsumerRecords<K, V> records) {
  if (records.isEmpty() || tracing.isNoop()) return records;
  Map<String, Span> consumerSpansForTopic = new LinkedHashMap<>();
  for (TopicPartition partition : records.partitions()) {
    String topic = partition.topic();
    List<ConsumerRecord<K, V>> recordsInPartition = records.records(partition);
    for (ConsumerRecord<K, V> record : recordsInPartition) {
      TraceContextOrSamplingFlags extracted = extractor.extract(record.headers());
      // If we extracted neither a trace context nor request-scoped data (extra),
      // make or reuse a span for this topic
      if (extracted.samplingFlags() != null && extracted.extra().isEmpty()) {
        Span consumerSpanForTopic = consumerSpansForTopic.get(topic);
        if (consumerSpanForTopic == null) {
          consumerSpansForTopic.put(topic,
            consumerSpanForTopic = tracing.tracer()
              .nextSpan(extracted)
              .name(SPAN_NAME)
              .kind(Span.Kind.CONSUMER)
              .remoteServiceName(remoteServiceName)
              .tag(KafkaInterceptorTagKey.KAFKA_TOPIC, topic)
              .tag(KafkaInterceptorTagKey.KAFKA_GROUP_ID,
                configuration.getString(ConsumerConfig.GROUP_ID_CONFIG))
              .tag(KafkaInterceptorTagKey.KAFKA_CLIENT_ID,
                configuration.getString(ConsumerConfig.CLIENT_ID_CONFIG))
              .start());
        }
        // no need to remove propagation headers as we failed to extract anything
        injector.inject(consumerSpanForTopic.context(), record.headers());
      } else { // we extracted request-scoped data, so cannot share a consumer span
        Span span = tracing.tracer().nextSpan(extracted);
        if (!span.isNoop()) {
          span.name(SPAN_NAME)
            .kind(Span.Kind.CONSUMER)
            .remoteServiceName(remoteServiceName)
            .tag(KafkaInterceptorTagKey.KAFKA_TOPIC, topic)
            .tag(KafkaInterceptorTagKey.KAFKA_GROUP_ID,
              configuration.getString(ConsumerConfig.GROUP_ID_CONFIG))
            .tag(KafkaInterceptorTagKey.KAFKA_CLIENT_ID,
              configuration.getString(ConsumerConfig.CLIENT_ID_CONFIG))
            .start()
            .finish(); // span won't be shared by other records
        }
        // remove prior propagation headers from the record
        tracing.propagation().keys().forEach(key -> record.headers().remove(key));
        injector.inject(span.context(), record.headers());
      }
    }
  }
  consumerSpansForTopic.values().forEach(Span::finish);
  return records;
}
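
To put the interceptor above to work, it has to be registered on the consumer through the standard interceptor.classes setting. A minimal sketch: the bootstrap server and group id are placeholders, the interceptor's package is assumed from the project layout, and any additional tracing-related settings the interceptor reads are omitted here:

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;
import brave.kafka.interceptor.TracingConsumerInterceptor; // assumed package

public class TracingConsumerDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "demo-group");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Register the tracing interceptor so records returned by poll() pass through onConsume()
        props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, TracingConsumerInterceptor.class.getName());
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // ... consumer.poll(...) as usual; close() when done
        consumer.close();
    }
}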