Java Code Examples for org.apache.kafka.common.TopicPartition#partition()

The following examples show how to use org.apache.kafka.common.TopicPartition#partition(). They are drawn from a range of open-source projects; the source file, project, and license are noted above each example.
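Before the project examples, here is a minimal, self-contained sketch of the accessor itself (not taken from any of the projects below; the topic name "orders" and the offset values are illustrative). It shows how a TopicPartition is constructed, how partition() and topic() are read, and the pattern, visible throughout the examples, of using TopicPartition as a map key:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

public class TopicPartitionDemo {
    public static void main(String[] args) {
        // A TopicPartition is an immutable (topic, partition) pair.
        TopicPartition tp = new TopicPartition("orders", 3);

        System.out.println(tp.topic());     // prints: orders
        System.out.println(tp.partition()); // prints: 3

        // equals() and hashCode() are implemented, so TopicPartition is
        // routinely used as a map key, e.g. for per-partition offsets.
        Map<TopicPartition, Long> offsets = new HashMap<>();
        offsets.put(tp, 42L);
        System.out.println(offsets.get(new TopicPartition("orders", 3))); // prints: 42
    }
}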
Example 1
Source File: SecorConsumerRebalanceListener.java    From secor with Apache License 2.0
private Map<TopicPartition, Long> getCommittedOffsets(Collection<TopicPartition> assignment) {
    Map<TopicPartition, Long> committedOffsets = new HashMap<>();

    for (TopicPartition topicPartition : assignment) {
        com.pinterest.secor.common.TopicPartition secorTopicPartition =
                new com.pinterest.secor.common.TopicPartition(topicPartition.topic(), topicPartition.partition());
        try {
            long committedOffset = mZookeeperConnector.getCommittedOffsetCount(secorTopicPartition);
            committedOffsets.put(topicPartition, committedOffset);
        } catch (Exception e) {
            LOG.trace("Unable to fetch committed offsets from zookeeper", e);
            throw new RuntimeException(e);
        }
    }

    return committedOffsets;
}
 
Example 2
Source File: KafkaMonitor.java    From mirus with BSD 3-Clause "New" or "Revised" License
private String applyRoutersToTopic(String topic) {
  TopicPartition topicPartition = new TopicPartition(topic, 0);
  Map<String, Object> sourcePartition = TopicPartitionSerDe.asMap(topicPartition);
  SourceRecord record =
      new SourceRecord(
          sourcePartition,
          null,
          topicPartition.topic(),
          topicPartition.partition(),
          Schema.BYTES_SCHEMA,
          null,
          Schema.OPTIONAL_BYTES_SCHEMA,
          null);
  for (Transformation<SourceRecord> transform : this.routers) {
    record = transform.apply(record);
  }
  return record.topic();
}
 
Example 3
Source File: TopicPartitionCounterTest.java    From connect-utils with Apache License 2.0
@Test
public void incrementSinkRecord() {
  final TopicPartition topicPartition = new TopicPartition("test", 1);
  final Map<TopicPartition, Long> expected = ImmutableMap.of(
      topicPartition, 123L
  );

  SinkRecord record = new SinkRecord(
      topicPartition.topic(),
      topicPartition.partition(),
      Schema.STRING_SCHEMA,
      "",
      Schema.STRING_SCHEMA,
      "",
      123L
  );
  this.counter.increment(record);
  assertEquals(expected, this.counter.data());
}
 
Example 4
Source File: ZookeeperCheckpointManager.java    From uReplicator with Apache License 2.0
public Long fetchOffset(TopicPartition topicPartition) {
  ZKGroupTopicDirs dirs = new ZKGroupTopicDirs(groupId, topicPartition.topic());
  String path = dirs.consumerOffsetDir() + "/" + topicPartition.partition();
  if (!commitZkClient.exists(path)) {
    return -1L;
  }
  String offset = commitZkClient.readData(path).toString();
  if (StringUtils.isEmpty(offset)) {
    return -1L;
  }
  try {
    return Long.parseLong(offset);
  } catch (Exception e) {
    LOGGER.warn("Parse offset {} for topic partition failed, zk path: {}", offset, path);
    return -1L;
  }
}
 
Example 5
Source File: ConsumerLease.java    From nifi with Apache License 2.0
private BundleTracker(final ConsumerRecord<byte[], byte[]> initialRecord, final TopicPartition topicPartition, final String keyEncoding, final RecordSetWriter recordWriter) {
    this.initialOffset = initialRecord.offset();
    this.initialTimestamp = initialRecord.timestamp();
    this.partition = topicPartition.partition();
    this.topic = topicPartition.topic();
    this.recordWriter = recordWriter;
    this.key = encodeKafkaKey(initialRecord.key(), keyEncoding);
}
 
Example 6
Source File: GroupMetrics.java    From kafka-metrics with Apache License 2.0
private MetricName NewName(String group, TopicPartition tp) {
    return new MetricName(
            "kafka.groups",
            "Group",
            name,
            "",
            "kafka.consumer:type=Group,name=" + name
                    + ",group=" + group
                    + ",topic=" + tp.topic()
                    + ",partition=" + tp.partition());
}
 
Example 7
Source File: ClientKafkaMonitor.java    From Kafdrop with Apache License 2.0
private ConsumerPartitionVO createConsumerPartition(String groupId,
                                                    TopicPartition topicPartition,
                                                    OffsetAndMetadata offset)
{
   ConsumerPartitionVO vo = new ConsumerPartitionVO(groupId, topicPartition.topic(), topicPartition.partition());
   vo.setConsumerOffset(new ConsumerOffsetVO(-1, offset.offset()));
   return vo;
}
 
Example 8
Source File: KafkaUtils.java    From doctorkafka with Apache License 2.0
@Override
public int compare(TopicPartition x, TopicPartition y) {
  int result = x.topic().compareTo(y.topic());
  if (result == 0) {
    result = x.partition() - y.partition();
  }
  return result;
}
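For reference, the same topic-then-partition ordering can be expressed with the JDK's Comparator combinators. This is a sketch, not code from the doctorkafka project:

import java.util.Comparator;
import org.apache.kafka.common.TopicPartition;

Comparator<TopicPartition> byTopicThenPartition =
        Comparator.comparing(TopicPartition::topic)
                .thenComparingInt(TopicPartition::partition);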
 
Example 9
Source File: ZookeeperCheckpointManager.java    From uReplicator with Apache License 2.0
private void commitOffsetToZookeeper(TopicPartition topicPartition, long offset) {
  if (!offsetCheckpoints.containsKey(topicPartition)
      || offsetCheckpoints.get(topicPartition) != offset) {
    ZKGroupTopicDirs dirs = new ZKGroupTopicDirs(groupId, topicPartition.topic());
    String path = dirs.consumerOffsetDir() + "/" + topicPartition.partition();
    if (!commitZkClient.exists(path)) {
      commitZkClient.createPersistent(path, true);
    }
    commitZkClient.writeData(path,
        String.valueOf(offset));
    offsetCheckpoints.put(topicPartition, offset);
  }
}
 
Example 10
Source File: KafkaUtil.java    From samza with Apache License 2.0
public static SystemStreamPartition toSystemStreamPartition(String systemName, TopicPartition topicPartition) {
  Partition partition = new Partition(topicPartition.partition());
  return new SystemStreamPartition(systemName, topicPartition.topic(), partition);
}
 
Example 11
Source File: BackupSinkTask.java    From kafka-backup with Apache License 2.0
public void open(Collection<TopicPartition> partitions) {
    super.open(partitions);
    try {
        for (TopicPartition topicPartition : partitions) {
            Path topicDir = Paths.get(targetDir.toString(), topicPartition.topic());
            Files.createDirectories(topicDir);
            PartitionWriter partitionWriter = new PartitionWriter(topicPartition.topic(), topicPartition.partition(), topicDir, maxSegmentSizeBytes);
            long lastWrittenOffset = partitionWriter.lastWrittenOffset();

            // Note that we must *always* request that we seek to an offset here. Currently the
            // framework will still commit Kafka offsets even though we track our own (see KAFKA-3462),
            // which can result in accidentally using that offset if one was committed but no files
            // were written to disk. To protect against this, even if we
            // just want to start at offset 0 or reset to the earliest offset, we specify that
            // explicitly to forcibly override any committed offsets.

            if (lastWrittenOffset > 0) {
                context.offset(topicPartition, lastWrittenOffset + 1);
                log.debug("Initialized Topic {}, Partition {}. Last written offset: {}"
                        , topicPartition.topic(), topicPartition.partition(), lastWrittenOffset);
            } else {
                // The offset was not found, so rather than forcibly set the offset to 0 we let the
                // consumer decide where to start based upon standard consumer offsets (if available)
                // or the consumer's `auto.offset.reset` configuration

                // if we are in snapshot mode, then just start at zero.
                if (config.snapShotMode()) {
                    context.offset(topicPartition, 0);
                }

                log.info("Resetting offset for {} based upon existing consumer group offsets or, if "
                        + "there are none, the consumer's 'auto.offset.reset' value.", topicPartition);
            }

            this.partitionWriters.put(topicPartition, partitionWriter);
            this.currentOffsets.put(topicPartition, lastWrittenOffset);
        }
        if (config.snapShotMode()) {
            this.endOffsets = endOffsetReader.getEndOffsets(partitions);
            this.terminateIfCompleted();
        }
        if (partitions.isEmpty()) {
            log.info("No partitions assigned to BackupSinkTask");
        }
    } catch (IOException | SegmentIndex.IndexException | PartitionIndex.IndexException e) {
        throw new RuntimeException(e);
    }
}
 
Example 12
Source File: KafkaDatastreamStatesResponse.java    From brooklin with BSD 2-Clause "Simplified" License
@Override
public Object deserializeKey(String key, DeserializationContext ctxt) throws IOException {
  TopicPartition tp = topicPartitionFromString(key);
  return new FlushlessEventProducerHandler.SourcePartition(tp.topic(), tp.partition());
}
 
Example 13
Source File: PostgreSQLSinkTask.java    From kafka-sink-pg-json with MIT License
/**
 * Flushes content to the database
 * @param offsets map of offsets being flushed
 * @throws ConnectException if flush failed
 */
@Override
public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) throws ConnectException {
  
    fLog.trace("Flush start at "+System.currentTimeMillis());
    
    try {
      
      if (iDelivery>FASTEST)//if guaranteed or synchronized
        iWriter.flush();//flush table writes
      
      if (iDelivery==SYNCHRONIZED) {//if synchronized delivery
        
        /* create topic, partition and offset arrays for database flush function call */
        
        int size=offsets.size();//get number of flush map entries
        String[] topicArray=new String[size];//create array for topics
        Integer[] partitionArray=new Integer[size];//create array for partitions 
        Long[] offsetArray=new Long[size];//create array for offsets

        /* populate topic, partition and offset arrays */
        
        Iterator<Map.Entry<TopicPartition, OffsetAndMetadata>> iterator=offsets.entrySet().iterator();//create map iterator
        for (int i=0;i<size;++i) {//for each flush map entry
          Entry<TopicPartition, OffsetAndMetadata> entry=iterator.next();//get next entry
          TopicPartition key=entry.getKey();//get topic partition key
          OffsetAndMetadata value=entry.getValue();//get offset value
          topicArray[i]=key.topic();//put topic into array
          partitionArray[i]=key.partition();//put partition in to array
          offsetArray[i]=value.offset();//put offset into array                        
        }//for each flush map entry

        /* bind arrays to flush statement */
        
        iFlushStatement.setArray(1, iConnection.createArrayOf("varchar", topicArray));//bind topic array
        iFlushStatement.setArray(2, iConnection.createArrayOf("integer", partitionArray));//bind partition array
        iFlushStatement.setArray(3, iConnection.createArrayOf("bigint", offsetArray));//bind offset array
        
        /* execute the database flush function */
        
        iFlushStatement.executeQuery();
        
      }//if synchronized delivery
      
    } catch (SQLException | IOException exception) {
      throw new ConnectException(exception);
    }//try{}
    
    fLog.trace("Flush stop at "+System.currentTimeMillis());
     
}
 
Example 14
Source File: FileUtils.java    From streamx with Apache License 2.0
public static String directoryName(String url, String topicsDir, TopicPartition topicPart) {
  String topic = topicPart.topic();
  int partition = topicPart.partition();
  return url + "/" + topicsDir + "/" + topic + "/" + partition;
}
 
Example 15
Source File: KafkaSystemConsumer.java    From samza with Apache License 2.0
protected static TopicAndPartition toTopicAndPartition(TopicPartition topicPartition) {
  return new TopicAndPartition(topicPartition.topic(), topicPartition.partition());
}
 
Example 16
Source File: ConsumerLease.java    From localization_nifi with Apache License 2.0
private BundleTracker(final ConsumerRecord<byte[], byte[]> initialRecord, final TopicPartition topicPartition, final String keyEncoding) {
    this.initialOffset = initialRecord.offset();
    this.partition = topicPartition.partition();
    this.topic = topicPartition.topic();
    this.key = encodeKafkaKey(initialRecord.key(), keyEncoding);
}
 
Example 17
Source File: SamplingUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Replaces any dots in the topic name of the given TopicPartition with underscores.
 *
 * @param tp TopicPartition whose topic name may contain dots.
 * @return TopicPartition with dots in the topic name replaced by underscores.
 */
private static TopicPartition partitionHandleDotInTopicName(TopicPartition tp) {
  // In the reported metrics, the "." in the topic name will be replaced by "_".
  return !tp.topic().contains(".") ? tp :
         new TopicPartition(replaceDotsWithUnderscores(tp.topic()), tp.partition());
}