org.apache.kafka.clients.consumer.OffsetAndMetadata Java Examples

The following examples show how to use org.apache.kafka.clients.consumer.OffsetAndMetadata. Each example is taken from an open source project; the source project, author, file, and license are noted above the code.
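Before the project examples, a minimal self-contained sketch of the class in use may help. The class name, broker address, topic, and group id below are illustrative placeholders, not taken from any of the projects. An OffsetAndMetadata wraps the offset a consumer group commits for a partition, optionally together with a metadata string; note that Kafka expects the committed offset to be the offset of the next record to consume, which is why the examples below commit lastConsumedOffset + 1.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class OffsetAndMetadataBasics {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker
        props.put("group.id", "example-group");           // placeholder group id
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("example-topic")); // placeholder topic
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                // process the record, then commit the offset of the *next* record to consume
                TopicPartition tp = new TopicPartition(record.topic(), record.partition());
                consumer.commitSync(Collections.singletonMap(tp,
                        new OffsetAndMetadata(record.offset() + 1)));
            }
        }
    }
}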
Example #1
Source Project: BigData-In-Practice   Author: whirlys   File: OffsetCommitSyncPartition.java    License: Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();

    try {
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                for (ConsumerRecord<String, String> record : partitionRecords) {
                    // do some application-specific processing with the record
                }
                long lastConsumedOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                consumer.commitSync(Collections.singletonMap(partition,
                        new OffsetAndMetadata(lastConsumedOffset + 1)));
            }
        }
    } finally {
        consumer.close();
    }
}
 
Example #2
Source Project: beast   Author: gojek   File: Records.java    License: Apache License 2.0
public Map<TopicPartition, OffsetAndMetadata> getPartitionsCommitOffset() {
    // kafka commit requires offset + 1 (next offset)
    if (!partitionsCommitOffset.isEmpty()) {
        return partitionsCommitOffset;
    }
    records.forEach(r -> {
        OffsetInfo offsetInfo = r.getOffsetInfo();
        TopicPartition key = offsetInfo.getTopicPartition();
        OffsetMetadata value = new OffsetMetadata(offsetInfo.getOffset() + 1);
        OffsetMetadata previousOffset = (OffsetMetadata) partitionsCommitOffset.getOrDefault(key, new OffsetMetadata(Integer.MIN_VALUE));
        if (previousOffset.compareTo(value) < 0) {
            partitionsCommitOffset.put(key, value);
        }
    });
    return partitionsCommitOffset;
}
 
Example #3
Source Project: localization_nifi   Author: wangrenlei   File: ConsumerLease.java    License: Apache License 2.0
private void processRecords(final ConsumerRecords<byte[], byte[]> records) {

    records.partitions().stream().forEach(partition -> {
        List<ConsumerRecord<byte[], byte[]>> messages = records.records(partition);
        if (!messages.isEmpty()) {
            //update maximum offset map for this topic partition
            long maxOffset = messages.stream()
                    .mapToLong(record -> record.offset())
                    .max()
                    .getAsLong();
            uncommittedOffsetsMap.put(partition, new OffsetAndMetadata(maxOffset + 1L));

            //write records to content repository and session
            if (demarcatorBytes == null) {
                totalFlowFiles += messages.size();
                messages.stream().forEach(message -> {
                    writeData(getProcessSession(), message, partition);
                });
            } else {
                writeData(getProcessSession(), messages, partition);
            }
        }
    });
}
 
Example #4
Source Project: spring-cloud-stream-binder-kafka   Author: spring-cloud   File: KafkaBinderMetricsTest.java    License: Apache License 2.0
@Test
public void shouldSumUpPartitionsLags() {
	Map<TopicPartition, Long> endOffsets = new HashMap<>();
	endOffsets.put(new TopicPartition(TEST_TOPIC, 0), 1000L);
	endOffsets.put(new TopicPartition(TEST_TOPIC, 1), 1000L);
	org.mockito.BDDMockito
			.given(consumer.endOffsets(ArgumentMatchers.anyCollection()))
			.willReturn(endOffsets);
	org.mockito.BDDMockito
			.given(consumer.committed(ArgumentMatchers.any(TopicPartition.class)))
			.willReturn(new OffsetAndMetadata(500));
	List<PartitionInfo> partitions = partitions(new Node(0, null, 0),
			new Node(0, null, 0));
	topicsInUse.put(TEST_TOPIC,
			new TopicInformation("group2-metrics", partitions, false));
	org.mockito.BDDMockito.given(consumer.partitionsFor(TEST_TOPIC))
			.willReturn(partitions);
	metrics.bindTo(meterRegistry);
	assertThat(meterRegistry.getMeters()).hasSize(1);
	assertThat(meterRegistry.get(KafkaBinderMetrics.METRIC_NAME)
			.tag("group", "group2-metrics").tag("topic", TEST_TOPIC).gauge().value())
					.isEqualTo(1000.0);
}
 
Example #5
Source Project: common-kafka   Author: cerner   File: ProcessingPartition.java    License: Apache License 2.0
/**
 * Returns the reset offset used in situations where the consumer has no committed offset for a partition, or its committed
 * offset is out of range. The returned offset is ensured to be committed, if {@link ProcessingConfig#getCommitInitialOffset()
 * allowed} by the configuration.
 *
 * @return the reset offset
 */
private long getCommittedResetOffset() {
    // Get the reset offset
    long resetOffset = getResetOffset();

    LOGGER.debug("Using reset offset [{}] for partition [{}] as last committed offset", resetOffset, topicPartition);

    // Consumer doesn't have an offset so try to commit the offset. This can be helpful for monitoring in case
    // there are no messages in the queue or processing is failing
    if (config.getCommitInitialOffset()) {
        try {
            consumer.commitSync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(resetOffset)));
        } catch (KafkaException e) {
            LOGGER.warn("Unable to commit reset offset {} during initialization of partition {} for group {}", resetOffset,
                    topicPartition, config.getGroupId(), e);
        }
    }

    return resetOffset;
}
 
Example #6
Source Project: beast   Author: gojek   File: OffsetMapQueueSinkTest.java    License: Apache License 2.0
@Test
public void shouldPushMultipleMessagesToQueue() throws InterruptedException {
    BlockingQueue<Map<TopicPartition, OffsetAndMetadata>> queue = new LinkedBlockingQueue<>();
    queueSink = new OffsetMapQueueSink(queue, queueConfig);
    Records messages = new Records(Arrays.asList(new Record(offsetInfo, new HashMap<>()), new Record(offsetInfo, new HashMap<>())));

    Status status = queueSink.push(messages);

    assertTrue(status.isSuccess());
    assertEquals(1, queue.size());
    Map<TopicPartition, OffsetAndMetadata> partitionsCommitOffset = queue.take();
    assertEquals(1, partitionsCommitOffset.size());
    Map.Entry<TopicPartition, OffsetAndMetadata> offset = partitionsCommitOffset.entrySet().iterator().next();
    assertEquals("default-topic", offset.getKey().topic());
    assertEquals(0, offset.getKey().partition());
    assertEquals(1L, offset.getValue().offset());
}
 
Example #7
Source Project: common-kafka   Author: cerner   File: ConsumerOffsetClientTest.java    License: Apache License 2.0
@Test
public void getCommittedOffsets() {
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition("topic1", 0), 123L);
    offsets.put(new TopicPartition("topic1", 1), 234L);
    offsets.put(new TopicPartition("topic2", 0), -1L);
    offsets.put(new TopicPartition("topic2", 1), -1L);

    when(consumer.partitionsFor("topic1")).thenReturn(Arrays.asList(
            new PartitionInfo("topic1", 0, null, null, null),
            new PartitionInfo("topic1", 1, null, null, null)));
    when(consumer.partitionsFor("topic2")).thenReturn(Arrays.asList(
            new PartitionInfo("topic2", 0, null, null, null),
            new PartitionInfo("topic2", 1, null, null, null)));

    when(consumer.committed(new TopicPartition("topic1", 0))).thenReturn(new OffsetAndMetadata(123L));
    when(consumer.committed(new TopicPartition("topic1", 1))).thenReturn(new OffsetAndMetadata(234L));

    assertThat(client.getCommittedOffsets(Arrays.asList("topic1", "topic2")), is(offsets));
}
 
Example #8
private void seedBadOffset(String topic, Properties openSourceConsumerProperties, long offset, long diffFromEnd) {
  try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(openSourceConsumerProperties)) {
    List<PartitionInfo> partitionsForTopic = kafkaConsumer.partitionsFor(topic);
    List<TopicPartition> tps = new ArrayList<>();
    for (int i = 0; i < partitionsForTopic.size(); i++) {
      tps.add(new TopicPartition(topic, i));
    }
    Map<TopicPartition, Long> endOffsets = kafkaConsumer.endOffsets(tps);
    Map<TopicPartition, OffsetAndMetadata> badOffsetMap = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> endOffsetEntry : endOffsets.entrySet()) {
      if (endOffsetEntry.getValue() == null || endOffsetEntry.getValue() == -1) {
        continue;
      }
      long badOffset = endOffsetEntry.getValue() + diffFromEnd;
      OffsetAndMetadata om = new OffsetAndMetadata(offset, badOffset + ",");
      badOffsetMap.put(endOffsetEntry.getKey(), om);
    }
    kafkaConsumer.commitSync(badOffsetMap);
  }
}
 
Example #9
Source Project: Flink-CEPplus   Author: ljygz   File: KafkaConsumerThread.java    License: Apache License 2.0
/**
 * Tells this thread to commit a set of offsets. This method does not block; the committing
 * operation happens asynchronously.
 *
 * <p>Only one commit operation may be pending at any time. If the committing takes longer than
 * the frequency with which this method is called, then some commits may be skipped due to being
 * superseded by newer ones.
 *
 * @param offsetsToCommit The offsets to commit
 * @param commitCallback callback when Kafka commit completes
 */
void setOffsetsToCommit(
		Map<TopicPartition, OffsetAndMetadata> offsetsToCommit,
		@Nonnull KafkaCommitCallback commitCallback) {

	// record the work to be committed by the main consumer thread and make sure the consumer notices that
	if (nextOffsetsToCommit.getAndSet(Tuple2.of(offsetsToCommit, commitCallback)) != null) {
		log.warn("Committing offsets to Kafka takes longer than the checkpoint interval. " +
				"Skipping commit of previous offsets because newer complete checkpoint offsets are available. " +
				"This does not compromise Flink's checkpoint integrity.");
	}

	// if the consumer is blocked in a poll() or handover operation, wake it up to commit soon
	handover.wakeupProducer();

	synchronized (consumerReassignmentLock) {
		if (consumer != null) {
			consumer.wakeup();
		} else {
			// the consumer is currently isolated for partition reassignment;
			// set this flag so that the wakeup state is restored once the reassignment is complete
			hasBufferedWakeup = true;
		}
	}
}
 
Example #10
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> topicPartitionOffsetAndMetadataMap, Exception e) {
  if (_userCallback != null) {
    Map<TopicPartition, OffsetAndMetadata> userOffsetMap = topicPartitionOffsetAndMetadataMap;
    if (topicPartitionOffsetAndMetadataMap != null) {
      userOffsetMap = new HashMap<>();
      for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : topicPartitionOffsetAndMetadataMap.entrySet()) {
        String rawMetadata = entry.getValue().metadata();
        long userOffset = LiKafkaClientsUtils.offsetFromWrappedMetadata(rawMetadata);
        String userMetadata = LiKafkaClientsUtils.metadataFromWrappedMetadata(rawMetadata);
        userOffsetMap.put(entry.getKey(), new OffsetAndMetadata(userOffset, userMetadata));
      }
    }
    _userCallback.onComplete(userOffsetMap, e);
  }
}
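Examples #8 and #10 above both use the two-argument constructor, OffsetAndMetadata(long offset, String metadata), which attaches an application-defined string to the committed offset (in those projects, used to smuggle extra bookkeeping into the broker-side commit). A minimal sketch of the round trip, assuming an already-configured consumer, a placeholder topic, and the same imports used throughout this page (the helper name is illustrative):

// Assumes: org.apache.kafka.clients.consumer.KafkaConsumer,
// org.apache.kafka.clients.consumer.OffsetAndMetadata,
// org.apache.kafka.common.TopicPartition, java.util.Collections
static void commitWithMetadata(KafkaConsumer<String, String> consumer) {
    TopicPartition tp = new TopicPartition("example-topic", 0); // placeholder topic

    // The metadata string is stored on the broker next to the committed offset...
    consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(42L, "checkpoint-7")));

    // ...and can be read back later by any client in the same group.
    OffsetAndMetadata fetched = consumer.committed(tp);
    System.out.println(fetched.offset() + " / " + fetched.metadata()); // 42 / checkpoint-7
}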
 
Example #11
Source Project: bireme   Author: HashDataInc   File: KafkaPipeLine.java    License: Apache License 2.0
@Override
public void commit() {
  HashMap<TopicPartition, OffsetAndMetadata> offsets =
      new HashMap<TopicPartition, OffsetAndMetadata>();

  partitionOffset.forEach((key, value) -> {
    String topic = key.split("\\+")[0];
    int partition = Integer.valueOf(key.split("\\+")[1]);
    offsets.put(new TopicPartition(topic, partition), new OffsetAndMetadata(value + 1));
  });

  consumer.commitSync(offsets);
  committed.set(true);
  partitionOffset.clear();

  // record the time being committed
  timerCTX.stop();

  stat.newestCompleted = newestRecord;
  stat.delay = new Date().getTime() - start.getTime();
}
 
Example #12
Source Project: flink   Author: flink-tpc-ds   File: KafkaConsumerThread.java    License: Apache License 2.0
/**
 * Tells this thread to commit a set of offsets. This method does not block; the committing
 * operation happens asynchronously.
 *
 * <p>Only one commit operation may be pending at any time. If the committing takes longer than
 * the frequency with which this method is called, then some commits may be skipped due to being
 * superseded by newer ones.
 *
 * @param offsetsToCommit The offsets to commit
 * @param commitCallback callback when Kafka commit completes
 */
void setOffsetsToCommit(
		Map<TopicPartition, OffsetAndMetadata> offsetsToCommit,
		@Nonnull KafkaCommitCallback commitCallback) {

	// record the work to be committed by the main consumer thread and make sure the consumer notices that
	if (nextOffsetsToCommit.getAndSet(Tuple2.of(offsetsToCommit, commitCallback)) != null) {
		log.warn("Committing offsets to Kafka takes longer than the checkpoint interval. " +
				"Skipping commit of previous offsets because newer complete checkpoint offsets are available. " +
				"This does not compromise Flink's checkpoint integrity.");
	}

	// if the consumer is blocked in a poll() or handover operation, wake it up to commit soon
	handover.wakeupProducer();

	synchronized (consumerReassignmentLock) {
		if (consumer != null) {
			consumer.wakeup();
		} else {
			// the consumer is currently isolated for partition reassignment;
			// set this flag so that the wakeup state is restored once the reassignment is complete
			hasBufferedWakeup = true;
		}
	}
}
 
Example #13
Source Project: common-kafka   Author: cerner   File: ProcessingPartitionTest.java    License: Apache License 2.0
@Before
public void before() {
    properties = new Properties();
    properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-group");
    properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, OffsetResetStrategy.EARLIEST.toString().toLowerCase());

    config = new ProcessingConfig(properties);
    topicPartition = new TopicPartition("topic", 1);

    when(consumer.committed(topicPartition)).thenReturn(new OffsetAndMetadata(0L));

    partition = new MockProcessingPartition<>(topicPartition, config, consumer);

    logAppender = new TestLogAppender();
    RootLogger.getRootLogger().addAppender(logAppender);
}
 
Example #14
Source Project: kbear   Author: ctripcorp   File: ConsumerTest.java    License: Apache License 2.0
protected void commitSync(java.util.function.Consumer<Consumer<String, String>> committer)
        throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumerWithoutAutoCommit()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        OffsetAndMetadata committed = consumer.committed(_topicPartition);
        System.out.println("committed: " + committed);
        committer.accept(consumer);
        OffsetAndMetadata committed2 = consumer.committed(_topicPartition);
        System.out.println("committed2: " + committed2);
        Assert.assertTrue(committed2.offset() > committed.offset());
    }
}
 
Example #15
Source Project: kafka-workers   Author: RTBHOUSE   File: OffsetCommitCallbackImpl.java    License: Apache License 2.0
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
    if (exception != null) {
        if (exception instanceof RetriableCommitFailedException) {
            final int failureNum = failuresInRow.incrementAndGet();
            if (failureNum <= maxFailuresInRow) {
                logger.warn("retriable commit failed exception: {}, offsets: {}, failureNum: {}/{}",
                        exception, offsets, failureNum, maxFailuresInRow);
            } else {
                logger.error("retriable commit failed exception: {}, offsets: {}, failureNum: {}/{}",
                        exception, offsets, failureNum, maxFailuresInRow);
                consumerThread.shutdown(new FailedCommitException(exception));
            }
        } else {
            logger.error("commit failed exception: {}, offsets: {}", exception, offsets);
            consumerThread.shutdown(new FailedCommitException(exception));
        }
    } else {
        logger.debug("commit succeeded, offsets: {}", offsets);
        failuresInRow.set(0);
        for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
            TopicPartition partition = entry.getKey();
            long offset = entry.getValue().offset();
            metrics.recordSensor(WorkersMetrics.COMMITTED_OFFSET_METRIC, partition, offset);
        }
        offsetsState.removeCommitted(offsets);
    }
}
 
Example #16
Source Project: DataflowTemplates   Author: GoogleCloudPlatform   File: ProducerSpEL.java    License: Apache License 2.0
static void sendOffsetsToTransaction(
    Producer<?, ?> producer,
    Map<TopicPartition, OffsetAndMetadata> offsets,
    String consumerGroupId) {
  ensureTransactionsSupport();
  invoke(sendOffsetsToTransactionMethod, producer, offsets, consumerGroupId);
}
 
Example #17
Source Project: atlas   Author: apache   File: KafkaConsumerTest.java    License: Apache License 2.0
@Test
public void testCommitIsNotCalledIfAutoCommitEnabled() {
    TopicPartition     tp       = new TopicPartition(ATLAS_HOOK_TOPIC, 0);
    AtlasKafkaConsumer consumer = new AtlasKafkaConsumer(NotificationType.HOOK, kafkaConsumer, true, 100L);

    consumer.commit(tp, 1);

    verify(kafkaConsumer, never()).commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(1)));
}
 
Example #18
Source Project: common-kafka   Author: cerner   File: KafkaSinkTask.java    License: Apache License 2.0
@Override
public void flush(Map<TopicPartition, OffsetAndMetadata> offsets) {
    LOGGER.debug("Flushing kafka sink");

    try {
        producer.flush();
    } catch (IOException e) {
        LOGGER.debug("IOException on flush, re-throwing as retriable", e);
        // Re-throw exception as connect retriable since we just want connect to keep retrying forever
        throw new RetriableException(e);
    }

    super.flush(offsets);
}
 
Example #19
Source Project: kafka-workers   Author: RTBHOUSE   File: ConsumerThread.java    License: Apache License 2.0
private void commitAsync() {
    Instant minCreatedAt = Instant.now().minus(consumerProcessingTimeout);
    Map<TopicPartition, OffsetAndMetadata> offsets = offsetsState.getOffsetsToCommit(minCreatedAt);
    logger.debug("committing offsets async: {}", offsets);
    if (!offsets.isEmpty()) {
        consumer.commitAsync(offsets, commitCallback);
    }
}
 
Example #20
Source Project: beast   Author: gojek   File: OffsetAcknowledger.java    License: Apache License 2.0
@Override
public boolean acknowledge(Map<TopicPartition, OffsetAndMetadata> offsets) {
    boolean status = partitionOffsetAck.add(offsets);
    statsClient.gauge("queue.elements,name=ack", partitionOffsetAck.size());
    log.debug("Acknowledged by bq sink: {} status: {}", offsets, status);
    return status;
}
 
Example #21
Source Project: storm_spring_boot_demo   Author: Paleozoic   File: KafkaSpout.java    License: MIT License
private void initialize(Collection<TopicPartition> partitions) {
    if (!consumerAutoCommitMode) {
        acked.keySet().retainAll(partitions);   // remove from acked all partitions that are no longer assigned to this spout
    }

    retryService.retainAll(partitions);

    /*
     * Emitted messages for partitions that are no longer assigned to this spout can't
     * be acked and should not be retried, hence remove them from emitted collection.
     */
    Set<TopicPartition> partitionsSet = new HashSet<>(partitions);
    Iterator<KafkaSpoutMessageId> msgIdIterator = emitted.iterator();
    while (msgIdIterator.hasNext()) {
        KafkaSpoutMessageId msgId = msgIdIterator.next();
        if (!partitionsSet.contains(msgId.getTopicPartition())) {
            msgIdIterator.remove();
        }
    }

    for (TopicPartition tp : partitions) {
        final OffsetAndMetadata committedOffset = kafkaConsumer.committed(tp);
        final long fetchOffset = doSeek(tp, committedOffset);
        setAcked(tp, fetchOffset);
    }
    initialized = true;
    LOG.info("Initialization complete");
}
 
Example #22
@Override
public void commitSync(Map<TopicPartition, OffsetAndMetadata> offsets, Duration timeout) {
  try (
      @SuppressWarnings("unused") CloseableLock uLock = new CloseableLock(userLock);
      @SuppressWarnings("unused") CloseableLock srLock = new CloseableLock(delegateLock.readLock())
  ) {
    verifyOpen();
    delegate.commitSync(offsets, timeout);
  }
}
 
Example #23
Source Project: BigData-In-Practice   Author: whirlys   File: KafkaConsumerGroupService.java    License: Apache License 2.0
public List<PartitionAssignmentState> collectGroupAssignment(
        String group) throws ExecutionException, InterruptedException {
    DescribeConsumerGroupsResult groupResult = adminClient
            .describeConsumerGroups(Collections.singleton(group));
    ConsumerGroupDescription description =
            groupResult.all().get().get(group);

    List<TopicPartition> assignedTps = new ArrayList<>();
    List<PartitionAssignmentState> rowsWithConsumer = new ArrayList<>();
    Collection<MemberDescription> members = description.members();
    if (members != null) {
        ListConsumerGroupOffsetsResult offsetResult = adminClient
                .listConsumerGroupOffsets(group);
        Map<TopicPartition, OffsetAndMetadata> offsets = offsetResult
                .partitionsToOffsetAndMetadata().get();
        if (offsets != null && !offsets.isEmpty()) {
            String state = description.state().toString();
            if (state.equals("Stable")) {
                rowsWithConsumer = getRowsWithConsumer(description, offsets,
                        members, assignedTps, group);
            }
        }
        List<PartitionAssignmentState> rowsWithoutConsumer =
                getRowsWithoutConsumer(description, offsets,
                        assignedTps, group);
        rowsWithConsumer.addAll(rowsWithoutConsumer);
    }
    return rowsWithConsumer;
}
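The core of this example is AdminClient.listConsumerGroupOffsets, whose result resolves to a Map<TopicPartition, OffsetAndMetadata>. Stripped to its essentials, and with a placeholder broker address and an illustrative method name, the lookup looks roughly like this:

// Assumes: org.apache.kafka.clients.admin.AdminClient,
// org.apache.kafka.clients.consumer.OffsetAndMetadata,
// org.apache.kafka.common.TopicPartition, java.util.Map, java.util.Properties
static void printGroupOffsets(String groupId) throws Exception {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092"); // placeholder broker

    try (AdminClient adminClient = AdminClient.create(props)) {
        Map<TopicPartition, OffsetAndMetadata> offsets = adminClient
                .listConsumerGroupOffsets(groupId)
                .partitionsToOffsetAndMetadata()
                .get(); // KafkaFuture: blocks until the broker responds
        offsets.forEach((tp, om) -> System.out.println(tp + " -> " + om.offset()));
    }
}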
 
Example #24
Source Project: snowflake-kafka-connector   Author: snowflakedb   File: SinkTaskTest.java    License: Apache License 2.0
@Test
public void testPreCommit()
{
  SnowflakeSinkTask sinkTask = new SnowflakeSinkTask();
  Map<TopicPartition, OffsetAndMetadata> offsetMap = new HashMap<>();

  sinkTask.preCommit(offsetMap);
  System.out.println("PreCommit test success");
}
 
Example #25
Source Project: beast   Author: gojek   File: OffsetStateTest.java    License: Apache License 2.0
@Test
public void shouldReturnTrueWhenLastAckOffsetIsSameAndTimedOut() throws InterruptedException {
    int ackTimeout = 200;
    Map<TopicPartition, OffsetAndMetadata> currOffset = new HashMap<>();
    currOffset.put(new TopicPartition("topic", 1), new OffsetAndMetadata(101));
    OffsetState state = new OffsetState(acknowledgements, ackTimeout, 100);
    state.startTimer();
    state.resetOffset();

    Thread.sleep(ackTimeout + 10);
    assertTrue(state.shouldCloseConsumer(currOffset));
}
 
Example #26
Source Project: flink   Author: flink-tpc-ds   File: KafkaConsumerThread.java    License: Apache License 2.0
@Override
public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception ex) {
	commitInProgress = false;

	if (ex != null) {
		log.warn("Committing offsets to Kafka failed. This does not compromise Flink's checkpoints.", ex);
		internalCommitCallback.onException(ex);
	} else {
		internalCommitCallback.onSuccess();
	}
}
 
Example #27
Source Project: kafka-eagle   Author: smartloli   File: KafkaServiceImpl.java    License: Apache License 2.0
/**
 * Get kafka 0.10.x, 1.x, 2.x offset from topic.
 */
public String getKafkaOffset(String clusterAlias) {
	Properties prop = new Properties();
	prop.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, parseBrokerServer(clusterAlias));

	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.sasl.enable")) {
		sasl(prop, clusterAlias);
	}
	if (SystemConfigUtils.getBooleanProperty(clusterAlias + ".kafka.eagle.ssl.enable")) {
		ssl(prop, clusterAlias);
	}
	JSONArray targets = new JSONArray();
	AdminClient adminClient = null;
	try {
		adminClient = AdminClient.create(prop);
		ListConsumerGroupsResult consumerGroups = adminClient.listConsumerGroups();
		java.util.Iterator<ConsumerGroupListing> groups = consumerGroups.all().get().iterator();
		while (groups.hasNext()) {
			String groupId = groups.next().groupId();
			if (!groupId.contains("kafka.eagle")) {
				ListConsumerGroupOffsetsResult offsets = adminClient.listConsumerGroupOffsets(groupId);
				for (Entry<TopicPartition, OffsetAndMetadata> entry : offsets.partitionsToOffsetAndMetadata().get().entrySet()) {
					JSONObject object = new JSONObject();
					object.put("group", groupId);
					object.put("topic", entry.getKey().topic());
					object.put("partition", entry.getKey().partition());
					object.put("offset", entry.getValue().offset());
					object.put("timestamp", CalendarUtils.getDate());
					targets.add(object);
				}
			}
		}
	} catch (Exception e) {
		LOG.error("Get consumer offset has error, msg is " + e.getMessage());
		e.printStackTrace();
	} finally {
		if (adminClient != null) {
			adminClient.close();
		}
	}
	return targets.toJSONString();
}
 
Example #28
Source Project: beast   Author: gojek   File: OffsetCommitWorkerTest.java    License: Apache License 2.0
@Before
public void setUp() {
    pollTimeout = 200;
    offsetBatchDuration = 1000;
    ackTimeoutTime = 2000;
    queueConfig = new QueueConfig(pollTimeout);
    commitPartitionsOffset = new HashMap<TopicPartition, OffsetAndMetadata>() {{
        put(new TopicPartition("topic", 0), new OffsetAndMetadata(1));
    }};
    CopyOnWriteArraySet<Map<TopicPartition, OffsetAndMetadata>> ackSet = new CopyOnWriteArraySet<>();
    acknowledgements = Collections.synchronizedSet(ackSet);
    offsetState = new OffsetState(acknowledgements, ackTimeoutTime, offsetBatchDuration);
    workerState = new WorkerState();
    offsetAcknowledger = new OffsetAcknowledger(acknowledgements);
}
 
Example #29
Source Project: Kafdrop   Author: HomeAdvisor   File: ClientKafkaMonitor.java    License: Apache License 2.0
private ConsumerPartitionVO createConsumerPartition(String groupId,
                                                    TopicPartition topicPartition,
                                                    OffsetAndMetadata offset)
{
   ConsumerPartitionVO vo = new ConsumerPartitionVO(groupId, topicPartition.topic(), topicPartition.partition());
   vo.setConsumerOffset(new ConsumerOffsetVO(-1, offset.offset()));
   return vo;
}
 
Example #30
Source Project: common-kafka   Author: cerner   File: ProcessingKafkaConsumerTest.java    License: Apache License 2.0
@Test
public void commitOffsets() {
    long previousCommitCount = ProcessingKafkaConsumer.COMMIT_METER.count();

    // Read a bunch of messages
    processingConsumer.nextRecord(POLL_TIME); // record 1
    processingConsumer.nextRecord(POLL_TIME); // null
    processingConsumer.nextRecord(POLL_TIME); // record 2
    processingConsumer.nextRecord(POLL_TIME); // record 3
    processingConsumer.nextRecord(POLL_TIME); // null
    processingConsumer.nextRecord(POLL_TIME); // record 4
    processingConsumer.nextRecord(POLL_TIME); // record 5
    processingConsumer.nextRecord(POLL_TIME); // record 6

    // Ack some of the messages. We should now have some acked and some pending
    processingConsumer.ack(topicPartition, record1.offset());
    processingConsumer.ack(topicPartition, record3.offset());
    processingConsumer.ack(new TopicPartition(record5.topic(), record5.partition()), record5.offset());

    processingConsumer.commitOffsets();

    assertThat(ProcessingKafkaConsumer.COMMIT_METER.count(), is(previousCommitCount + 1));

    Map<TopicPartition, OffsetAndMetadata> committedOffsets = new HashMap<>();

    // Although record 3 is completed, record 2 is still pending, so record 1 is the highest offset we can commit for this partition
    committedOffsets.put(topicPartition, new OffsetAndMetadata(record1.offset() + 1));

    committedOffsets.put(new TopicPartition(record5.topic(), record5.partition()),
            new OffsetAndMetadata(record5.offset() + 1));

    verify(consumer).commitSync(committedOffsets);
    assertThat(processingConsumer.getCommittableOffsets().isEmpty(), is(true));
}