org.apache.kafka.common.record.RecordBatch Java Examples

The following examples show how to use org.apache.kafka.common.record.RecordBatch. Each example is drawn from an open-source project; the originating project, source file, and license are noted above the code.
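
RecordBatch defines the record-format versions ("magic" values) and the sentinel constants that client and broker code pass around, and both show up throughout the examples below. As a quick orientation, here is a minimal standalone sketch, not taken from any of the projects below and assuming a recent kafka-clients version where the current magic value is v2, that prints the constants these examples use:

import org.apache.kafka.common.record.RecordBatch;

public class RecordBatchConstants {
    public static void main(String[] args) {
        // Magic values identify the message format version; v2 is current.
        System.out.println(RecordBatch.MAGIC_VALUE_V2);            // 2
        System.out.println(RecordBatch.CURRENT_MAGIC_VALUE);       // 2
        // Sentinels meaning "not set" / "unknown".
        System.out.println(RecordBatch.NO_TIMESTAMP);              // -1
        System.out.println(RecordBatch.NO_PRODUCER_ID);            // -1
        System.out.println(RecordBatch.NO_PRODUCER_EPOCH);         // -1
        System.out.println(RecordBatch.NO_PARTITION_LEADER_EPOCH); // -1
    }
}
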
Example #1
Source File: KafkaRequestHandler.java    From kop with Apache License 2.0
protected ApiVersionsResponse overloadDefaultApiVersionsResponse() {
    List<ApiVersionsResponse.ApiVersion> versionList = new ArrayList<>();
    for (ApiKeys apiKey : ApiKeys.values()) {
        if (apiKey.minRequiredInterBrokerMagic <= RecordBatch.CURRENT_MAGIC_VALUE) {
            switch (apiKey) {
                case FETCH:
                    // V4 added MessageSets responses. We need to make sure RecordBatch format is not used
                    versionList.add(new ApiVersionsResponse.ApiVersion((short) 1, (short) 4,
                            apiKey.latestVersion()));
                    break;
                case LIST_OFFSETS:
                    // V0 is needed for librdkafka
                    versionList.add(new ApiVersionsResponse.ApiVersion((short) 2, (short) 0,
                            apiKey.latestVersion()));
                    break;
                default:
                    versionList.add(new ApiVersionsResponse.ApiVersion(apiKey));
            }
        }
    }
    return new ApiVersionsResponse(0, Errors.NONE, versionList);
}
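
In the two special cases above, the three-argument ApiVersion constructor takes the API key and the advertised minimum and maximum versions: FETCH (API key 1) is offered from v4 upward and LIST_OFFSETS (API key 2) from v0 upward, while every other API advertises its full supported range.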
 
Example #2
Source File: GroupMetadataManagerTest.java    From kop with Apache License 2.0
private ByteBuffer newMemoryRecordsBuffer(List<SimpleRecord> records,
                                          long producerId,
                                          short producerEpoch,
                                          boolean isTxnOffsetCommit) {
    TimestampType timestampType = TimestampType.CREATE_TIME;
    long timestamp = Time.SYSTEM.milliseconds();

    ByteBuffer buffer = ByteBuffer.allocate(
        AbstractRecords.estimateSizeInBytes(
            RecordBatch.CURRENT_MAGIC_VALUE, offsetConfig.offsetsTopicCompressionType(), records
        )
    );

    MemoryRecordsBuilder builder = MemoryRecords.builder(
        buffer, RecordBatch.CURRENT_MAGIC_VALUE, offsetConfig.offsetsTopicCompressionType(),
        timestampType, 0L, timestamp,
        producerId,
        producerEpoch,
        0,
        isTxnOffsetCommit,
        RecordBatch.NO_PARTITION_LEADER_EPOCH
    );
    records.forEach(builder::append);
    return builder.build().buffer();
}
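
A buffer built this way can be decoded again with MemoryRecords.readableRecords, which is how the tests in Examples #6 and #7 below verify their output. A minimal read-back sketch, assuming buffer is a ByteBuffer positioned at the start of the record data:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;

static void dumpRecords(ByteBuffer buffer) {
    MemoryRecords memRecords = MemoryRecords.readableRecords(buffer);
    for (RecordBatch batch : memRecords.batches()) {
        // Batch-level metadata: format version, producer id, and so on.
        System.out.println("magic=" + batch.magic() + " producerId=" + batch.producerId());
        for (Record record : batch) {
            System.out.println("offset=" + record.offset() + " hasKey=" + record.hasKey());
        }
    }
}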
 
Example #3
Source File: GroupMetadataManagerTest.java    From kop with Apache License 2.0
private int completeTransactionalOffsetCommit(ByteBuffer buffer,
                                              long producerId,
                                              short producerEpoch,
                                              long baseOffset,
                                              boolean isCommit) {
    MemoryRecordsBuilder builder = MemoryRecords.builder(
        buffer, RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE,
        TimestampType.LOG_APPEND_TIME, baseOffset, Time.SYSTEM.milliseconds(),
        producerId, producerEpoch, 0, true, true,
        RecordBatch.NO_PARTITION_LEADER_EPOCH);
    ControlRecordType controlRecordType;
    if (isCommit) {
        controlRecordType = ControlRecordType.COMMIT;
    } else {
        controlRecordType = ControlRecordType.ABORT;
    }
    builder.appendEndTxnMarker(Time.SYSTEM.milliseconds(), new EndTransactionMarker(controlRecordType, 0));
    builder.build();
    return 1;
}
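
Two details are easy to miss here: control records such as the COMMIT/ABORT end-transaction markers exist only in the v2 format, which is why MAGIC_VALUE_V2 is passed explicitly, and the two boolean arguments (true, true) flag the batch as both transactional and a control batch, the combination appendEndTxnMarker requires.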
 
Example #4
Source File: KafkaCruiseControlUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
private static MetadataResponseData.MetadataResponseTopic prepareMetadataResponseTopic(MetadataResponse.TopicMetadata topicMetadata) {
  MetadataResponseData.MetadataResponseTopic metadataResponseTopic = new MetadataResponseData.MetadataResponseTopic();
  metadataResponseTopic.setErrorCode(topicMetadata.error().code())
                       .setName(topicMetadata.topic())
                       .setIsInternal(topicMetadata.isInternal())
                       .setTopicAuthorizedOperations(topicMetadata.authorizedOperations());

  for (MetadataResponse.PartitionMetadata partitionMetadata : topicMetadata.partitionMetadata()) {
    metadataResponseTopic.partitions().add(
        new MetadataResponseData.MetadataResponsePartition()
            .setErrorCode(partitionMetadata.error().code())
            .setPartitionIndex(partitionMetadata.partition())
            .setLeaderId(partitionMetadata.leader() == null ? -1 : partitionMetadata.leader().id())
            .setLeaderEpoch(partitionMetadata.leaderEpoch().orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH))
            .setReplicaNodes(partitionMetadata.replicas().stream().map(Node::id).collect(Collectors.toList()))
            .setIsrNodes(partitionMetadata.isr().stream().map(Node::id).collect(Collectors.toList()))
            .setOfflineReplicas(partitionMetadata.offlineReplicas().stream().map(Node::id).collect(Collectors.toList())));
  }

  return metadataResponseTopic;
}
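
Cruise Control reuses RecordBatch.NO_PARTITION_LEADER_EPOCH (-1) as the wire default whenever a partition's leader epoch is unknown; the test metadata in Examples #9 and #10 passes the same sentinel in the opposite direction.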
 
Example #5
Source File: GroupMetadataManager.java    From kop with Apache License 2.0
public CompletableFuture<Map<TopicPartition, Errors>> storeOffsets(
    GroupMetadata group,
    String consumerId,
    Map<TopicPartition, OffsetAndMetadata> offsetMetadata
) {
    return storeOffsets(
        group,
        consumerId,
        offsetMetadata,
        RecordBatch.NO_PRODUCER_ID,
        RecordBatch.NO_PRODUCER_EPOCH
    );
}
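
The sentinel pair RecordBatch.NO_PRODUCER_ID and RecordBatch.NO_PRODUCER_EPOCH (both -1) marks the commit as non-transactional; the transactional path supplies a real producer id and epoch to the underlying overload instead, as Example #3 does when writing its end-transaction marker.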
 
Example #6
Source File: GroupMetadataManagerTest.java    From kop with Apache License 2.0
@Test
public void testGroupMetadataRemoval() throws Exception {
    @Cleanup
    Consumer<ByteBuffer> consumer = pulsarClient.newConsumer(Schema.BYTEBUFFER)
        .topic(groupMetadataManager.getTopicPartitionName())
        .subscriptionName("test-sub")
        .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
        .subscribe();
    TopicPartition topicPartition1 = new TopicPartition("foo", 0);
    TopicPartition topicPartition2 = new TopicPartition("foo", 1);

    groupMetadataManager.addPartitionOwnership(groupPartitionId);

    GroupMetadata group = new GroupMetadata(groupId, Empty);
    groupMetadataManager.addGroup(group);
    group.generationId(5);

    groupMetadataManager.cleanupGroupMetadata().get();

    Message<ByteBuffer> message = consumer.receive();
    while (message.getValue().array().length == 0) {
        // Skip the placeholder message written above.
        message = consumer.receive();
    }
    assertTrue(message.getEventTime() > 0L);
    assertTrue(message.hasKey());
    byte[] key = message.getKeyBytes();

    BaseKey groupKey = GroupMetadataConstants.readMessageKey(ByteBuffer.wrap(key));
    assertTrue(groupKey instanceof GroupMetadataKey);
    GroupMetadataKey groupMetadataKey = (GroupMetadataKey) groupKey;
    assertEquals(groupId, groupMetadataKey.key());

    ByteBuffer value = message.getValue();
    MemoryRecords memRecords = MemoryRecords.readableRecords(value);
    AtomicBoolean verified = new AtomicBoolean(false);
    memRecords.batches().forEach(batch -> {
        assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, batch.magic());
        assertEquals(TimestampType.CREATE_TIME, batch.timestampType());
        for (Record record : batch) {
            assertFalse(verified.get());
            assertTrue(record.hasKey());
            assertFalse(record.hasValue());
            assertTrue(record.timestamp() > 0);
            BaseKey bk = GroupMetadataConstants.readMessageKey(record.key());
            assertTrue(bk instanceof GroupMetadataKey);
            GroupMetadataKey gmk = (GroupMetadataKey) bk;
            assertEquals(groupId, gmk.key());
            verified.set(true);
        }
    });
    assertTrue(verified.get());
    assertEquals(Optional.empty(), groupMetadataManager.getGroup(groupId));
    Map<TopicPartition, PartitionData> cachedOffsets = groupMetadataManager.getOffsets(
        groupId,
        Optional.of(Lists.newArrayList(
            topicPartition1,
            topicPartition2
        ))
    );
    assertEquals(
        OffsetFetchResponse.INVALID_OFFSET,
        cachedOffsets.get(topicPartition1).offset);
    assertEquals(
        OffsetFetchResponse.INVALID_OFFSET,
        cachedOffsets.get(topicPartition2).offset);

}
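
Note the tombstone shape this test expects: a record with a key but no value, which is how a compacted topic signals deletion of the group metadata entry. The same shape reappears in Example #7, there keyed by OffsetKey rather than GroupMetadataKey.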
 
Example #7
Source File: GroupMetadataManagerTest.java    From kop with Apache License 2.0
@Test
public void testExpireGroupWithOffsetsOnly() throws Exception {
    @Cleanup
    Consumer<ByteBuffer> consumer = pulsarClient.newConsumer(Schema.BYTEBUFFER)
        .topic(groupMetadataManager.getTopicPartitionName())
        .subscriptionName("test-sub")
        .subscriptionInitialPosition(SubscriptionInitialPosition.Earliest)
        .subscribe();
    // Verify that the group is removed properly, but that no tombstone is written
    // if this group only uses Kafka for offset storage.

    String memberId = "";
    TopicPartition topicPartition1 = new TopicPartition("foo", 0);
    TopicPartition topicPartition2 = new TopicPartition("foo", 1);
    long offset = 37;

    groupMetadataManager.addPartitionOwnership(groupPartitionId);

    GroupMetadata group = new GroupMetadata(groupId, Empty);
    groupMetadataManager.addGroup(group);

    long startMs = Time.SYSTEM.milliseconds();
    Map<TopicPartition, OffsetAndMetadata> offsets = ImmutableMap.<TopicPartition, OffsetAndMetadata>builder()
        .put(topicPartition1, OffsetAndMetadata.apply(offset, "", startMs, startMs + 1))
        .put(topicPartition2, OffsetAndMetadata.apply(offset, "", startMs, startMs + 3))
        .build();

    Map<TopicPartition, Errors> commitErrors =
        groupMetadataManager.storeOffsets(group, memberId, offsets).get();
    assertTrue(group.hasOffsets());

    assertFalse(commitErrors.isEmpty());
    assertEquals(
        Errors.NONE,
        commitErrors.get(topicPartition1)
    );

    groupMetadataManager.cleanupGroupMetadata().get();

    Message<ByteBuffer> message = consumer.receive();
    // Skip the records written by `storeOffsets` and any placeholder message.
    while (!message.hasKey()
        || GroupMetadataConstants.readMessageKey(ByteBuffer.wrap(message.getKeyBytes())) instanceof OffsetKey) {
        message = consumer.receive();
    }

    assertTrue(message.getEventTime() > 0L);
    assertTrue(message.hasKey());
    byte[] key = message.getKeyBytes();

    BaseKey groupKey = GroupMetadataConstants.readMessageKey(ByteBuffer.wrap(key));
    assertTrue(groupKey instanceof GroupMetadataKey);
    GroupMetadataKey gmk = (GroupMetadataKey) groupKey;
    assertEquals(groupId, gmk.key());

    ByteBuffer value = message.getValue();
    MemoryRecords memRecords = MemoryRecords.readableRecords(value);
    AtomicInteger verified = new AtomicInteger(2);
    memRecords.batches().forEach(batch -> {
        assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, batch.magic());
        assertEquals(TimestampType.CREATE_TIME, batch.timestampType());
        for (Record record : batch) {
            verified.decrementAndGet();
            assertTrue(record.hasKey());
            assertFalse(record.hasValue());
            assertTrue(record.timestamp() > 0);
            BaseKey bk = GroupMetadataConstants.readMessageKey(record.key());
            assertTrue(bk instanceof OffsetKey);
            OffsetKey ok = (OffsetKey) bk;
            assertEquals(groupId, ok.key().group());
            assertEquals("foo", ok.key().topicPartition().topic());
        }
    });
    assertEquals(0, verified.get());
    assertEquals(Optional.empty(), groupMetadataManager.getGroup(groupId));
    Map<TopicPartition, PartitionData> cachedOffsets = groupMetadataManager.getOffsets(
        groupId,
        Optional.of(Lists.newArrayList(
            topicPartition1,
            topicPartition2
        ))
    );
    assertEquals(
        OffsetFetchResponse.INVALID_OFFSET,
        cachedOffsets.get(topicPartition1).offset);
    assertEquals(
        OffsetFetchResponse.INVALID_OFFSET,
        cachedOffsets.get(topicPartition2).offset);
}
 
Example #8
Source File: KafkaProducerHeadersInstrumentation.java    From apm-agent-java with Apache License 2.0
@SuppressWarnings({"unused", "DuplicatedCode", "ParameterCanBeLocal"})
@Advice.OnMethodEnter(suppress = Throwable.class)
@Nullable
public static Span beforeSend(@Advice.FieldValue("apiVersions") final ApiVersions apiVersions,
                              @Advice.Argument(0) final ProducerRecord record,
                              @Advice.Local("helper") @Nullable KafkaInstrumentationHelper<Callback, ProducerRecord, KafkaProducer> helper,
                              @Nullable @Advice.Argument(value = 1, readOnly = false) Callback callback) {
    if (tracer == null) {
        return null;
    }
    Span span = null;

    //noinspection ConstantConditions
    helper = kafkaInstrHelperManager.getForClassLoaderOfClass(KafkaProducer.class);

    if (helper != null) {
        span = helper.onSendStart(record);
    }
    if (span == null) {
        return null;
    }

    // Avoid adding headers to records sent to a version older than 0.11.0 - see specifications in
    // https://kafka.apache.org/0110/documentation.html#messageformat
    if (apiVersions.maxUsableProduceMagic() >= RecordBatch.MAGIC_VALUE_V2 && headersSupported) {
        try {
            //noinspection ConstantConditions
            KafkaInstrumentationHeadersHelper<ConsumerRecord, ProducerRecord> kafkaInstrumentationHelper =
                kafkaInstrHeadersHelperManager.getForClassLoaderOfClass(KafkaProducer.class);
            if (kafkaInstrumentationHelper != null) {
                kafkaInstrumentationHelper.setOutgoingTraceContextHeaders(span, record);
            }
        } catch (final IllegalStateException e) {
            // headers are in a read-only state
            logger.debug("Failed to add header to Kafka record {}, probably to headers' read-only state.", record);
        }
    }

    //noinspection UnusedAssignment
    callback = helper.wrapCallback(callback, span);
    return span;
}
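
The maxUsableProduceMagic() guard matters because record headers only exist in message format v2 (RecordBatch.MAGIC_VALUE_V2), introduced in Kafka 0.11.0: if the producer would downconvert to an older format, the agent skips injecting trace-context headers rather than attempt to write headers the format cannot carry.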
 
Example #9
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testAggregateWithUpdatedCluster() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaPartitionMetricSampleAggregator
      metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);

  TopicPartition tp1 = new TopicPartition(TOPIC0 + "1", 0);
  Cluster cluster = getCluster(Arrays.asList(TP, tp1));

  List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>(2);
  topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE,
                                                       TOPIC0,
                                                       false,
                                                       Collections.singletonList(new MetadataResponse.PartitionMetadata(
                                                           Errors.NONE, PARTITION, NODE_0,
                                                           Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH),
                                                           Arrays.asList(nodes()), Arrays.asList(nodes()),
                                                           Collections.emptyList()))));
  topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE,
                                                       TOPIC0 + "1",
                                                       false,
                                                       Collections.singletonList(new MetadataResponse.PartitionMetadata(
                                                           Errors.NONE, 0, NODE_0,
                                                           Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH),
                                                           Arrays.asList(nodes()), Arrays.asList(nodes()),
                                                           Collections.emptyList()))));

  MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(cluster.nodes(),
                                                                                      cluster.clusterResource().clusterId(),
                                                                                      MetadataResponse.NO_CONTROLLER_ID,
                                                                                      topicMetadata);
  metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, 1);


  Map<PartitionEntity, ValuesAndExtrapolations> aggregateResult =
      metricSampleAggregator.aggregate(cluster, Long.MAX_VALUE, new OperationProgress()).valuesAndExtrapolations();
  // Partition "topic-0" should be valid in all NUM_WINDOW windows and Partition "topic1-0" should not since
  // there is no sample for it.
  assertEquals(1, aggregateResult.size());
  assertEquals(NUM_WINDOWS, aggregateResult.get(PE).windows().size());

  ModelCompletenessRequirements requirements =
      new ModelCompletenessRequirements(1, 0.0, true);
  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(cluster, -1, Long.MAX_VALUE, requirements, new OperationProgress());
  aggregateResult = result.valuesAndExtrapolations();
  assertNotNull("tp1 should be included because includeAllTopics is set to true",
                aggregateResult.get(new PartitionEntity(tp1)));
  Map<Integer, Extrapolation> extrapolations = aggregateResult.get(new PartitionEntity(tp1)).extrapolations();
  assertEquals(NUM_WINDOWS, extrapolations.size());

  for (int i = 0; i < NUM_WINDOWS; i++) {
    assertEquals(Extrapolation.NO_VALID_EXTRAPOLATION, extrapolations.get(i));
  }
}
 
Example #10
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testAggregateWithPartitionExtrapolations() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaPartitionMetricSampleAggregator
      metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  TopicPartition tp1 = new TopicPartition(TOPIC0, 1);
  Cluster cluster = getCluster(Arrays.asList(TP, tp1));
  PartitionEntity pe1 = new PartitionEntity(tp1);

  List<MetadataResponse.PartitionMetadata> partitionMetadata =
      Collections.singletonList(new MetadataResponse.PartitionMetadata(Errors.NONE, 1, NODE_0,
                                                                       Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH),
                                                                       Arrays.asList(nodes()), Arrays.asList(nodes()),
                                                                       Collections.emptyList()));
  List<MetadataResponse.TopicMetadata> topicMetadata = Collections.singletonList(
      new MetadataResponse.TopicMetadata(Errors.NONE, TOPIC0, false, partitionMetadata));

  MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(cluster.nodes(),
                                                                                      cluster.clusterResource().clusterId(),
                                                                                      MetadataResponse.NO_CONTROLLER_ID,
                                                                                      topicMetadata);
  metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, 1);
  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
  // Populate partition 1 but leave a hole at the NUM_WINDOWS'th window.
  CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 2, MIN_SAMPLES_PER_WINDOW,
                                                      metricSampleAggregator,
                                                      pe1,
                                                      0, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());
  CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW,
                                                      metricSampleAggregator,
                                                      pe1,
                                                      NUM_WINDOWS - 1, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());
  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(cluster, Long.MAX_VALUE, new OperationProgress());
  assertEquals(2, result.valuesAndExtrapolations().size());
  assertTrue(result.valuesAndExtrapolations().get(PE).extrapolations().isEmpty());
  assertEquals(1, result.valuesAndExtrapolations().get(pe1).extrapolations().size());
  assertTrue(result.valuesAndExtrapolations().get(pe1).extrapolations().containsKey(1));
  assertEquals((NUM_WINDOWS - 1) * WINDOW_MS, result.valuesAndExtrapolations().get(pe1).window(1));
  assertEquals(Extrapolation.AVG_ADJACENT, result.valuesAndExtrapolations().get(pe1).extrapolations().get(1));
}
 
Example #11
Source File: ExactMessageHandler.java    From mirrormaker_topic_rename with Apache License 2.0
public List<ProducerRecord<byte[], byte[]>> handle(BaseConsumerRecord record) {
  Long timestamp = record.timestamp() == RecordBatch.NO_TIMESTAMP ? null : record.timestamp();
  return Collections.singletonList(new ProducerRecord<byte[], byte[]>(record.topic(), record.partition(), timestamp, record.key(), record.value(), record.headers()));
}
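
RecordBatch.NO_TIMESTAMP (-1) is what a consumer observes for records written in the pre-timestamp v0 message format; mapping it to null here lets the destination producer assign a fresh timestamp instead of propagating the sentinel.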
 
Example #12
Source File: MockKafkaProducer.java    From samza with Apache License 2.0
public FutureSuccess(ProducerRecord record, int offset) {
  this.record = record;
  this.metadata = new RecordMetadata(new TopicPartition(record.topic(), record.partition() == null ? 0 : record.partition()), 0, offset, RecordBatch.NO_TIMESTAMP, -1L, -1, -1);
}
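
In this RecordMetadata constructor the trailing -1L, -1, -1 arguments are the legacy record checksum and the serialized key and value sizes, none of which the mock tracks.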