Java Code Examples for org.apache.kafka.clients.admin.TopicDescription#partitions()

The following examples show how to use org.apache.kafka.clients.admin.TopicDescription#partitions(). Each example is taken from an open-source project; the originating project, source file, and license are noted above each one.
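Before the project examples, here is a minimal, self-contained sketch of the usual call pattern (not taken from any of the projects below): create an AdminClient, describe one or more topics, and iterate over TopicDescription#partitions() to inspect each partition's leader, replicas, and in-sync replicas. The bootstrap address and topic name are placeholders, not values from the examples.

import java.util.Collections;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.TopicPartitionInfo;

public class TopicPartitionInspector {

    public static void main(String[] args) throws Exception {
        final Properties props = new Properties();
        // Placeholder address; point this at your own cluster.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (AdminClient adminClient = AdminClient.create(props)) {
            // Describe one or more topics; "my-topic" is a placeholder name.
            final Map<String, TopicDescription> descriptions = adminClient
                    .describeTopics(Collections.singletonList("my-topic"))
                    .all()
                    .get();

            for (final TopicDescription description : descriptions.values()) {
                // partitions() returns one TopicPartitionInfo per partition, exposing
                // the partition number, its leader, its replicas, and its in-sync replicas.
                for (final TopicPartitionInfo partition : description.partitions()) {
                    System.out.printf("%s-%d leader=%s replicas=%d isr=%d%n",
                            description.name(),
                            partition.partition(),
                            partition.leader(),
                            partition.replicas().size(),
                            partition.isr().size());
                }
            }
        }
    }
}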
Example 1
Source File: TopicAdmin.java    From kafka-message-tool with MIT License
public Set<ClusterTopicInfo> describeTopics() throws InterruptedException, ExecutionException, TimeoutException {

    Set<ClusterTopicInfo> result = new HashSet<>();
    final ListTopicsResult listTopicsResult = kafkaClientsAdminClient.listTopics(new ListTopicsOptions().listInternal(false));
    final Collection<TopicListing> listings = listTopicsResult.listings().get(ApplicationConstants.FUTURE_GET_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    Logger.debug(String.format("describeTopics.listings %s", listings));

    final Set<String> topicNames = listTopicsResult.names().get(ApplicationConstants.FUTURE_GET_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    final DescribeTopicsResult describeTopicsResult = kafkaClientsAdminClient.describeTopics(topicNames);
    final Map<String, TopicDescription> stringTopicDescriptionMap = describeTopicsResult.all().get(ApplicationConstants.FUTURE_GET_TIMEOUT_MS,
                                                                                                   TimeUnit.MILLISECONDS);

    for (Map.Entry<String, TopicDescription> entry : stringTopicDescriptionMap.entrySet()) {
        final TopicDescription topicDescription = entry.getValue();
        final ClusterTopicInfo clusterTopicInfo = new ClusterTopicInfo(topicDescription.name(),
                                                                       topicDescription.partitions(),
                                                                       getConfigEntriesForTopic(topicDescription.name()));
        result.add(clusterTopicInfo);
    }
    return result;
}
 
Example 2
Source File: KafkaAdminClient.java    From common-kafka with Apache License 2.0
private Set<TopicAndPartition> getPartitions(Collection<String> topics) {
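    // Flatten each topic's partitions() into a set of (topic, partition) pairs.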
    Set<TopicAndPartition> partitions = new HashSet<>();
    for (TopicDescription topicDescription : getTopicDescriptions(topics)) {
        for (TopicPartitionInfo partition : topicDescription.partitions()) {
            partitions.add(new TopicAndPartition(topicDescription.name(), partition.partition()));
        }
    }
    return Collections.unmodifiableSet(partitions);
}
 
Example 3
Source File: KafkaTestClusterTest.java    From kafka-junit with BSD 3-Clause "New" or "Revised" License
/**
 * This test starts a cluster with 2 brokers. It then attempts to create a topic
 * that spans both brokers.  It acts mostly as a sanity test rather than a validation of the library's behavior.
 */
@Test
void testCreateTopicAcrossMultipleBrokers() throws Exception {
    final int numberOfBrokers = 2;
    final String topicName = "MultiBrokerTest2-" + System.currentTimeMillis();

    try (final KafkaTestCluster kafkaTestCluster
        = new KafkaTestCluster(numberOfBrokers, getDefaultBrokerOverrideProperties())) {

        // Start the cluster
        kafkaTestCluster.start();

        // Create test utils instance.
        final KafkaTestUtils testUtils = new KafkaTestUtils(kafkaTestCluster);

        // Define a new topic with 2 partitions and a replication factor of 2.
        testUtils.createTopic(topicName, numberOfBrokers, (short) numberOfBrokers);

        // Let's describe the topic.
        final TopicDescription topicDescription = testUtils.describeTopic(topicName);

        // Validate it has 2 partitions
        Assertions.assertEquals(numberOfBrokers, topicDescription.partitions().size(), "Correct number of partitions.");

        // Validate the partitions have 2 replicas
        for (final TopicPartitionInfo topicPartitionInfo : topicDescription.partitions()) {
            Assertions.assertEquals(numberOfBrokers, topicPartitionInfo.replicas().size(), "Should have 2 replicas");
            Assertions.assertEquals(numberOfBrokers, topicPartitionInfo.isr().size(), "Should have 2 In-Sync-Replicas");
        }
    }
}
 
Example 4
Source File: KafkaAvailability.java    From strimzi-kafka-operator with Apache License 2.0
private Set<TopicDescription> groupTopicsByBroker(Collection<TopicDescription> tds, int podId) {
    // Collect every topic that has at least one partition replica hosted on the given broker (podId).
    Set<TopicDescription> topicsOnBroker = new HashSet<>();
    for (TopicDescription td : tds) {
        log.trace("{}", td);
        for (TopicPartitionInfo pd : td.partitions()) {
            for (Node broker : pd.replicas()) {
                if (podId == broker.id()) {
                    topicsOnBroker.add(td);
                }
            }
        }
    }
    return topicsOnBroker;
}
 
Example 5
Source File: KafkaChannelDefinitionProcessorTest.java    From flowable-engine with Apache License 2.0
@AfterEach
void tearDown() throws Exception {
    testEventConsumer.clear();

    List<EventDeployment> deployments = eventRepositoryService.createDeploymentQuery().list();
    for (EventDeployment eventDeployment : deployments) {
        eventRepositoryService.deleteDeployment(eventDeployment.getId());
    }

    eventRegistry.removeFlowableEventRegistryEventConsumer(testEventConsumer);

    // Collect every partition of the test topics, then delete all records up to each partition's current end offset.
    Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
    Map<String, TopicDescription> topicDescriptions = adminClient.describeTopics(topicsToDelete)
        .all()
        .get(10, TimeUnit.SECONDS);

    try (Consumer<Object, Object> consumer = consumerFactory.createConsumer("test", "testCleanup")) {

        List<TopicPartition> partitions = new ArrayList<>();
        for (TopicDescription topicDescription : topicDescriptions.values()) {
            for (TopicPartitionInfo partition : topicDescription.partitions()) {
                partitions.add(new TopicPartition(topicDescription.name(), partition.partition()));
            }
        }

        for (Map.Entry<TopicPartition, Long> entry : consumer.endOffsets(partitions).entrySet()) {
            recordsToDelete.put(entry.getKey(), RecordsToDelete.beforeOffset(entry.getValue()));
        }

    }

    adminClient.deleteRecords(recordsToDelete)
        .all()
        .get(10, TimeUnit.SECONDS);
}
 
Example 6
Source File: KafkaTestClusterTest.java    From kafka-junit with BSD 3-Clause "New" or "Revised" License
/**
 * Sanity test that a 2-node cluster behaves how we would expect it to.  It also serves as an example
 * of how you can start a multi-node cluster and then individually shut down a broker to validate
 * the behavior of your application.
 *
 * This test does the following:
 *      - Starts a 2-node cluster.
 *      - Creates a topic with Partition Count = 2, ReplicationFactor = 2.
 *      - Publishes 2 messages to each partition (4 messages total).
 *      - Stops brokerId 2.  At this point, leadership of the partition that broker 2 led should transfer to broker 1.
 *      - Consumes the topic from the remaining broker.
 *      - Validates that all messages are retrieved, including those that were originally published
 *        to the broker which is now off-line.
 */
@Test
void testConsumingFromMultiBrokerClusterWhenBrokerIsStopped() throws Exception {
    final int numberOfBrokers = 2;
    final int numberOfPartitions = 2;
    final int numberOfMessagesPerPartition = 2;
    final int replicaFactor = 2;
    final String topicName = "MultiBrokerTest3-" + System.currentTimeMillis();

    try (final KafkaTestCluster kafkaTestCluster
        = new KafkaTestCluster(numberOfBrokers)) {

        // Start the cluster
        kafkaTestCluster.start();

        // Create test utils instance.
        final KafkaTestUtils testUtils = new KafkaTestUtils(kafkaTestCluster);

        // Create the topic, 2 partitions, replica factor of 2
        testUtils.createTopic(topicName, numberOfPartitions, (short) replicaFactor);

        // Describe the topic.
        final TopicDescription topicDescription = testUtils.describeTopic(topicName);

        // Validate it has 2 partitions
        Assertions.assertEquals(numberOfPartitions, topicDescription.partitions().size(), "Should have multiple partitions");

        // Validate each partition has a different leader broker, and each partition has 2 ISRs.
        final Set<Integer> leaderIds = new HashSet<>();
        for (final TopicPartitionInfo partitionInfo : topicDescription.partitions()) {
            // Each partition should have 2 ISRs
            Assertions.assertEquals(
                replicaFactor,
                partitionInfo.isr().size(),
                "Partition " + partitionInfo.partition() + " missing ISR"
            );

            // Add leader Id to set.
            leaderIds.add(partitionInfo.leader().id());
        }
        Assertions.assertEquals(2, leaderIds.size(), "Should have two leaders");

        // Attempt to publish into each partition in the topic.
        for (int partitionId = 0; partitionId < numberOfPartitions; partitionId++) {
            // Produce records.
            final List<ProducedKafkaRecord<byte[], byte[]>> producedRecords
                = testUtils.produceRecords(numberOfMessagesPerPartition, topicName, partitionId);

            // Let's do some simple validation
            for (final ProducedKafkaRecord producedRecord: producedRecords) {
                Assertions.assertEquals(partitionId, producedRecord.getPartition(), "Should be on correct partition");
                Assertions.assertEquals(topicName, producedRecord.getTopic(), "Should be on correct topic");
            }
        }

        // Stop brokerId 2.
        kafkaTestCluster
            .getKafkaBrokerById(2)
            .stop();

        // It may take a moment for the broker to cleanly shut down.
        List<Node> nodes;
        for (int attempts = 0; attempts <= 5; attempts++) {
            // Describe the cluster and wait for it to go to 1 broker.
            nodes = testUtils.describeClusterNodes();
            if (nodes.size() == 1) {
                break;
            }
            Thread.sleep(1000L);
        }

        // Consume all messages
        final List<ConsumerRecord<byte[], byte[]>> consumedRecords = testUtils.consumeAllRecordsFromTopic(topicName);

        // Validate we have (numberOfMessagesPerPartition * numberOfPartitions) records.
        Assertions.assertEquals(
            (numberOfMessagesPerPartition * numberOfPartitions),
            consumedRecords.size(),
            "Found all records in kafka."
        );
    }
}
 
Example 7
Source File: KafkaAvailability.java    From strimzi-kafka-operator with Apache License 2.0
private boolean wouldAffectAvailability(int broker, Map<String, Config> nameToConfig, TopicDescription td) {
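    // Returns true when restarting the given broker risks leaving a partition with fewer
    // in-sync replicas than its configured min.insync.replicas (or the partition is already
    // under-replicated and this broker hosts one of its replicas).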
    Config config = nameToConfig.get(td.name());
    ConfigEntry minIsrConfig = config.get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG);
    int minIsr;
    if (minIsrConfig != null && minIsrConfig.value() != null) {
        minIsr = parseInt(minIsrConfig.value());
        log.debug("{} has {}={}.", td.name(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr);
    } else {
        minIsr = -1;
        log.debug("{} lacks {}.", td.name(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG);
    }

    for (TopicPartitionInfo pi : td.partitions()) {
        List<Node> isr = pi.isr();
        if (minIsr >= 0) {
            if (pi.replicas().size() <= minIsr) {
                log.debug("{}/{} will be underreplicated (|ISR|={} and {}={}) if broker {} is restarted, but there are only {} replicas.",
                        td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker,
                        pi.replicas().size());
            } else if (isr.size() < minIsr
                    && contains(pi.replicas(), broker)) {
                logIsrReplicas(td, pi, isr);
                log.info("{}/{} is already underreplicated (|ISR|={}, {}={}); broker {} has a replica, " +
                                "so should not be restarted right now (it might be first to catch up).",
                        td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker);
                return true;
            } else if (isr.size() == minIsr
                    && contains(isr, broker)) {
                if (minIsr < pi.replicas().size()) {
                    logIsrReplicas(td, pi, isr);
                    log.info("{}/{} will be underreplicated (|ISR|={} and {}={}) if broker {} is restarted.",
                            td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker);
                    return true;
                } else {
                    log.debug("{}/{} will be underreplicated (|ISR|={} and {}={}) if broker {} is restarted, but there are only {} replicas.",
                            td.name(), pi.partition(), isr.size(), TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minIsr, broker,
                            pi.replicas().size());
                }
            }
        }
    }
    return false;
}