org.apache.kafka.common.TopicPartitionInfo Java Examples

The following examples show how to use org.apache.kafka.common.TopicPartitionInfo. They are taken from open source projects; each example notes the source file, project, and license it comes from.
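
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two ways TopicPartitionInfo typically appears: read back from AdminClient#describeTopics, or constructed directly for tests. The broker address "localhost:9092" and the topic name "example-topic" are placeholder assumptions.

import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.TopicPartitionInfo;

public class TopicPartitionInfoSketch {

    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder broker address; adjust for your cluster.
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        try (AdminClient adminClient = AdminClient.create(props)) {
            // Describe the topic and pull out its list of TopicPartitionInfo.
            TopicDescription description = adminClient
                .describeTopics(Collections.singleton("example-topic"))
                .all()
                .get()
                .get("example-topic");

            for (TopicPartitionInfo partitionInfo : description.partitions()) {
                // partition(), leader(), replicas() and isr() are the accessors
                // used throughout the examples below.
                System.out.printf("partition=%d leader=%s replicas=%s isr=%s%n",
                    partitionInfo.partition(),
                    partitionInfo.leader(),
                    partitionInfo.replicas(),
                    partitionInfo.isr());
            }
        }

        // For unit tests, an instance can be built directly:
        // (partition id, leader node, replica list, in-sync replica list).
        Node node = new Node(0, "localhost", 9092);
        TopicPartitionInfo forTest =
            new TopicPartitionInfo(0, node, Collections.singletonList(node), Collections.singletonList(node));
        System.out.println(forTest);
    }
}
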
Example #1
Source File: MultiClusterTopicManagementService.java    From kafka-monitor with Apache License 2.0
void maybeElectLeader() throws Exception {
  if (!_preferredLeaderElectionRequested) {
    return;
  }

  try (KafkaZkClient zkClient = KafkaZkClient.apply(_zkConnect, JaasUtils.isZkSecurityEnabled(), com.linkedin.kmf.common.Utils.ZK_SESSION_TIMEOUT_MS,
      com.linkedin.kmf.common.Utils.ZK_CONNECTION_TIMEOUT_MS, Integer.MAX_VALUE, Time.SYSTEM, METRIC_GROUP_NAME, "SessionExpireListener", null)) {
    if (!zkClient.reassignPartitionsInProgress()) {
      List<TopicPartitionInfo> partitionInfoList = _adminClient
          .describeTopics(Collections.singleton(_topic)).all().get().get(_topic).partitions();
      LOGGER.info(
          "MultiClusterTopicManagementService will trigger requested preferred leader election for the"
              + " topic {} in cluster.", _topic);
      triggerPreferredLeaderElection(partitionInfoList, _topic);
      _preferredLeaderElectionRequested = false;
    }
  }
}
 
Example #2
Source File: Utils.java    From strimzi-kafka-operator with Apache License 2.0
public static TopicMetadata getTopicMetadata(Topic kubeTopic) {
    List<Node> nodes = new ArrayList<>();
    for (int nodeId = 0; nodeId < kubeTopic.getNumReplicas(); nodeId++) {
        nodes.add(new Node(nodeId, "localhost", 9092 + nodeId));
    }
    List<TopicPartitionInfo> partitions = new ArrayList<>();
    for (int partitionId = 0; partitionId < kubeTopic.getNumPartitions(); partitionId++) {
        partitions.add(new TopicPartitionInfo(partitionId, nodes.get(0), nodes, nodes));
    }
    List<ConfigEntry> configs = new ArrayList<>();
    for (Map.Entry<String, String> entry: kubeTopic.getConfig().entrySet()) {
        configs.add(new ConfigEntry(entry.getKey(), entry.getValue()));
    }

    return new TopicMetadata(new TopicDescription(kubeTopic.getTopicName().toString(), false,
            partitions), new Config(configs));
}
 
Example #3
Source File: KafkaTestUtils.java    From kafka-junit with BSD 3-Clause "New" or "Revised" License
/**
 * This will consume all records from all partitions on the given topic.
 * @param <K> Type of key values.
 * @param <V> Type of message values.
 * @param topic Topic to consume from.
 * @param keyDeserializer How to deserialize the key values.
 * @param valueDeserializer How to deserialize the messages.
 * @return List of ConsumerRecords consumed.
 */
public <K, V> List<ConsumerRecord<K, V>> consumeAllRecordsFromTopic(
    final String topic,
    final Class<? extends Deserializer<K>> keyDeserializer,
    final Class<? extends Deserializer<V>> valueDeserializer
) {
    // Find all partitions on topic.
    final TopicDescription topicDescription = describeTopic(topic);
    final Collection<Integer> partitions = topicDescription
        .partitions()
        .stream()
        .map(TopicPartitionInfo::partition)
        .collect(Collectors.toList());

    // Consume messages
    return consumeAllRecordsFromTopic(topic, partitions, keyDeserializer, valueDeserializer);
}
 
Example #4
Source File: KafkaAdminClient.java    From common-kafka with Apache License 2.0
/**
 * Returns the replication factor for the given topic
 *
 * @param topic
 *      a Kafka topic
 * @return the replication factor for the given topic
 *
 * @throws IllegalArgumentException
 *      if topic is null, empty or blank
 * @throws AdminOperationException
 *      if there is an issue retrieving the replication factor
 */
public int getTopicReplicationFactor(String topic) {
    if (StringUtils.isBlank(topic))
        throw new IllegalArgumentException("topic cannot be null, empty or blank");

    LOG.debug("Getting replication factor for topic [{}]", topic);

    Collection<TopicDescription> topicDescription = getTopicDescriptions(Collections.singleton(topic));
    if (topicDescription.isEmpty()) {
        throw new AdminOperationException("Unable to get description for topic: " + topic);
    }

    List<TopicPartitionInfo> topicPartitions = topicDescription.iterator().next().partitions();
    if (topicPartitions.isEmpty()) {
        throw new AdminOperationException("Unable to get partitions for topic: " + topic);
    }

    return topicPartitions.get(0).replicas().size();
}
 
Example #5
Source File: TopicManagementServiceTest.java    From kafka-monitor with Apache License 2.0
@Test
public void detectLowTotalNumberOfPartitions() {
  List<TopicPartitionInfo> partitions = new ArrayList<>();
  Node[] node = nodes(3);
  partitions.add(new TopicPartitionInfo(0, node[0], new ArrayList<>(Arrays.asList(node[0], node[1])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(1, node[1], new ArrayList<>(Arrays.asList(node[1], node[0])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(2, node[2], new ArrayList<>(Arrays.asList(node[2], node[0])), new ArrayList<>()));
  Assert.assertFalse(TopicManagementHelper.someBrokerNotPreferredLeader(partitions, brokers(3)));
  Assert.assertFalse(TopicManagementHelper.someBrokerNotElectedLeader(partitions, brokers(3)));
  Assert.assertEquals(TopicManagementHelper.getReplicationFactor(partitions), 2);
}
 
Example #6
Source File: MultiClusterTopicManagementService.java    From kafka-monitor with Apache License 2.0
void maybeAddPartitions(int minPartitionNum) throws ExecutionException, InterruptedException {
  Map<String, KafkaFuture<TopicDescription>> kafkaFutureMap =
      _adminClient.describeTopics(Collections.singleton(_topic)).values();
  KafkaFuture<TopicDescription> topicDescriptions = kafkaFutureMap.get(_topic);
  List<TopicPartitionInfo> partitions = topicDescriptions.get().partitions();

  int partitionNum = partitions.size();
  if (partitionNum < minPartitionNum) {
    LOGGER.info("{} will increase partition of the topic {} in the cluster from {}"
        + " to {}.", this.getClass().toString(), _topic, partitionNum, minPartitionNum);
    Set<Integer> blackListedBrokers = _topicFactory.getBlackListedBrokers(_zkConnect);
    Set<BrokerMetadata> brokers = new HashSet<>();
    for (Node broker : _adminClient.describeCluster().nodes().get()) {
      BrokerMetadata brokerMetadata = new BrokerMetadata(
          broker.id(), null
      );
      brokers.add(brokerMetadata);
    }

    if (!blackListedBrokers.isEmpty()) {
      brokers.removeIf(broker -> blackListedBrokers.contains(broker.id()));
    }

    List<List<Integer>> newPartitionAssignments = newPartitionAssignments(minPartitionNum, partitionNum, brokers, _replicationFactor);

    NewPartitions newPartitions = NewPartitions.increaseTo(minPartitionNum, newPartitionAssignments);

    Map<String, NewPartitions> newPartitionsMap = new HashMap<>();
    newPartitionsMap.put(_topic, newPartitions);
    CreatePartitionsResult createPartitionsResult = _adminClient.createPartitions(newPartitionsMap);
  }
}
 
Example #7
Source File: MultiClusterTopicManagementService.java    From kafka-monitor with Apache License 2.0
private void triggerPreferredLeaderElection(List<TopicPartitionInfo> partitionInfoList, String partitionTopic)
    throws ExecutionException, InterruptedException {
  Collection<TopicPartition> partitions = new HashSet<>();
  for (TopicPartitionInfo javaPartitionInfo : partitionInfoList) {
    partitions.add(new TopicPartition(partitionTopic, javaPartitionInfo.partition()));
  }
  ElectPreferredLeadersResult electPreferredLeadersResult = _adminClient.electPreferredLeaders(partitions);

  LOGGER.info("{}: triggerPreferredLeaderElection - {}", this.getClass().toString(), electPreferredLeadersResult.all());
}
 
Example #8
Source File: MultiClusterTopicManagementService.java    From kafka-monitor with Apache License 2.0
static int getReplicationFactor(List<TopicPartitionInfo> partitionInfoList) {
  if (partitionInfoList.isEmpty())
    throw new RuntimeException("Partition list is empty.");

  int replicationFactor = partitionInfoList.get(0).replicas().size();
  for (TopicPartitionInfo partitionInfo : partitionInfoList) {
    if (replicationFactor != partitionInfo.replicas().size()) {
      LOGGER.warn("Partitions of the topic have different replication factor.");
      return -1;
    }
  }
  return replicationFactor;
}
 
Example #9
Source File: MultiClusterTopicManagementService.java    From kafka-monitor with Apache License 2.0
static boolean someBrokerNotPreferredLeader(List<TopicPartitionInfo> partitionInfoList, Collection<Node> brokers) {
  Set<Integer> brokersNotPreferredLeader = new HashSet<>(brokers.size());
  for (Node broker: brokers)
    brokersNotPreferredLeader.add(broker.id());
  for (TopicPartitionInfo partitionInfo : partitionInfoList)
    brokersNotPreferredLeader.remove(partitionInfo.replicas().get(0).id());

  return !brokersNotPreferredLeader.isEmpty();
}
 
Example #10
Source File: MultiClusterTopicManagementService.java    From kafka-monitor with Apache License 2.0
static boolean someBrokerNotElectedLeader(List<TopicPartitionInfo> partitionInfoList, Collection<Node> brokers) {
  Set<Integer> brokersNotElectedLeader = new HashSet<>(brokers.size());
  for (Node broker: brokers)
    brokersNotElectedLeader.add(broker.id());
  for (TopicPartitionInfo partitionInfo : partitionInfoList) {
    if (partitionInfo.leader() != null)
      brokersNotElectedLeader.remove(partitionInfo.leader().id());
  }
  return !brokersNotElectedLeader.isEmpty();
}
 
Example #11
Source File: TopicManagementServiceTest.java    From kafka-monitor with Apache License 2.0
@Test
public void noDetection() {
  List<TopicPartitionInfo> partitions = new ArrayList<>();
  Node[] node = nodes(2);
  partitions.add(new TopicPartitionInfo(0, node[0], new ArrayList<>(Arrays.asList(node[0], node[1])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(1, node[0], new ArrayList<>(Arrays.asList(node[0], node[1])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(2, node[1], new ArrayList<>(Arrays.asList(node[1], node[0])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(3, node[1], new ArrayList<>(Arrays.asList(node[1], node[0])), new ArrayList<>()));

  Assert.assertFalse(TopicManagementHelper.someBrokerNotPreferredLeader(partitions, brokers(2)));
  Assert.assertFalse(TopicManagementHelper.someBrokerNotElectedLeader(partitions, brokers(2)));
}
 
Example #12
Source File: LeaderInSyncByPartitionFunctionTest.java    From data-highway with Apache License 2.0
@Test
public void notLocalBroker() throws Exception {
  TopicPartitionInfo tpi = new TopicPartitionInfo(0, node0, singletonList(node1), singletonList(node0));
  KafkaFuture<Map<String, TopicDescription>> kafkaFuture = topicDescriptionFuture(tpi);

  doReturn(describeTopicsResult).when(adminClient).describeTopics(topics);
  doReturn(kafkaFuture).when(describeTopicsResult).all();

  Map<TopicPartition, LeaderInSync> result = underTest.apply(0, topics);

  assertThat(result.size(), is(0));
}
 
Example #13
Source File: TopicManagementServiceTest.java    From kafka-monitor with Apache License 2.0
@Test
public void detectBrokerWithoutLeader() {
  List<TopicPartitionInfo> partitions = new ArrayList<>();
  Node[] node = nodes(3);
  partitions.add(new TopicPartitionInfo(0, node[0], new ArrayList<>(Arrays.asList(node[0], node[1])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(1, node[0], new ArrayList<>(Arrays.asList(node[0], node[1])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(2, node[1], new ArrayList<>(Arrays.asList(node[1], node[0])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(3, node[1], new ArrayList<>(Arrays.asList(node[2], node[1])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(4, node[1], new ArrayList<>(Arrays.asList(node[2], node[0])), new ArrayList<>()));

  Assert.assertFalse(TopicManagementHelper.someBrokerNotPreferredLeader(partitions, brokers(3)));
  Assert.assertTrue(TopicManagementHelper.someBrokerNotElectedLeader(partitions, brokers(3)));
}
 
Example #14
Source File: TopicManagementServiceTest.java    From kafka-monitor with Apache License 2.0
@Test
public void detectBrokerWithoutPreferredLeader() {
  List<TopicPartitionInfo> partitions = new ArrayList<>();
  Node[] node = nodes(3);
  partitions.add(new TopicPartitionInfo(0, node[0], new ArrayList<>(Arrays.asList(node[0], node[1])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(1, node[0], new ArrayList<>(Arrays.asList(node[0], node[1])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(2, node[1], new ArrayList<>(Arrays.asList(node[0], node[0])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(3, node[1], new ArrayList<>(Arrays.asList(node[2], node[1])), new ArrayList<>()));
  partitions.add(new TopicPartitionInfo(4, node[1], new ArrayList<>(Arrays.asList(node[2], node[0])), new ArrayList<>()));

  Assert.assertTrue(TopicManagementHelper.someBrokerNotPreferredLeader(partitions, brokers(3)));
  Assert.assertTrue(TopicManagementHelper.someBrokerNotElectedLeader(partitions, brokers(3)));
}
 
Example #15
Source File: KafkaAvailability.java    From strimzi-kafka-operator with Apache License 2.0
private Set<TopicDescription> groupTopicsByBroker(Collection<TopicDescription> tds, int podId) {
    Set<TopicDescription> topicPartitionInfos = new HashSet<>();
    for (TopicDescription td : tds) {
        log.trace("{}", td);
        for (TopicPartitionInfo pd : td.partitions()) {
            for (Node broker : pd.replicas()) {
                if (podId == broker.id()) {
                    topicPartitionInfos.add(td);
                }
            }
        }
    }
    return topicPartitionInfos;
}
 
Example #16
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0
void mockDescribeTopics(Admin mockAc) {
    when(mockAc.describeTopics(any())).thenAnswer(invocation -> {
        DescribeTopicsResult dtr = mock(DescribeTopicsResult.class);
        Collection<String> topicNames = invocation.getArgument(0);
        Throwable throwable = null;
        for (String topicName : topicNames) {
            throwable = describeTopicsResult.get(topicName);
            if (throwable != null) {
                break;
            }
        }
        if (throwable != null) {
            when(dtr.all()).thenReturn(failedFuture(throwable));
        } else {
            Map<String, TopicDescription> tds = topics.entrySet().stream().collect(Collectors.toMap(
                e -> e.getKey(),
                e -> {
                    TSB tsb = e.getValue();
                    return new TopicDescription(tsb.name, tsb.internal,
                            tsb.partitions.entrySet().stream().map(e1 -> {
                                TSB.PSB psb = e1.getValue();
                                return new TopicPartitionInfo(psb.id,
                                        psb.leader != null ? node(psb.leader) : Node.noNode(),
                                        Arrays.stream(psb.replicaOn).boxed().map(broker -> node(broker)).collect(Collectors.toList()),
                                        Arrays.stream(psb.isr).boxed().map(broker -> node(broker)).collect(Collectors.toList()));
                            }).collect(Collectors.toList()));
                }
            ));
            when(dtr.all()).thenReturn(KafkaFuture.completedFuture(tds));
            when(dtr.values()).thenThrow(notImplemented());
        }
        return dtr;
    });
}
 
Example #17
Source File: Utils.java    From strimzi-kafka-operator with Apache License 2.0
public static TopicMetadata getTopicMetadata(String topicName, Config config) {
    Node node0 = new Node(0, "host0", 1234);
    Node node1 = new Node(1, "host1", 1234);
    Node node2 = new Node(2, "host2", 1234);
    List<Node> nodes02 = asList(node0, node1, node2);
    TopicDescription desc = new TopicDescription(topicName, false, asList(
            new TopicPartitionInfo(0, node0, nodes02, nodes02),
            new TopicPartitionInfo(1, node0, nodes02, nodes02)
    ));
    //org.apache.kafka.clients.admin.Config config = new Config(configs);
    return new TopicMetadata(desc, config);
}
 
Example #18
Source File: KafkaChannelDefinitionProcessorTest.java    From flowable-engine with Apache License 2.0
@AfterEach
void tearDown() throws Exception {
    testEventConsumer.clear();

    List<EventDeployment> deployments = eventRepositoryService.createDeploymentQuery().list();
    for (EventDeployment eventDeployment : deployments) {
        eventRepositoryService.deleteDeployment(eventDeployment.getId());
    }

    eventRegistry.removeFlowableEventRegistryEventConsumer(testEventConsumer);

    Map<TopicPartition, RecordsToDelete> recordsToDelete = new HashMap<>();
    Map<String, TopicDescription> topicDescriptions = adminClient.describeTopics(topicsToDelete)
        .all()
        .get(10, TimeUnit.SECONDS);

    try (Consumer<Object, Object> consumer = consumerFactory.createConsumer("test", "testCleanup")) {

        List<TopicPartition> partitions = new ArrayList<>();
        for (TopicDescription topicDescription : topicDescriptions.values()) {
            for (TopicPartitionInfo partition : topicDescription.partitions()) {
                partitions.add(new TopicPartition(topicDescription.name(), partition.partition()));
            }
        }

        for (Map.Entry<TopicPartition, Long> entry : consumer.endOffsets(partitions).entrySet()) {
            recordsToDelete.put(entry.getKey(), RecordsToDelete.beforeOffset(entry.getValue()));
        }

    }

    adminClient.deleteRecords(recordsToDelete)
        .all()
        .get(10, TimeUnit.SECONDS);
}
 
Example #19
Source File: ClientKafkaMonitor.java    From Kafdrop with Apache License 2.0
private TopicPartitionVO createTopicPartition(TopicPartitionInfo partition)
{
   TopicPartitionVO topicPartition = new TopicPartitionVO(partition.partition());
   partition.replicas()
      .forEach(replica -> topicPartition.addReplica(
         new TopicPartitionVO.PartitionReplica(replica.id(),
                                               partition.isr().contains(replica),
                                               partition.leader().id() == replica.id())));
   return topicPartition;
}
 
Example #20
Source File: JoinNodeTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private void setupTopicClientExpectations(int streamPartitions, int tablePartitions) {
  Node node = new Node(0, "localhost", 9091);

  List<TopicPartitionInfo> streamPartitionInfoList =
      IntStream.range(0, streamPartitions)
          .mapToObj(
              p -> new TopicPartitionInfo(p, node, Collections.emptyList(), Collections.emptyList()))
          .collect(Collectors.toList());
  EasyMock.expect(topicClient.describeTopics(Arrays.asList("test1")))
      .andReturn(
          Collections.singletonMap(
              "test1",
              new TopicDescription("test1", false, streamPartitionInfoList)));

  List<TopicPartitionInfo> tablePartitionInfoList =
      IntStream.range(0, tablePartitions)
      .mapToObj(
          p -> new TopicPartitionInfo(p, node, Collections.emptyList(), Collections.emptyList()))
      .collect(Collectors.toList());
  EasyMock.expect(topicClient.describeTopics(Arrays.asList("test2")))
      .andReturn(
          Collections.singletonMap(
              "test2",
              new TopicDescription("test2", false, tablePartitionInfoList)));
  EasyMock.replay(topicClient);
}
 
Example #21
Source File: LeaderInSyncByPartitionFunctionTest.java    From data-highway with Apache License 2.0
@Test
public void typical() throws Exception {
  TopicPartitionInfo tpi = new TopicPartitionInfo(0, node0, singletonList(node0), singletonList(node0));
  KafkaFuture<Map<String, TopicDescription>> kafkaFuture = topicDescriptionFuture(tpi);

  doReturn(describeTopicsResult).when(adminClient).describeTopics(topics);
  doReturn(kafkaFuture).when(describeTopicsResult).all();

  Map<TopicPartition, LeaderInSync> result = underTest.apply(0, topics);

  assertThat(result.size(), is(1));
  LeaderInSync leaderInSync = result.get(new TopicPartition(topic, 0));
  assertThat(leaderInSync.isLeader(), is(true));
  assertThat(leaderInSync.isInSync(), is(true));
}
 
Example #22
Source File: LeaderInSyncByPartitionFunctionTest.java    From data-highway with Apache License 2.0
@Test
public void notLeader() throws Exception {
  TopicPartitionInfo tpi = new TopicPartitionInfo(0, node1, singletonList(node0), singletonList(node0));
  KafkaFuture<Map<String, TopicDescription>> kafkaFuture = topicDescriptionFuture(tpi);

  doReturn(describeTopicsResult).when(adminClient).describeTopics(topics);
  doReturn(kafkaFuture).when(describeTopicsResult).all();

  Map<TopicPartition, LeaderInSync> result = underTest.apply(0, topics);

  assertThat(result.size(), is(1));
  LeaderInSync leaderInSync = result.get(new TopicPartition(topic, 0));
  assertThat(leaderInSync.isLeader(), is(false));
  assertThat(leaderInSync.isInSync(), is(true));
}
 
Example #23
Source File: LeaderInSyncByPartitionFunctionTest.java    From data-highway with Apache License 2.0
@Test
public void notInSync() throws Exception {
  TopicPartitionInfo tpi = new TopicPartitionInfo(0, node0, singletonList(node0), singletonList(node1));
  KafkaFuture<Map<String, TopicDescription>> kafkaFuture = topicDescriptionFuture(tpi);

  doReturn(describeTopicsResult).when(adminClient).describeTopics(topics);
  doReturn(kafkaFuture).when(describeTopicsResult).all();

  Map<TopicPartition, LeaderInSync> result = underTest.apply(0, topics);

  assertThat(result.size(), is(1));
  LeaderInSync leaderInSync = result.get(new TopicPartition(topic, 0));
  assertThat(leaderInSync.isLeader(), is(true));
  assertThat(leaderInSync.isInSync(), is(false));
}
 
Example #24
Source File: LeaderInSyncByPartitionFunctionTest.java    From data-highway with Apache License 2.0
@Test
public void noLeader() throws Exception {
  TopicPartitionInfo tpi = new TopicPartitionInfo(0, null, singletonList(node0), singletonList(node0));
  KafkaFuture<Map<String, TopicDescription>> kafkaFuture = topicDescriptionFuture(tpi);

  doReturn(describeTopicsResult).when(adminClient).describeTopics(topics);
  doReturn(kafkaFuture).when(describeTopicsResult).all();

  Map<TopicPartition, LeaderInSync> result = underTest.apply(0, topics);

  assertThat(result.size(), is(1));
  LeaderInSync leaderInSync = result.get(new TopicPartition(topic, 0));
  assertThat(leaderInSync.isLeader(), is(false));
  assertThat(leaderInSync.isInSync(), is(true));
}
 
Example #25
Source File: TopicServiceImplTest.java    From kafka-helmsman with MIT License
@Test
public void testConfiguredTopic() {
  Cluster cluster = createCluster(1);
  TopicPartitionInfo tp = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList());
  TopicDescription td = new TopicDescription("test", false, Collections.singletonList(tp));
  ConfigEntry configEntry = mock(ConfigEntry.class);
  when(configEntry.source()).thenReturn(ConfigEntry.ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG);
  KafkaFuture<Config> kfc = KafkaFuture.completedFuture(new Config(Collections.singletonList(configEntry)));
  ConfiguredTopic expected = new ConfiguredTopic("test", 1, (short) 1, Collections.emptyMap());
  Assert.assertEquals(expected, TopicServiceImpl.configuredTopic(td, kfc));
}
 
Example #26
Source File: TopicServiceImplTest.java    From kafka-helmsman with MIT License
@Test
public void testListExisting() {
  Cluster cluster = createCluster(1);
  TopicPartitionInfo tp = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList());
  ConfigEntry configEntry = new ConfigEntry("k", "v");
  KafkaFuture<Config> kfc = KafkaFuture.completedFuture(new Config(Collections.singletonList(configEntry)));
  Set<String> topicNames = new HashSet<>(Arrays.asList("a", "b", "_c"));
  Map<String, TopicDescription> tds = new HashMap<String, TopicDescription>() {
    {
      put("a", new TopicDescription("a", false, Collections.singletonList(tp)));
      put("b", new TopicDescription("b", false, Collections.singletonList(tp)));
      put("c", new TopicDescription("_c", false, Collections.singletonList(tp)));
    }
  };
  Map<ConfigResource, KafkaFuture<Config>> configs = new HashMap<ConfigResource, KafkaFuture<Config>>() {
    {
      put(new ConfigResource(TOPIC, "a"), kfc);
      put(new ConfigResource(TOPIC, "b"), kfc);
      put(new ConfigResource(TOPIC, "_c"), kfc);
    }
  };

  TopicService service = new TopicServiceImpl(adminClient, true);
  ListTopicsResult listTopicsResult = mock(ListTopicsResult.class);
  DescribeTopicsResult describeTopicsResult = mock(DescribeTopicsResult.class);
  DescribeConfigsResult describeConfigsResult = mock(DescribeConfigsResult.class);

  when(describeTopicsResult.all()).thenReturn(KafkaFuture.completedFuture(tds));
  when(listTopicsResult.names()).thenReturn(KafkaFuture.completedFuture(topicNames));
  when(describeConfigsResult.values()).thenReturn(configs);
  when(adminClient.listTopics(any(ListTopicsOptions.class))).thenReturn(listTopicsResult);
  when(adminClient.describeTopics(topicNames)).thenReturn(describeTopicsResult);
  when(adminClient.describeConfigs(any(Collection.class))).thenReturn(describeConfigsResult);

  Map<String, ConfiguredTopic> actual = service.listExisting(true);
  Assert.assertEquals(2, actual.size());
  Assert.assertEquals(new HashSet<>(Arrays.asList("a", "b")), actual.keySet());
}
 
Example #27
Source File: KafkaTopicClientImplTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
private DescribeTopicsResult getDescribeTopicsResult() {
  TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, node, Collections
      .singletonList(node), Collections.singletonList(node));
  TopicDescription topicDescription = new TopicDescription(
      topicName1, false, Collections.singletonList(topicPartitionInfo));
  DescribeTopicsResult describeTopicsResult = mock(DescribeTopicsResult.class);
  expect(describeTopicsResult.all()).andReturn(
      KafkaFuture.completedFuture(Collections.singletonMap(topicName1, topicDescription)));
  replay(describeTopicsResult);
  return describeTopicsResult;
}
 
Example #28
Source File: FakeKafkaTopicClient.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
public TopicDescription getDescription() {
  Node node = new Node(0, "localhost", 9091);
  List<TopicPartitionInfo> partitionInfoList =
      IntStream.range(0, numPartitions)
          .mapToObj(
              p -> new TopicPartitionInfo(p, node, Collections.emptyList(), Collections.emptyList()))
          .collect(Collectors.toList());
  return new TopicDescription(topicName, false, partitionInfoList);
}
 
Example #29
Source File: KafkaTopicsListTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@Test
public void shouldBuildValidTopicList() {

  Collection<KsqlTopic> ksqlTopics = Collections.emptyList();
  // represent the full list of topics
  Map<String, TopicDescription> topicDescriptions = new HashMap<>();
  TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(1, new Node(1, "", 8088),
                                                                 Collections.emptyList(), Collections.emptyList());
  topicDescriptions.put("test-topic", new TopicDescription("test-topic", false, Collections.singletonList(topicPartitionInfo)));


  /**
   * Return POJO for consumerGroupClient
   */
  TopicPartition topicPartition = new TopicPartition("test-topic", 1);
  KafkaConsumerGroupClientImpl.ConsumerSummary consumerSummary = new KafkaConsumerGroupClientImpl.ConsumerSummary("consumer-id");
  consumerSummary.addPartition(topicPartition);
  KafkaConsumerGroupClientImpl.ConsumerGroupSummary consumerGroupSummary = new KafkaConsumerGroupClientImpl.ConsumerGroupSummary();
  consumerGroupSummary.addConsumerSummary(consumerSummary);



  KafkaConsumerGroupClient consumerGroupClient = mock(KafkaConsumerGroupClient.class);
  expect(consumerGroupClient.listGroups()).andReturn(Collections.singletonList("test-topic"));
  expect(consumerGroupClient.describeConsumerGroup("test-topic")).andReturn(consumerGroupSummary);
  replay(consumerGroupClient);

  /**
   * Test
   */

  KafkaTopicsList topicsList = KafkaTopicsList.build("statement test", ksqlTopics, topicDescriptions, new KsqlConfig(Collections.EMPTY_MAP), consumerGroupClient);

  assertThat(topicsList.getTopics().size(), equalTo(1));
  KafkaTopicInfo first = topicsList.getTopics().iterator().next();
  assertThat(first.getConsumerGroupCount(), equalTo(1));
  assertThat(first.getConsumerCount(), equalTo(1));
  assertThat(first.getReplicaInfo().size(), equalTo(1));
}
 
Example #30
Source File: ClusterTopicInfo.java    From kafka-message-tool with MIT License
public ClusterTopicInfo(String topicName,
                        List<TopicPartitionInfo> partitions,
                        Set<ConfigEntry> configEntries) {

    this.topicName = topicName;
    this.partitions = partitions;
    this.configEntries = configEntries;
}