org.apache.kafka.common.PartitionInfo Java Examples

The following examples show how to use org.apache.kafka.common.PartitionInfo. They are drawn from open source projects; the source file, project, and license are noted above each example so you can follow them back to the original code.
Example #1
Source File: KafkaClusterManager.java    From doctorkafka with Apache License 2.0
/**
 *  Remove the under-replicated partitions that are in the middle of partition reassignment.
 */
public List<PartitionInfo> filterOutInReassignmentUrps(List<PartitionInfo> urps,
                                                       Map<String, Integer> replicationFactors) {
  List<PartitionInfo> result = new ArrayList<>();
  for (PartitionInfo urp : urps) {
    if (urp.replicas().length <= replicationFactors.get(urp.topic())) {
      // # of replicas <= replication factor
      result.add(urp);
    } else {
      // # of replicas > replication factor. this can happen after
      // a failed partition reassignment
      Set<Integer> liveReplicas = new HashSet<>();
      for (Node node : urp.replicas()) {
        if (node.host() != null && OperatorUtil.pingKafkaBroker(node.host(), 9092, 5000)) {
          liveReplicas.add(node.id());
        }
      }
      if (liveReplicas.size() < replicationFactors.get(urp.topic())) {
        result.add(urp);
      }
    }
  }
  return result;
}
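The liveness probe above is delegated to OperatorUtil.pingKafkaBroker, which is defined elsewhere in doctorkafka. A minimal sketch of such a check, assuming a plain TCP connect with a timeout is an acceptable proxy for broker health:

public static boolean pingKafkaBroker(String host, int port, int timeoutMs) {
    // hypothetical implementation: a broker is considered live if its port accepts a connection
    try (java.net.Socket socket = new java.net.Socket()) {
        socket.connect(new java.net.InetSocketAddress(host, port), timeoutMs);
        return true;
    } catch (java.io.IOException e) {
        return false;
    }
}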
 
Example #2
Source File: SixtPartitionerTest.java    From ja-micro with Apache License 2.0
@Ignore // By incorporating available partitions instead of overall partition count,
        // we were getting non-deterministic partitions for known keys.  This is not
        // what we want for some applications, so this was changed.
@Test
public void nullKeyRoundRobinThreeAvailablePartitionsTest() {
    List<PartitionInfo> partitions = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
        partitions.add(new PartitionInfo(null, i, null, null, null));
    }
    when(cluster.availablePartitionsForTopic(anyString())).thenReturn(partitions);

    List<Integer> results = new ArrayList<>();
    for (int i = 0; i < 12; i++) {
        results.add(partitioner.partition("events", null, null,
                null, null, cluster));
    }
    List<Integer> shouldBe = of(0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2);
    assertThat(results).isEqualTo(shouldBe);
}
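The deterministic alternative that the @Ignore comment alludes to cycles over the total partition count rather than only the available partitions. A sketch of such a round-robin partition method, assuming a counter field on the partitioner:

private final java.util.concurrent.atomic.AtomicInteger counter = new java.util.concurrent.atomic.AtomicInteger(0);

public int partition(String topic, Cluster cluster) {
    // cycle over all partitions so a given sequence position always maps to the same partition id;
    // masking with Integer.MAX_VALUE keeps the index non-negative after counter overflow
    List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
    return (counter.getAndIncrement() & Integer.MAX_VALUE) % partitions.size();
}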
 
Example #3
Source File: ExecutionTaskPlannerTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testClear() {
  List<ExecutionProposal> proposals = new ArrayList<>();
  proposals.add(_leaderMovement1);
  proposals.add(_partitionMovement1);
  ExecutionTaskPlanner planner =
      new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties()));

  Set<PartitionInfo> partitions = new HashSet<>();

  partitions.add(generatePartitionInfo(_leaderMovement1, false));
  partitions.add(generatePartitionInfo(_partitionMovement1, false));

  Cluster expectedCluster = new Cluster(null,
                                        _expectedNodes,
                                        partitions,
                                        Collections.<String>emptySet(),
                                        Collections.<String>emptySet());

  planner.addExecutionProposals(proposals, expectedCluster, null);
  assertEquals(2, planner.remainingLeadershipMovements().size());
  assertEquals(2, planner.remainingInterBrokerReplicaMovements().size());
  planner.clear();
  assertEquals(0, planner.remainingLeadershipMovements().size());
  assertEquals(0, planner.remainingInterBrokerReplicaMovements().size());
}
 
Example #4
Source File: DefaultWebKafkaConsumer.java    From kafka-webview with MIT License
private List<TopicPartition> getAllPartitions() {
    // If we have not pulled this yet
    if (cachedTopicsAndPartitions == null) {
        // Determine which partitions to subscribe to, for now do all
        final List<PartitionInfo> partitionInfos = kafkaConsumer.partitionsFor(clientConfig.getTopicConfig().getTopicName());

        // Pull out partitions, convert to topic partitions
        cachedTopicsAndPartitions = new ArrayList<>();
        for (final PartitionInfo partitionInfo : partitionInfos) {
            // Skip filtered partitions
            if (!clientConfig.isPartitionFiltered(partitionInfo.partition())) {
                cachedTopicsAndPartitions.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
            }
        }
    }
    return cachedTopicsAndPartitions;
}
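On Java 8+, the filter-and-convert loop above can be expressed as a stream pipeline; a behavior-preserving sketch, assuming the same clientConfig:

cachedTopicsAndPartitions = partitionInfos.stream()
        .filter(info -> !clientConfig.isPartitionFiltered(info.partition()))
        .map(info -> new TopicPartition(info.topic(), info.partition()))
        .collect(java.util.stream.Collectors.toList());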
 
Example #5
Source File: ConsumerOffsetClientTest.java    From common-kafka with Apache License 2.0
@Test
public void getEndOffsets() {
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.put(new TopicPartition("topic1", 0), 123L);
    offsets.put(new TopicPartition("topic1", 1), 234L);
    offsets.put(new TopicPartition("topic2", 0), 345L);
    offsets.put(new TopicPartition("topic2", 1), 456L);

    when(consumer.partitionsFor("topic1")).thenReturn(Arrays.asList(
            new PartitionInfo("topic1", 0, null, null, null),
            new PartitionInfo("topic1", 1, null, null, null)));
    when(consumer.partitionsFor("topic2")).thenReturn(Arrays.asList(
            new PartitionInfo("topic2", 0, null, null, null),
            new PartitionInfo("topic2", 1, null, null, null)));

    when(consumer.endOffsets(Arrays.asList(
            new TopicPartition("topic1", 0),
            new TopicPartition("topic1", 1),
            new TopicPartition("topic2", 0),
            new TopicPartition("topic2", 1)
    ))).thenReturn(offsets);

    assertThat(client.getEndOffsets(Arrays.asList("topic1", "topic2")), is(offsets));
}
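The nulls passed to the PartitionInfo constructor here stand in for the leader node, the replica array, and the in-sync replica array, which this test does not exercise. For reference, a fully populated instance looks like:

Node leader = new Node(1, "broker-1", 9092);              // Node(id, host, port)
Node[] replicas = new Node[] { leader };
PartitionInfo info = new PartitionInfo("topic1", 0, leader, replicas, replicas);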
 
Example #6
Source File: KafkaClient.java    From kylin with Apache License 2.0
public static Map<Integer, Long> getEarliestOffsets(final CubeInstance cubeInstance) {
    final KafkaConfig kafkaConfig = KafkaConfigManager.getInstance(KylinConfig.getInstanceFromEnv()).getKafkaConfig(cubeInstance.getRootFactTable());

    final String brokers = KafkaClient.getKafkaBrokers(kafkaConfig);
    final String topic = kafkaConfig.getTopic();

    Map<Integer, Long> startOffsets = Maps.newHashMap();
    try (final KafkaConsumer consumer = KafkaClient.getKafkaConsumer(brokers, cubeInstance.getName())) {
        final List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
        for (PartitionInfo partitionInfo : partitionInfos) {
            long earliest = getEarliestOffset(consumer, topic, partitionInfo.partition());
            startOffsets.put(partitionInfo.partition(), earliest);
        }
    }
    return startOffsets;
}
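getEarliestOffset is a private helper in kylin's KafkaClient. A sketch of one way such a lookup can be implemented, assuming the consumer is free to reassign and seek:

private static long getEarliestOffset(KafkaConsumer<?, ?> consumer, String topic, int partitionId) {
    TopicPartition tp = new TopicPartition(topic, partitionId);
    consumer.assign(java.util.Collections.singletonList(tp));
    consumer.seekToBeginning(java.util.Collections.singletonList(tp));
    return consumer.position(tp);   // position after seeking to the beginning = earliest offset
}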
 
Example #7
Source File: ConsumerOffsetClientTest.java    From common-kafka with Apache License 2.0
@Test
public void getPartitionsFor() {
    when(consumer.partitionsFor("topic1")).thenReturn(Arrays.asList(
            new PartitionInfo("topic1", 0, null, null, null),
            new PartitionInfo("topic1", 1, null, null, null)));
    when(consumer.partitionsFor("topic2")).thenReturn(Arrays.asList(
            new PartitionInfo("topic2", 0, null, null, null),
            new PartitionInfo("topic2", 1, null, null, null)));

    assertThat(client.getPartitionsFor(Arrays.asList("topic1", "topic2")), is(Arrays.asList(
            new TopicPartition("topic1", 0),
            new TopicPartition("topic1", 1),
            new TopicPartition("topic2", 0),
            new TopicPartition("topic2", 1)
    )));
}
 
Example #8
Source File: Email.java    From doctorkafka with Apache License 2.0
public static void alertOnProlongedUnderReplicatedPartitions(String[] emails,
                                                             String clusterName,
                                                             int waitTimeInSeconds,
                                                             List<PartitionInfo> urps) {
  if (prolongedUrpEmails.containsKey(clusterName) &&
      System.currentTimeMillis() - prolongedUrpEmails.get(clusterName) < COOLOFF_INTERVAL) {
    // return early to avoid spamming users if an email has already been sent within the cool-off time span
    return;
  }

  prolongedUrpEmails.put(clusterName, System.currentTimeMillis());
  String title = clusterName + " has been under-replicated for > "
      + waitTimeInSeconds + " seconds (" + urps.size() + " under-replicated partitions)";
  StringBuilder sb = new StringBuilder();
  for (PartitionInfo partitionInfo : urps) {
    sb.append(partitionInfo + "\n");
  }
  String content = sb.toString();
  sendTo(emails, title, content);
}
 
Example #9
Source File: KafkaDispatcherImpl.java    From arcusplatform with Apache License 2.0
private Collection<TopicPartition> toKafkaPartitions(
		String topic, 
		Set<PlatformPartition> newPartitions,
		KafkaConsumer<?, ?> consumer
) {
	List<PartitionInfo> kafkaPartitions = consumer.partitionsFor(topic);
	int partitionRatio = platformPartitions / kafkaPartitions.size(); 
	logger.info("Discovered [{}] kafka partitions and [{}] platform partitions: [{}] platform partitions per kafka partition", kafkaPartitions.size(), platformPartitions, partitionRatio);
	Map<Integer, Integer> partitionMap = new LinkedHashMap<>();
	for(PlatformPartition pp: newPartitions) {
		int kafkaPartition = pp.getId() % kafkaPartitions.size();
		partitionMap.put(kafkaPartition, partitionMap.getOrDefault(kafkaPartition, 0) + 1);
	}
	List<TopicPartition> tp = new ArrayList<>(Math.max(1, partitionMap.size()));
	for(Map.Entry<Integer, Integer> entry: partitionMap.entrySet()) {
		// Guava's Preconditions only interpolates %s placeholders, so %s is used rather than %d
		Preconditions.checkState(entry.getValue() == partitionRatio, "Kafka partition %s partially assigned to this node, that is not currently supported", entry.getKey());
		tp.add(new TopicPartition(topic, entry.getKey()));
	}
	logger.info("Assigning partitions [{}] to this node", partitionMap.keySet());
	return tp;
}
 
Example #10
Source File: KafkaAdminFactoryTest.java    From kafka-webview with MIT License
/**
 * Test that KafkaAdminFactory can create a working KafkaConsumer when connecting to a non-ssl cluster.
 */
@Test
public void testCreateNonSslConsumer() {
    // Create Cluster config
    final ClusterConfig clusterConfig = ClusterConfig.newBuilder()
        .withBrokerHosts(sharedKafkaTestResource.getKafkaConnectString())
        .build();

    // Create a topic
    final String topicName = "MyRandomTopic";
    sharedKafkaTestResource.getKafkaTestUtils().createTopic(topicName, 1, (short) 1);

    final KafkaAdminFactory kafkaAdminFactory = new KafkaAdminFactory(new KafkaClientConfigUtil("NotUsed", "Prefix"));

    // Create instance
    try (final KafkaConsumer<String, String> consumerClient = kafkaAdminFactory.createConsumer(clusterConfig, "MyClientId")) {

        // Call method to validate things work as expected
        final Map<String, List<PartitionInfo>> results = consumerClient.listTopics();
        assertNotNull(results);
        assertTrue(results.containsKey(topicName), "Should have our topic.");
    }
}
 
Example #11
Source File: KafkaServiceImplTest.java    From metron with Apache License 2.0
@Test
public void listTopicsHappyPath() {
  final Map<String, List<PartitionInfo>> topics = new HashMap<>();
  topics.put("topic1", Lists.newArrayList());
  topics.put("topic2", Lists.newArrayList());
  topics.put("topic3", Lists.newArrayList());

  when(kafkaConsumer.listTopics()).thenReturn(topics);

  final Set<String> listedTopics = kafkaService.listTopics();

  assertEquals(Sets.newHashSet("topic1", "topic2", "topic3"), listedTopics);

  verifyNoInteractions(zkUtils);
  verify(kafkaConsumer).listTopics();
  verify(kafkaConsumer).close();
  verifyNoMoreInteractions(kafkaConsumer, zkUtils);
}
 
Example #12
Source File: KafkaSampleStore.java    From cruise-control with BSD 2-Clause "Simplified" License
protected void prepareConsumers() {
  int numConsumers = _consumers.size();
  List<List<TopicPartition>> assignments = new ArrayList<>();
  for (int i = 0; i < numConsumers; i++) {
    assignments.add(new ArrayList<>());
  }
  int j = 0;
  for (String topic : Arrays.asList(_partitionMetricSampleStoreTopic, _brokerMetricSampleStoreTopic)) {
    for (PartitionInfo partInfo : _consumers.get(0).partitionsFor(topic)) {
      assignments.get(j++ % numConsumers).add(new TopicPartition(partInfo.topic(), partInfo.partition()));
    }
  }
  for (int i = 0; i < numConsumers; i++) {
    _consumers.get(i).assign(assignments.get(i));
  }
}
 
Example #13
Source File: FlinkKafkaProducer.java    From flink with Apache License 2.0
private static int[] getPartitionsByTopic(String topic, Producer<byte[], byte[]> producer) {
	// the fetched list is immutable, so we're creating a mutable copy in order to sort it
	List<PartitionInfo> partitionsList = new ArrayList<>(producer.partitionsFor(topic));

	// sort the partitions by partition id to make sure the fetched partition list is the same across subtasks
	Collections.sort(partitionsList, new Comparator<PartitionInfo>() {
		@Override
		public int compare(PartitionInfo o1, PartitionInfo o2) {
			return Integer.compare(o1.partition(), o2.partition());
		}
	});

	int[] partitions = new int[partitionsList.size()];
	for (int i = 0; i < partitions.length; i++) {
		partitions[i] = partitionsList.get(i).partition();
	}

	return partitions;
}
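On Java 8+, the anonymous Comparator above can be collapsed to a method reference without changing behavior:

partitionsList.sort(java.util.Comparator.comparingInt(PartitionInfo::partition));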
 
Example #14
Source File: KafkaPartitionDiscoverer.java    From Flink-CEPplus with Apache License 2.0
@Override
protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws AbstractPartitionDiscoverer.WakeupException {
	List<KafkaTopicPartition> partitions = new LinkedList<>();

	try {
		for (String topic : topics) {
			for (PartitionInfo partitionInfo : kafkaConsumer.partitionsFor(topic)) {
				partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition()));
			}
		}
	} catch (org.apache.kafka.common.errors.WakeupException e) {
		// rethrow our own wakeup exception
		throw new AbstractPartitionDiscoverer.WakeupException();
	}

	return partitions;
}
 
Example #15
Source File: FlinkKafkaProducerBase.java    From flink with Apache License 2.0
protected static int[] getPartitionsByTopic(String topic, KafkaProducer<byte[], byte[]> producer) {
	// the fetched list is immutable, so we're creating a mutable copy in order to sort it
	List<PartitionInfo> partitionsList = new ArrayList<>(producer.partitionsFor(topic));

	// sort the partitions by partition id to make sure the fetched partition list is the same across subtasks
	Collections.sort(partitionsList, new Comparator<PartitionInfo>() {
		@Override
		public int compare(PartitionInfo o1, PartitionInfo o2) {
			return Integer.compare(o1.partition(), o2.partition());
		}
	});

	int[] partitions = new int[partitionsList.size()];
	for (int i = 0; i < partitions.length; i++) {
		partitions[i] = partitionsList.get(i).partition();
	}

	return partitions;
}
 
Example #16
Source File: HashPartitioner.java    From KafkaExample with Apache License 2.0
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
	List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
	int numPartitions = partitions.size();
	if (keyBytes != null) {
		int hashCode = 0;
		if (key instanceof Integer || key instanceof Long) {
			// unbox through Number: casting a Long directly to int would throw ClassCastException
			hashCode = ((Number) key).intValue();
		} else {
			hashCode = key.hashCode();
		}
		hashCode = hashCode & 0x7fffffff;
		return hashCode % numPartitions;
	} else {
		return 0;
	}
}
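A custom Partitioner like this one is wired in through producer configuration rather than code; a minimal sketch, assuming HashPartitioner is on the producer's classpath:

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
// register the custom partitioner by class name
props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, HashPartitioner.class.getName());
Producer<String, String> producer = new KafkaProducer<>(props);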
 
Example #17
Source File: Kafka010PartitionDiscoverer.java    From flink with Apache License 2.0
@Override
protected List<KafkaTopicPartition> getAllPartitionsForTopics(List<String> topics) throws WakeupException, RuntimeException {
	List<KafkaTopicPartition> partitions = new LinkedList<>();

	try {
		for (String topic : topics) {
			final List<PartitionInfo> kafkaPartitions = kafkaConsumer.partitionsFor(topic);

			if (kafkaPartitions == null) {
				throw new RuntimeException(String.format("Could not fetch partitions for %s. Make sure that the topic exists.", topic));
			}

			for (PartitionInfo partitionInfo : kafkaPartitions) {
				partitions.add(new KafkaTopicPartition(partitionInfo.topic(), partitionInfo.partition()));
			}
		}
	} catch (org.apache.kafka.common.errors.WakeupException e) {
		// rethrow our own wakeup exception
		throw new WakeupException();
	}

	return partitions;
}
 
Example #18
Source File: KafkaClusterManager.java    From doctorkafka with Apache License 2.0
public Map<Integer, List<TopicPartition>> getBrokerLeaderPartitions(
    Map<String, List<PartitionInfo>> topicPartitonInfoMap) {
  Map<Integer, List<TopicPartition>> result = new HashMap<>();

  for (String topic : topicPartitonInfoMap.keySet()) {
    List<PartitionInfo> partitionInfoList = topicPartitonInfoMap.get(topic);
    if (partitionInfoList == null) {
      LOG.error("Failed to get partition info for {}", topic);
      continue;
    }

    for (PartitionInfo info : partitionInfoList) {
      Node leaderNode = info.leader();
      if (leaderNode != null) {
        result.putIfAbsent(leaderNode.id(), new ArrayList<>());
        TopicPartition topicPartiton = new TopicPartition(info.topic(), info.partition());
        result.get(leaderNode.id()).add(topicPartiton);
      }
    }
  }
  return result;
}
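The putIfAbsent-then-get pair in the loop above can be condensed with computeIfAbsent; a behavior-preserving sketch:

result.computeIfAbsent(leaderNode.id(), id -> new ArrayList<>())
      .add(new TopicPartition(info.topic(), info.partition()));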
 
Example #19
Source File: KafkaClient.java    From kylin with Apache License 2.0
public static Map<Integer, Long> getLatestOffsets(final CubeInstance cubeInstance) {
    final KafkaConfig kafkaConfig = KafkaConfigManager.getInstance(KylinConfig.getInstanceFromEnv()).getKafkaConfig(cubeInstance.getRootFactTable());

    final String brokers = KafkaClient.getKafkaBrokers(kafkaConfig);
    final String topic = kafkaConfig.getTopic();

    Map<Integer, Long> startOffsets = Maps.newHashMap();
    try (final KafkaConsumer consumer = KafkaClient.getKafkaConsumer(brokers, cubeInstance.getName())) {
        final List<PartitionInfo> partitionInfos = consumer.partitionsFor(topic);
        for (PartitionInfo partitionInfo : partitionInfos) {
            long latest = getLatestOffset(consumer, topic, partitionInfo.partition());
            startOffsets.put(partitionInfo.partition(), latest);
        }
    }
    return startOffsets;
}
 
Example #20
Source File: KafkaServiceImplTest.java    From metron with Apache License 2.0
@Test
public void listTopicsHappyPathWithListTopicsReturningEmptyMap() {
  final Map<String, List<PartitionInfo>> topics = new HashMap<>();

  when(kafkaConsumer.listTopics()).thenReturn(topics);

  final Set<String> listedTopics = kafkaService.listTopics();

  assertEquals(Sets.newHashSet(), listedTopics);

  verifyNoInteractions(zkUtils);
  verify(kafkaConsumer).listTopics();
  verify(kafkaConsumer).close();
  verifyNoMoreInteractions(kafkaConsumer, zkUtils);
}
 
Example #21
Source File: ProducerTest.java    From kbear with Apache License 2.0
protected void checkOtherApis(Producer<String, String> producer) {
    topics.forEach(t -> {
        List<PartitionInfo> partitions = producer.partitionsFor(t);
        Assert.assertNotNull(partitions);
        Assert.assertEquals(1, partitions.size());
    });

    Map<MetricName, ?> metrics = producer.metrics();
    System.out.println("metrics: " + metrics);
    Assert.assertFalse(CollectionExtension.isEmpty(metrics));
}
 
Example #22
Source File: LiKafkaInstrumentedConsumerImpl.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Override
public Map<String, List<PartitionInfo>> listTopics() {
  try (
      @SuppressWarnings("unused") CloseableLock uLock = new CloseableLock(userLock);
      @SuppressWarnings("unused") CloseableLock srLock = new CloseableLock(delegateLock.readLock())
  ) {
    verifyOpen();
    return delegate.listTopics();
  }
}
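CloseableLock is a small li-apache-kafka-clients helper that adapts a java.util.concurrent lock to try-with-resources; a minimal sketch of such a wrapper:

public class CloseableLock implements AutoCloseable {
    private final java.util.concurrent.locks.Lock lock;

    public CloseableLock(java.util.concurrent.locks.Lock lock) {
        this.lock = lock;
        lock.lock();          // acquired on construction
    }

    @Override
    public void close() {     // released when the try block exits
        lock.unlock();
    }
}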
 
Example #23
Source File: DemoPartitioner.java    From BigData-In-Practice with Apache License 2.0
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
    int numPartitions = partitions.size();
    if (null == keyBytes) {
        return counter.getAndIncrement() % numPartitions;
    } else
        return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
}
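Utils.toPositive clears the sign bit instead of calling Math.abs, which would still return a negative value for Integer.MIN_VALUE; its effect is equivalent to this sketch:

static int toPositive(int number) {
    return number & 0x7fffffff; // masks the sign bit, so the result is always non-negative
}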
 
Example #24
Source File: Kafka0_9ConsumerLoader.java    From datacollector with Apache License 2.0
@Override
public List<TopicPartition> getTopicPartitions(String topic) {
  return ((Set<PartitionInfo>) delegate.assignment())
      .stream()
      .map(partitionInfo -> new TopicPartition(topic, partitionInfo.partition()))
      .collect(Collectors.toList());
}
 
Example #25
Source File: KafkaSource.java    From kylin with Apache License 2.0
@Override
public String getMessageTemplate(StreamingSourceConfig streamingSourceConfig) {
    String template = null;
    KafkaConsumer<byte[], byte[]> consumer = null;
    try {
        String topicName = getTopicName(streamingSourceConfig.getProperties());
        Map<String, Object> config = getKafkaConf(streamingSourceConfig.getProperties());
        consumer = new KafkaConsumer<>(config);
        Set<TopicPartition> partitions = Sets.newHashSet(FluentIterable.from(consumer.partitionsFor(topicName))
                .transform(new Function<PartitionInfo, TopicPartition>() {
                    @Override
                    public TopicPartition apply(PartitionInfo input) {
                        return new TopicPartition(input.topic(), input.partition());
                    }
                }));
        consumer.assign(partitions);
        consumer.seekToBeginning(partitions);
        ConsumerRecords<byte[], byte[]> records = consumer.poll(500);
        if (records == null) {
            return null;
        }
        Iterator<ConsumerRecord<byte[], byte[]>> iterator = records.iterator();
        if (iterator == null || !iterator.hasNext()) {
            return null;
        }
        ConsumerRecord<byte[], byte[]> record = iterator.next();
        template = new String(record.value(), "UTF8");
    } catch (Exception e) {
        logger.error("error when fetch one record from kafka, stream:" + streamingSourceConfig.getName(), e);
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return template;
}
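Note that the poll(long) overload used above is deprecated since Kafka 2.0 in favor of a Duration argument; on newer clients the call becomes:

ConsumerRecords<byte[], byte[]> records = consumer.poll(java.time.Duration.ofMillis(500));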
 
Example #26
Source File: LiKafkaInstrumentedProducerImpl.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Override
public List<PartitionInfo> partitionsFor(String topic) {
  verifyOpen();

  delegateLock.readLock().lock();
  try {
    return delegate.partitionsFor(topic);
  } finally {
    delegateLock.readLock().unlock();
  }
}
 
Example #27
Source File: ExecutionTaskManagerTest.java    From cruise-control with BSD 2-Clause "Simplified" License
private Cluster generateExpectedCluster(ExecutionProposal proposal, TopicPartition tp) {
  List<Node> expectedReplicas = new ArrayList<>(proposal.oldReplicas().size());
  expectedReplicas.add(new Node(0, "null", -1));
  expectedReplicas.add(new Node(2, "null", -1));

  Node[] isrArray = new Node[expectedReplicas.size()];
  isrArray = expectedReplicas.toArray(isrArray);

  Set<PartitionInfo> partitions = new HashSet<>();
  partitions.add(new PartitionInfo(tp.topic(), tp.partition(), expectedReplicas.get(1), isrArray, isrArray));

  return new Cluster(null, expectedReplicas, partitions, Collections.emptySet(), Collections.emptySet());
}
 
Example #28
Source File: BrokerLoad.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Verify whether we have collected enough metrics to generate the broker metric samples. The broker must have
 * missed less than {@link #MAX_ALLOWED_MISSING_TOPIC_METRIC_PERCENT} of the topic level
 * and {@link #MAX_ALLOWED_MISSING_PARTITION_METRIC_PERCENT} partition level metrics in the
 * broker to generate broker level metrics.
 *
 * @param cluster The Kafka cluster.
 * @param brokerId The broker id to check.
 * @return True if there are enough topic level metrics, false otherwise.
 */
private boolean enoughTopicPartitionMetrics(Cluster cluster, int brokerId) {
  Set<String> missingTopics = new HashSet<>();
  Set<String> topicsInBroker = new HashSet<>();
  AtomicInteger missingPartitions = new AtomicInteger(0);
  List<PartitionInfo> leaderPartitionsInNode = cluster.partitionsForNode(brokerId);
  if (leaderPartitionsInNode.isEmpty()) {
    // If the broker does not have any leader partition, return true immediately.
    return true;
  }
  leaderPartitionsInNode.forEach(info -> {
    String topicWithDotHandled = replaceDotsWithUnderscores(info.topic());
    topicsInBroker.add(topicWithDotHandled);
    if (!_dotHandledTopicsWithPartitionSizeReported.contains(topicWithDotHandled)) {
      missingPartitions.incrementAndGet();
      missingTopics.add(topicWithDotHandled);
    }
  });
  boolean result = ((double) missingTopics.size() / topicsInBroker.size()) <= MAX_ALLOWED_MISSING_TOPIC_METRIC_PERCENT
                   && ((double) missingPartitions.get() / cluster.partitionsForNode(brokerId).size()
                       <= MAX_ALLOWED_MISSING_PARTITION_METRIC_PERCENT);
  if (!result) {
    LOG.warn("Broker {} is missing {}/{} topics metrics and {}/{} leader partition metrics. Missing leader topics: {}.", brokerId,
             missingTopics.size(), topicsInBroker.size(), missingPartitions.get(), cluster.partitionsForNode(brokerId).size(), missingTopics);
  }
  return result;
}
 
Example #29
Source File: KafkaInstrumentationHelperImpl.java    From apm-agent-java with Apache License 2.0
@Override
public void onSendEnd(Span span, ProducerRecord producerRecord, KafkaProducer kafkaProducer, @Nullable Throwable throwable) {

    // Topic address collection is normally very fast, as it uses cached cluster state information. However,
    // when the cluster metadata is required to be updated, its query may block for a short period. In
    // addition, the discovery operation allocates two objects. Therefore, we have the ability to turn it off.
    if (messagingConfiguration.shouldCollectQueueAddress()) {
        try {
            // Better get the destination now, as if the partition's leader was replaced, it may be reflected at
            // this point.
            @SuppressWarnings("unchecked") List<PartitionInfo> partitions = kafkaProducer.partitionsFor(producerRecord.topic());
            Integer partition = producerRecord.partition();
            PartitionInfo partitionInfo = null;
            if (partition != null) {
                partitionInfo = partitions.get(partition);
            } else if (!partitions.isEmpty()) {
                // probably not a partitioned topic, so look for the single entry
                partitionInfo = partitions.get(0);
            }
            if (partitionInfo != null) {
                // Records are always sent to the leader of a partition and then synced with replicas internally by
                // the broker.
                Node leader = partitionInfo.leader();
                if (leader != null) {
                    span.getContext().getDestination().withAddress(leader.host()).withPort(leader.port());
                }
            }
        } catch (Exception e) {
            logger.error("Failed to get Kafka producer's destination", e);
        }
    }

    span.captureException(throwable);

    // Not ending here- ending in the callback
    span.deactivate();
}
 
Example #30
Source File: KafkaSystemAdmin.java    From samza with Apache License 2.0
@Override
public Set<SystemStream> getAllSystemStreams() {
  Map<String, List<PartitionInfo>> topicToPartitionInfoMap = threadSafeKafkaConsumer.execute(consumer -> consumer.listTopics());
  Set<SystemStream> systemStreams = topicToPartitionInfoMap.keySet()
                                                           .stream()
                                                           .map(topic -> new SystemStream(systemName, topic))
                                                           .collect(Collectors.toSet());
  return systemStreams;
}