Java Code Examples for kafka.utils.ZkUtils#getAllTopics()

The following examples show how to use kafka.utils.ZkUtils#getAllTopics(). They are taken from open-source projects; you can go to the original project or source file by following the links above each example.
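As a quick orientation before the full examples, here is a minimal sketch of the call itself. It assumes an unsecured ZooKeeper reachable at localhost:2181 with 30-second timeouts (both placeholders), obtains a ZkUtils handle via ZkUtils.apply(), and converts the Scala Seq<String> returned by getAllTopics() into a java.util.List with scala.collection.JavaConverters, just as the examples below do.

import java.util.List;

import kafka.utils.ZkUtils;
import scala.collection.JavaConverters;
import scala.collection.Seq;

public class ListTopicsSketch {
  public static void main(String[] args) {
    // Placeholder connection settings: zkUrl, sessionTimeout, connectionTimeout, isZkSecurityEnabled
    ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 30000, 30000, false);
    try {
      // getAllTopics() returns a Scala Seq<String>; convert it for use from Java
      Seq<String> topicsSeq = zkUtils.getAllTopics();
      List<String> topics = JavaConverters.seqAsJavaList(topicsSeq);
      topics.forEach(System.out::println);
    } finally {
      zkUtils.close();
    }
  }
}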
Example 1
Source File: URPChecker.java    From doctorkafka with Apache License 2.0
public static void main(String[] args) throws Exception {
  CommandLine commandLine = parseCommandLine(args);
  String zookeeper = commandLine.getOptionValue(ZOOKEEPER);

  ZkUtils zkUtils = KafkaUtils.getZkUtils(zookeeper);
  Seq<String> topicsSeq = zkUtils.getAllTopics();
  List<String> topics = scala.collection.JavaConverters.seqAsJavaList(topicsSeq);

  scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>>
      partitionAssignments = zkUtils.getPartitionAssignmentForTopics(topicsSeq);

  Map<String, Integer> replicationFactors = new HashMap<>();
  Map<String, Integer> partitionCounts = new HashMap<>();

  topics.stream().forEach(topic -> {
    int partitionCount = partitionAssignments.get(topic).get().size();
    int factor = partitionAssignments.get(topic).get().head()._2().size();
    partitionCounts.put(topic, partitionCount);
    replicationFactors.put(topic, factor);
  });

  List<PartitionInfo> urps = KafkaClusterManager.getUnderReplicatedPartitions(
      zookeeper, SecurityProtocol.PLAINTEXT, null, topics, partitionAssignments, replicationFactors, partitionCounts);

  for (PartitionInfo partitionInfo : urps) {
    LOG.info("under-replicated : {}", partitionInfo);
  }
}
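The nested Scala map returned by getPartitionAssignmentForTopics() goes from topic to an inner map of partition id to the Seq of replica broker ids, which is why the inner map's size() gives the partition count and the head entry's replica Seq length gives the replication factor. Example 4 below uses a similar helper (KafkaTopicAssigner.topicMapToJavaMap) to turn this structure into plain Java collections; a stand-alone sketch of such a conversion might look like the following (the class and method names are illustrative, not part of doctorkafka):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import scala.collection.JavaConverters;
import scala.collection.Seq;

public final class AssignmentMaps {

  // Builds topic -> (partition id -> replica broker ids) from the Scala structure
  // returned by ZkUtils#getPartitionAssignmentForTopics.
  public static Map<String, Map<Integer, List<Integer>>> toJavaAssignment(
      scala.collection.Map<String, scala.collection.Map<Object, Seq<Object>>> assignments) {
    Map<String, Map<Integer, List<Integer>>> result = new HashMap<>();
    for (Map.Entry<String, scala.collection.Map<Object, Seq<Object>>> topicEntry
        : JavaConverters.mapAsJavaMap(assignments).entrySet()) {
      Map<Integer, List<Integer>> partitions = new HashMap<>();
      for (Map.Entry<Object, Seq<Object>> partitionEntry
          : JavaConverters.mapAsJavaMap(topicEntry.getValue()).entrySet()) {
        List<Integer> replicas = new ArrayList<>();
        for (Object brokerId : JavaConverters.seqAsJavaList(partitionEntry.getValue())) {
          replicas.add(((Number) brokerId).intValue());
        }
        partitions.put(((Number) partitionEntry.getKey()).intValue(), replicas);
      }
      result.put(topicEntry.getKey(), partitions);
    }
    return result;
  }
}

With the converted map, the partition count is simply the inner map's size and the replication factor is the length of any replica list.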
 
Example 2
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private static void printCurrentAssignment(ZkUtils zkUtils, List<String> specifiedTopics) {
    Seq<String> topics = specifiedTopics != null ?
            JavaConversions.iterableAsScalaIterable(specifiedTopics).toSeq() :
            zkUtils.getAllTopics();
    System.out.println("CURRENT ASSIGNMENT:");
    System.out.println(
            zkUtils.formatAsReassignmentJson(zkUtils.getReplicaAssignmentForTopics(
                    topics)));
}
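A possible way to invoke this helper from a command-line entry point (a sketch; the ZooKeeper URL and timeouts are placeholders): passing null for specifiedTopics makes the method fall back to zkUtils.getAllTopics() and print the assignment for every topic in the cluster.

// Placeholder settings: ZkUtils.apply(zkUrl, sessionTimeout, connectionTimeout, isZkSecurityEnabled)
ZkUtils zkUtils = ZkUtils.apply("localhost:2181", 30000, 30000, false);
try {
    // null => no explicit topic list, so the method dumps the assignment for all topics
    printCurrentAssignment(zkUtils, null);
} finally {
    zkUtils.close();
}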
 
Example 3
Source File: KafkaClusterManager.java    From doctorkafka with Apache License 2.0
/**
 *  KafkaClusterManager periodically checks the health of the cluster. If it finds
 *  under-replicated partitions, it will perform partition reassignment. It will also
 *  do partition reassignment for workload balancing.
 *
 *  If partitions are under-replicated in the middle of workload balancing due to
 *  broker failure, it will send out an alert. Human intervention is needed in this case.
 */
@Override
public void run() {
  long checkIntervalInMs = clusterConfig.getCheckIntervalInSeconds() * 1000L;
  stopped = false;
  boolean foundUrps = false;
  long firstSeenUrpsTimestamp = 0L;

  while (!stopped) {
    try {
      Thread.sleep(checkIntervalInMs);
      if (maintenanceMode.get()) {
        LOG.debug("Cluster:" + clusterConfig.getClusterName() + " is in maintenace mode");
        continue;
      }
      ZkUtils zkUtils = KafkaUtils.getZkUtils(zkUrl);

      // check if there are any brokers that do not have stats.
      List<Broker> noStatsBrokers = getNoStatsBrokers();
      if (!noStatsBrokers.isEmpty()) {
        Email.alertOnNoStatsBrokers(
            drkafkaConfig.getAlertEmails(), clusterConfig.getClusterName(), noStatsBrokers);
        continue;
      }

      Seq<String> topicsSeq = zkUtils.getAllTopics();
      List<String> topics = scala.collection.JavaConverters.seqAsJavaList(topicsSeq);
      scala.collection.mutable.Map<String, scala.collection.Map<Object, Seq<Object>>>
          partitionAssignments = zkUtils.getPartitionAssignmentForTopics(topicsSeq);

      Map<String, Integer> replicationFactors = new HashMap<>();
      Map<String, Integer> partitionCounts = new HashMap<>();
      topics.stream().forEach(topic -> {
        int partitionCount = partitionAssignments.get(topic).get().size();
        int factor = partitionAssignments.get(topic).get().head()._2().size();
        partitionCounts.put(topic, partitionCount);
        replicationFactors.put(topic, factor);
      });

      underReplicatedPartitions = getUnderReplicatedPartitions(zkUrl, securityProtocol, consumerConfigs,
          topics, partitionAssignments, replicationFactors, partitionCounts);
      LOG.info("Under-replicated partitions: {}", underReplicatedPartitions.size());

      for (PartitionInfo partitionInfo : underReplicatedPartitions) {
        LOG.info("under-replicated : {}", partitionInfo);
      }

      kafkaCluster.clearResourceAllocationCounters();
      if (underReplicatedPartitions.size() > 0) {
        // handle under-replicated partitions
        if (!foundUrps) {
          foundUrps = true;
          firstSeenUrpsTimestamp = System.currentTimeMillis();
        } else {
          // send out an alert if the cluster has been under-replicated for a while
          long underReplicatedTimeMills = System.currentTimeMillis() - firstSeenUrpsTimestamp;
          if (underReplicatedTimeMills > clusterConfig.getUnderReplicatedAlertTimeInMs()) {

            Email.alertOnProlongedUnderReplicatedPartitions(drkafkaConfig.getAlertEmails(),
                clusterConfig.getClusterName(),
                clusterConfig.getUnderReplicatedAlertTimeInSeconds(),
                underReplicatedPartitions);
          }
        }
        LOG.info("Under-replicated partitions in cluster {} : {}",
            clusterConfig.getClusterName(), underReplicatedPartitions.size());

        handleUnderReplicatedPartitions(underReplicatedPartitions, replicationFactors);
      } else {
        foundUrps = false;
        firstSeenUrpsTimestamp = Long.MAX_VALUE;
        if (clusterConfig.enabledWorloadBalancing()) {
          preferredLeaders.clear();
          reassignmentMap.clear();
          balanceWorkload();
        }
      }
      if (clusterConfig.enabledDeadbrokerReplacement()) {
        // replace the brokers that do not have kafkastats update for a while
        checkAndReplaceDeadBrokers();
      }
    } catch (Exception e) {
      LOG.error("Unexpected failure in cluster manager for {}: ", zkUrl, e);
    }
  }
}
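The loop above drives the periodic health check with Thread.sleep() and a stopped flag. The same cadence could also be expressed with a scheduled executor; a minimal sketch, assuming the loop body is factored out into a hypothetical checkClusterOnce() method on the same class (not an actual doctorkafka API):

// Requires java.util.concurrent.Executors, ScheduledExecutorService and TimeUnit.
long checkIntervalInMs = clusterConfig.getCheckIntervalInSeconds() * 1000L;
ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
scheduler.scheduleAtFixedRate(() -> {
  try {
    checkClusterOnce();   // hypothetical method holding one pass of the loop body above
  } catch (Exception e) {
    // scheduleAtFixedRate suppresses future runs if a task throws, so catch and log here
    LOG.error("Unexpected failure in cluster manager for {}: ", zkUrl, e);
  }
}, checkIntervalInMs, checkIntervalInMs, TimeUnit.MILLISECONDS);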
 
Example 4
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0
private static void printLeastDisruptiveReassignment(
        ZkUtils zkUtils, List<String> specifiedTopics, Set<Integer> specifiedBrokers,
        Set<Integer> excludedBrokers, Map<Integer, String> rackAssignment, int desiredReplicationFactor)
        throws JSONException {
    // We need three inputs for rebalancing: the brokers, the topics, and the current assignment
    // of topics to brokers.
    Set<Integer> brokerSet = specifiedBrokers;
    if (brokerSet == null || brokerSet.isEmpty()) {
        brokerSet = Sets.newHashSet(Lists.transform(
                JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster()),
                new Function<Broker, Integer>() {
                    @Override
                    public Integer apply(Broker broker) {
                        return broker.id();
                    }
                }));
    }

    // Exclude brokers that we want to decommission
    Set<Integer> brokers = Sets.difference(brokerSet, excludedBrokers);
    rackAssignment.keySet().retainAll(brokers);

    // The most common use case is to rebalance all topics, but explicit topic addition is also
    // supported.
    Seq<String> topics = specifiedTopics != null ?
            JavaConversions.collectionAsScalaIterable(specifiedTopics).toSeq() :
            zkUtils.getAllTopics();

    // Print the current assignment in case a rollback is needed
    printCurrentAssignment(zkUtils, JavaConversions.seqAsJavaList(topics));

    Map<String, Map<Integer, List<Integer>>> initialAssignments =
            KafkaTopicAssigner.topicMapToJavaMap(zkUtils.getPartitionAssignmentForTopics(
                    topics));

    // Assign topics one at a time. This is slightly suboptimal from a packing standpoint, but
    // it's close enough to work in practice. We can also always follow it up with a Kafka
    // leader election rebalance if necessary.
    JSONObject json = new JSONObject();
    json.put("version", KAFKA_FORMAT_VERSION);
    JSONArray partitionsJson = new JSONArray();
    KafkaTopicAssigner assigner = new KafkaTopicAssigner();
    for (String topic : JavaConversions.seqAsJavaList(topics)) {
        Map<Integer, List<Integer>> partitionAssignment = initialAssignments.get(topic);
        Map<Integer, List<Integer>> finalAssignment = assigner.generateAssignment(
                topic, partitionAssignment, brokers, rackAssignment, desiredReplicationFactor);
        for (Map.Entry<Integer, List<Integer>> e : finalAssignment.entrySet()) {
            JSONObject partitionJson = new JSONObject();
            partitionJson.put("topic", topic);
            partitionJson.put("partition", e.getKey());
            partitionJson.put("replicas", new JSONArray(e.getValue()));
            partitionsJson.put(partitionJson);
        }
    }
    json.put("partitions", partitionsJson);
    System.out.println("NEW ASSIGNMENT:\n" + json.toString());
}
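The "version"/"partitions" JSON emitted here matches the reassignment-plan format consumed by Kafka's kafka-reassign-partitions.sh tool. To apply the plan rather than just print it, the output could be written to a file and passed to the tool; a small sketch (the file path is illustrative, and exception handling is omitted):

// Requires java.nio.file.Files, java.nio.file.Paths and java.nio.charset.StandardCharsets.
// The plan can then be applied with, for example:
//   kafka-reassign-partitions.sh --zookeeper <zk> --reassignment-json-file reassignment.json --execute
Files.write(Paths.get("reassignment.json"),
        json.toString().getBytes(StandardCharsets.UTF_8));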