kafka.cluster.BrokerEndPoint Java Examples

The following examples show how to use kafka.cluster.BrokerEndPoint. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0 6 votes vote down vote up
/**
 * Dumps every broker currently registered in the cluster as a JSON array of
 * {id, host, port[, rack]} objects and prints it to stdout.
 *
 * @param zkUtils handle used to list the brokers registered in ZooKeeper
 * @throws JSONException if assembling the JSON output fails
 */
private static void printCurrentBrokers(ZkUtils zkUtils) throws JSONException {
    JSONArray brokersJson = new JSONArray();
    for (Broker broker : JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster())) {
        // Resolve the PLAINTEXT endpoint to obtain the advertised host/port.
        BrokerEndPoint plaintextEndpoint = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
        JSONObject entry = new JSONObject();
        entry.put("id", broker.id());
        entry.put("host", plaintextEndpoint.host());
        entry.put("port", plaintextEndpoint.port());
        // Rack is optional; emit it only when the broker has one configured.
        if (broker.rack().isDefined()) {
            entry.put("rack", broker.rack().get());
        }
        brokersJson.put(entry);
    }
    System.out.println("CURRENT BROKERS:");
    System.out.println(brokersJson.toString());
}
 
Example #2
Source File: KafkaAssignmentGenerator.java    From kafka-assigner with Apache License 2.0 6 votes vote down vote up
/**
 * Resolves a set of broker hostnames to the ids of the brokers advertising
 * those hostnames on their PLAINTEXT endpoint.
 *
 * @param zkUtils           handle used to list the brokers in the cluster
 * @param brokerHostnameSet hostnames to look up
 * @param checkPresence     when true, require every hostname to resolve
 * @return the ids of the matching brokers
 * @throws IllegalArgumentException if checkPresence is set and some hostname
 *         did not match any broker
 */
private static Set<Integer> brokerHostnamesToBrokerIds(
        ZkUtils zkUtils, Set<String> brokerHostnameSet, boolean checkPresence) {
    Set<Integer> matchingIds = Sets.newHashSet();
    for (Broker candidate : JavaConversions.seqAsJavaList(zkUtils.getAllBrokersInCluster())) {
        BrokerEndPoint plaintextEndpoint = candidate.getBrokerEndPoint(SecurityProtocol.PLAINTEXT);
        if (brokerHostnameSet.contains(plaintextEndpoint.host())) {
            matchingIds.add(candidate.id());
        }
    }
    // A size mismatch means at least one requested hostname had no broker.
    Preconditions.checkArgument(!checkPresence
            || matchingIds.size() == brokerHostnameSet.size(),
            "Some hostnames could not be found! We found: " + matchingIds);

    return matchingIds;
}
 
Example #3
Source File: KafkaPartitionLevelConsumerTest.java    From incubator-pinot with Apache License 2.0 6 votes vote down vote up
public MockKafkaSimpleConsumerFactory(String[] hosts, int[] ports, long[] partitionStartOffsets,
    long[] partitionEndOffsets, int[] partitionLeaderIndices, String topicName) {
  Preconditions.checkArgument(hosts.length == ports.length);
  this.hosts = hosts;
  this.ports = ports;
  brokerCount = hosts.length;

  brokerArray = new BrokerEndPoint[brokerCount];
  for (int i = 0; i < brokerCount; i++) {
    brokerArray[i] = new BrokerEndPoint(i, hosts[i], ports[i]);
  }

  Preconditions.checkArgument(partitionStartOffsets.length == partitionEndOffsets.length);
  Preconditions.checkArgument(partitionStartOffsets.length == partitionLeaderIndices.length);
  this.partitionLeaderIndices = partitionLeaderIndices;
  partitionCount = partitionStartOffsets.length;

  this.topicName = topicName;
}
 
Example #4
Source File: KafkaBaseInfoService.java    From kafka-monitor with Apache License 2.0 5 votes vote down vote up
/**
 * Snapshots the cached brokers as a list of {@code BrokerEndPoint}s.
 *
 * @return a new list with one endpoint per cached broker
 */
public List<BrokerEndPoint> getBrokerEndPoints() {
    // Pre-size the list, and iterate the collection directly — the original
    // .stream().forEach(...) added a Stream purely for a side-effecting
    // forEach, which Collection.forEach already provides.
    List<BrokerEndPoint> endPointList = new ArrayList<>(brokerCache.size());
    brokerCache.values().forEach(b ->
            endPointList.add(new BrokerEndPoint(b.getId(), b.getHost(), b.getPort())));
    return endPointList;
}
 
Example #5
Source File: ZkConsumerCommand.java    From azeroth with Apache License 2.0 5 votes vote down vote up
/**
 * Queries the partition leader for the latest offset (log size) of the given
 * topic partition and stores it on the supplied stat object.
 *
 * @param stat holder identifying the topic/partition; updated in place
 */
public void getTopicPartitionLogSize(TopicPartitionInfo stat) {
    BrokerEndPoint partitionLeader = findLeader(stat.getTopic(), stat.getPartition()).leader();
    SimpleConsumer leaderConsumer = getConsumerClient(partitionLeader.host(), partitionLeader.port());

    // Always release the consumer connection, even if the offset fetch throws.
    try {
        stat.setLogSize(getLastOffset(leaderConsumer, stat.getTopic(), stat.getPartition(),
            kafka.api.OffsetRequest.LatestTime()));
    } finally {
        leaderConsumer.close();
    }
}
 
Example #6
Source File: OffsetMonitor.java    From uReplicator with Apache License 2.0 5 votes vote down vote up
/**
 * Submits one offset-update task per known partition leader to the cron
 * executor. Partitions with no resolvable leader are skipped with a warning;
 * submission failures bump the failure counter and are rethrown to the caller.
 */
protected void updateOffset() {
  logger.debug("OffsetMonitor updates offset with leaders=" + partitionLeader);

  // Start each round with a clean failure count; it is incremented below.
  offsetMonitorFailureCount.set(0);
  for (Map.Entry<TopicAndPartition, BrokerEndPoint> entry : partitionLeader.entrySet()) {
    String leaderBroker = getHostPort(entry.getValue());
    TopicAndPartition tp = entry.getKey();
    if (StringUtils.isEmpty(leaderBroker)) {
      // getHostPort() yields null when the endpoint is null, so an empty
      // result means this partition currently has no known leader.
      logger.warn("{} does not have leader partition", tp);
    } else {
      try {
        cronExecutor.submit(updateOffsetTask(leaderBroker, tp));
      } catch (RejectedExecutionException re) {
        // Executor queue saturated: record the failure, log, and propagate
        // so the caller sees the aborted round.
        offsetMonitorFailureCount.getAndAdd(1);
        logger.warn(String.format("cronExecutor is full! Drop task for topic: %s, partition: %d",
            tp.topic(), tp.partition()), re);
        throw re;
      } catch (Throwable t) {
        // Any other submission failure is treated the same way but logged
        // at error level.
        offsetMonitorFailureCount.getAndAdd(1);
        logger.error(
            String.format("cronExecutor got throwable! Drop task for topic: %s, partition: %d",
                tp.topic(), tp.partition()), t);
        throw t;
      }
    }
  }
}
 
Example #7
Source File: ZkConsumerCommand.java    From jeesuite-libs with Apache License 2.0 5 votes vote down vote up
/**
 * Fetches the latest offset (log size) of the given topic partition from its
 * leader broker and records it on the supplied stat object.
 *
 * @param stat holder identifying the topic/partition; updated in place
 */
public void getTopicPartitionLogSize(TopicPartitionInfo stat) {
    BrokerEndPoint leaderEndpoint = findLeader(stat.getTopic(), stat.getPartition()).leader();
    SimpleConsumer client = getConsumerClient(leaderEndpoint.host(), leaderEndpoint.port());

    // Close the consumer no matter how the fetch ends.
    try {
        long latestOffset = getLastOffset(client, stat.getTopic(), stat.getPartition(),
                kafka.api.OffsetRequest.LatestTime());
        stat.setLogSize(latestOffset);
    } finally {
        client.close();
    }
}
 
Example #8
Source File: KafkaPartitionLevelConsumerTest.java    From incubator-pinot with Apache License 2.0 5 votes vote down vote up
@Override
// Mock metadata lookup: answers a TopicMetadataRequest from the fixed
// broker/partition arrays configured in the factory. Unknown topics get an
// UNKNOWN_TOPIC_OR_PARTITION error entry; the configured topic gets one
// PartitionMetadata per partition with its assigned leader.
public TopicMetadataResponse send(TopicMetadataRequest request) {
  java.util.List<String> topics = request.topics();
  TopicMetadata[] topicMetadataArray = new TopicMetadata[topics.size()];

  for (int i = 0; i < topicMetadataArray.length; i++) {
    String topic = topics.get(i);
    if (!topic.equals(topicName)) {
      // This mock only serves topicName; everything else is "unknown".
      topicMetadataArray[i] = new TopicMetadata(topic, null, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
    } else {
      PartitionMetadata[] partitionMetadataArray = new PartitionMetadata[partitionCount];
      for (int j = 0; j < partitionCount; j++) {
        // Replicas and ISR are reported empty; only the leader is populated.
        java.util.List<BrokerEndPoint> emptyJavaList = Collections.emptyList();
        List<BrokerEndPoint> emptyScalaList = JavaConversions.asScalaBuffer(emptyJavaList).toList();
        partitionMetadataArray[j] =
            new PartitionMetadata(j, Some.apply(brokerArray[partitionLeaderIndices[j]]), emptyScalaList,
                emptyScalaList, Errors.NONE.code());
      }

      Seq<PartitionMetadata> partitionsMetadata = List.fromArray(partitionMetadataArray);
      topicMetadataArray[i] = new TopicMetadata(topic, partitionsMetadata, Errors.NONE.code());
    }
  }

  // Wrap everything in the Scala-level response; -1 is the correlation id.
  Seq<BrokerEndPoint> brokers = List.fromArray(brokerArray);
  Seq<TopicMetadata> topicsMetadata = List.fromArray(topicMetadataArray);

  return new TopicMetadataResponse(new kafka.api.TopicMetadataResponse(brokers, topicsMetadata, -1));
}
 
Example #9
Source File: KafkaStreamReader.java    From arcusplatform with Apache License 2.0 4 votes vote down vote up
/**
 * Opens a SimpleConsumer to the given partition leader, honoring any
 * host/port override configured for that broker id.
 */
private SimpleConsumer connect(BrokerEndPoint leader) {
   HostAndPort advertised = HostAndPort.fromParts(leader.host(), leader.port());
   HostAndPort broker = config.getBrokerOverride(leader.id()).orElse(advertised);
   return new SimpleConsumer(
         broker.getHostText(),
         broker.getPortOrDefault(9092),
         config.getSoTimeoutMs(),
         config.getBufferSize(),
         config.getClientId());
}
 
Example #10
Source File: KafkaBaseInfoService.java    From kafka-monitor with Apache License 2.0 4 votes vote down vote up
/**
 * Fetches cluster metadata for the given topics and converts it into the
 * local {@code Topic}/{@code Partition} model (leader, ISR, replicas,
 * preferred leader). Topics whose metadata carries an error code are skipped.
 *
 * @param topics topic names to look up
 * @return map of topic name to populated Topic
 */
public Map<String, Topic> getTopicMetadata(String... topics) {

    // Request topic metadata from the cluster via the known broker endpoints.
    kafka.api.TopicMetadataResponse response = ClientUtils.fetchTopicMetadata(JavaConversions.asScalaIterable(Arrays.asList(topics)).toSet(), JavaConversions.asScalaBuffer(getBrokerEndPoints()), "test", 2000, 1);

    // Convert the Scala metadata into the local model, dropping entries that
    // came back with a non-zero error code.
    Map<String, Topic> topicMap = WrapAsJava$.MODULE$.seqAsJavaList(response.topicsMetadata())
            .stream().filter(error -> error.errorCode() == ErrorMapping.NoError())
            .map((temp) -> {
                Topic topic = new Topic(temp.topic());
                // Topic-level config is read straight from the ZK config cache.
                topic.setConfig(JSONObject.parseObject(topicConfigCache.getCurrentData(ZkUtils.EntityConfigPath() + "/topics/" + temp.topic()).getData(), Map.class));
                List<PartitionMetadata> pMetadata = WrapAsJava$.MODULE$.seqAsJavaList(temp.partitionsMetadata());
                topic.setPartitionMap(
                        pMetadata.stream()
                                .map((pMta) -> {
                                    // Build the Partition and attach its replica information.
                                    Partition partition = new Partition(pMta.partitionId());
                                    BrokerEndPoint leader;
                                    // -1 marks "no leader" for the comparisons below.
                                    int leaderId = -1;
                                    if (pMta.leader().nonEmpty()) {
                                        leader = pMta.leader().get();
                                        leaderId = leader.id();
                                    }

                                    partition.setIsr(WrapAsJava$.MODULE$.seqAsJavaList(pMta.isr()).stream().mapToInt(i -> i.id()).toArray());


                                    for (BrokerEndPoint replica :
                                            WrapAsJava$.MODULE$.seqAsJavaList(pMta.replicas())) {
                                        boolean isLeader = false;
                                        if (replica.id() == leaderId) {
                                            isLeader = true;
                                        }
                                        // NOTE(review): the second argument is hard-coded to true;
                                        // presumably it flags "in sync" and should be derived from
                                        // pMta.isr() — confirm against PartitionReplica's contract.
                                        partition.addReplica(new PartitionReplica(replica.id(), true, isLeader));
                                    }

                                    partition.setReplicasArray(WrapAsJava$.MODULE$.seqAsJavaList(pMta.replicas()).stream().mapToInt(m -> m.id()).toArray());

                                    if (pMta.replicas().size() > 0) {
                                        // Preferred replica is the first entry of the replica list.
                                        BrokerEndPoint preferedReplica = WrapAsJava$.MODULE$.seqAsJavaList(pMta.replicas()).get(0);
                                        // Record it only when the preferred replica is the leader.
                                        if (leaderId == preferedReplica.id()) {
                                            partition.setPreferredLeaderId(leaderId);
                                        }
                                    }
                                    return partition;
                                }).collect(Collectors.toMap(Partition::getId, p -> p))
                );
                return topic;
            }).collect(Collectors.toMap(Topic::getName, t -> t));

    return topicMap;
}
 
Example #11
Source File: OffsetMonitor.java    From uReplicator with Apache License 2.0 4 votes vote down vote up
/**
 * Formats a leader endpoint as "host:port", or returns null when no leader
 * endpoint is known.
 */
private static String getHostPort(BrokerEndPoint leader) {
  return (leader == null) ? null : leader.host() + ":" + leader.port();
}