Java Code Examples for org.apache.kafka.common.TopicPartitionInfo#replicas()

The following examples show how to use org.apache.kafka.common.TopicPartitionInfo#replicas(). You can vote up the examples you like or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: KafkaAvailability.java — from the strimzi-kafka-operator project (Apache License 2.0)
/**
 * Filters the given topic descriptions down to those that have at least one
 * partition with a replica hosted on the broker whose id equals {@code podId}.
 *
 * @param tds   topic descriptions to inspect
 * @param podId broker id to look for among each partition's replicas
 * @return the topics that keep at least one replica on the given broker
 */
private Set<TopicDescription> groupTopicsByBroker(Collection<TopicDescription> tds, int podId) {
    Set<TopicDescription> topicPartitionInfos = new HashSet<>();
    for (TopicDescription td : tds) {
        log.trace("{}", td);
        // Stop scanning a topic as soon as one replica matches: the original
        // code kept iterating all partitions/replicas and re-added the same
        // topic to the set, which was wasted work.
        boolean hostedOnBroker = false;
        for (TopicPartitionInfo pd : td.partitions()) {
            for (Node broker : pd.replicas()) {
                if (podId == broker.id()) {
                    hostedOnBroker = true;
                    break;
                }
            }
            if (hostedOnBroker) {
                break;
            }
        }
        if (hostedOnBroker) {
            topicPartitionInfos.add(td);
        }
    }
    return topicPartitionInfos;
}
 
Example 2
Source File: KafkaPoolCfg.java — from the feeyo-redisproxy project (BSD 3-Clause "New" or "Revised" License)
/**
 * Initializes the Kafka side of every configured topic: creates missing topics,
 * expands the partition count when the configured value exceeds the existing one,
 * and caches each topic's partition/leader/replica layout on its {@link TopicCfg}.
 *
 * Example of the broker-side failure this guards against (replication factor
 * larger than the number of live brokers):
 *
 *	zhuamdeMacBook-Pro:logs zhuam$ [2018-05-28 15:26:41,394] INFO [Admin Manager on Broker 0]: 
 *	Error processing create topic request for topic test01 with arguments (numPartitions=3, replicationFactor=2, replicasAssignments={}, configs={}) (kafka.server.AdminManager)
 *	org.apache.kafka.common.errors.InvalidReplicationFactorException: Replication factor: 2 larger than available brokers: 1.
 *
 * @param topicCfgMap topic configurations keyed by topic name; a null or empty map is a no-op
 * @throws Exception if a topic cannot be created or described, or if there are
 *                   not enough live brokers for the requested replication factor
 */
private void initializeOfKafka(Map<String, TopicCfg> topicCfgMap) throws Exception, org.apache.kafka.common.errors.TimeoutException {
	
	if (topicCfgMap == null || topicCfgMap.isEmpty()) {
		return;
	}
	
	// Build the bootstrap-server list ("host1:port1,host2:port2,...").
	// StringBuilder instead of StringBuffer: this is a purely local,
	// single-threaded builder, so no synchronization is needed.
	StringBuilder servers = new StringBuilder();
	List<String> nodes = this.getNodes();
	for (int i = 0; i < nodes.size(); i++) {
		String str = nodes.get(i);
		// Each node entry is expected to be "host:port" — TODO confirm against config parsing
		String[] node = str.split(":");
		servers.append(node[0]).append(":").append(node[1]);
		if (i < nodes.size() - 1) {
			servers.append(",");
		}
	}
	
	KafkaAdmin kafkaAdmin = null;
	try {
		// Obtain the Kafka admin client
		kafkaAdmin = KafkaAdmin.create( servers.toString() );
		
		// Fetch the current topics and the live cluster nodes
		Map<String, TopicDescription> remoteTopics = kafkaAdmin.getTopicAndDescriptions();
		Collection<Node> clusterNodes = kafkaAdmin.getClusterNodes();
		for (TopicCfg topicCfg : topicCfgMap.values()) {

			String topicName = topicCfg.getName();
			short replicationFactor = topicCfg.getReplicationFactor();
			int partitionNum = topicCfg.getPartitions();

			TopicDescription topicDescription = remoteTopics.get(topicName);
			if (topicDescription != null) {
				// Topic already exists: only grow the partition count
				// (Kafka does not support shrinking partitions).
				int oldPartitionNum = topicDescription.partitions().size();
				if (partitionNum > oldPartitionNum) {
					// add partition
					kafkaAdmin.addPartitionsForTopic(topicName, partitionNum);
					topicDescription = kafkaAdmin.getDescriptionByTopicName(topicName);
				}
				
			} else {
				// Verify there are enough live brokers before asking Kafka,
				// otherwise the create fails with InvalidReplicationFactorException
				// (see the Javadoc above).
				if(clusterNodes == null || replicationFactor > clusterNodes.size()) {
					throw new Exception( "kafka topicName="+ topicName + ", no enough alive physical nodes for replication");
				}
				
				// create topic
				kafkaAdmin.createTopic(topicName, partitionNum, replicationFactor);
				topicDescription = kafkaAdmin.getDescriptionByTopicName(topicName);
			}
			
			// The description must be available by now, whether pre-existing or just created
			if ( topicDescription == null) {
				throw new Exception( " kafka topicName=" + topicName  + ", description is null.");
			} 

			//
			String name = topicDescription.name();
			boolean internal = topicDescription.isInternal();
			int partitionSize = topicDescription.partitions().size();
			
			// Snapshot the partition layout (leader + replicas) into the
			// pool's own BrokerPartition/BrokerNode model.
			BrokerPartition[] newPartitions = new BrokerPartition[ partitionSize ];
			for (int i = 0; i < partitionSize; i++) {
				
				TopicPartitionInfo partitionInfo =  topicDescription.partitions().get(i);
				int partition = partitionInfo.partition();
				
				Node leader = partitionInfo.leader();
				BrokerNode newLeader = new BrokerNode(leader.id(), leader.host(), leader.port());
				
				List<Node> replicas = partitionInfo.replicas();
				BrokerNode[] newReplicas = new BrokerNode[replicas.size()];
				for (int j = 0; j < replicas.size(); j++) {
					newReplicas[j] = new BrokerNode(replicas.get(j).id(), replicas.get(j).host(), replicas.get(j).port());
				}
				
				BrokerPartition newPartition = new BrokerPartition(partition, newLeader, newReplicas);
				newPartitions[i] = newPartition;
			}

			topicCfg.setRunningInfo( new BrokerRunningInfo(name, internal, newPartitions) );
		}
		
	} catch(Throwable e) {
		// NOTE(review): catching Throwable also wraps Errors (e.g. OOM) into a
		// checked Exception — kept as-is to preserve behavior, but consider
		// narrowing to Exception. The cause is preserved in the wrapper.
		throw new Exception("kafka pool init err: " + servers.toString(), e);
		
	} finally {
		// Always release the admin client's network resources
		if (kafkaAdmin != null)
			kafkaAdmin.close();
	}
}