kafka.javaapi.TopicMetadataRequest Java Examples

The following examples show how to use kafka.javaapi.TopicMetadataRequest, the topic-metadata request type from the legacy Kafka 0.8 Java API. Each example is drawn from an open-source project; the source file and project are listed above each snippet.
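For orientation, here is a minimal sketch of the request/response round trip that the examples below build on. The broker host, port, timeout, buffer size, client id, and topic name are placeholder assumptions, not values from any of the projects:

import java.util.Collections;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;

// Placeholder broker coordinates and client settings.
SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "metadataLookup");
try {
  // Ask the broker for metadata about a single topic.
  TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList("my-topic"));
  TopicMetadataResponse response = consumer.send(request);
  for (TopicMetadata metadata : response.topicsMetadata()) {
    System.out.println(metadata.topic() + ": " + metadata.partitionsMetadata().size() + " partition(s)");
  }
} finally {
  consumer.close();
}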
Example #1
Source File: KafkaWrapper.java    From incubator-gobblin with Apache License 2.0
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, String... selectedTopics) {
  LOG.info(String.format("Fetching topic metadata from broker %s", broker));
  SimpleConsumer consumer = null;
  try {
    consumer = getSimpleConsumer(broker);
    for (int i = 0; i < this.fetchTopicRetries; i++) {
      try {
        return consumer.send(new TopicMetadataRequest(Arrays.asList(selectedTopics))).topicsMetadata();
      } catch (Exception e) {
        LOG.warn(String.format("Fetching topic metadata from broker %s has failed %d times.", broker, i + 1), e);
        try {
          Thread.sleep((long) ((i + Math.random()) * 1000));
        } catch (InterruptedException e2) {
          LOG.warn("Caught InterruptedException: " + e2);
        }
      }
    }
  } finally {
    if (consumer != null) {
      consumer.close();
    }
  }
  return null;
}
 
Example #2
Source File: KafkaTool.java    From Scribengin with GNU Affero General Public License v3.0
public TopicMetadata findTopicMetadata(final String topic, int retries) throws Exception {
  Operation<TopicMetadata> findTopicOperation = new Operation<TopicMetadata>() {
    @Override
    public TopicMetadata execute() throws Exception {
      List<String> topics = Collections.singletonList(topic);
      TopicMetadataRequest req = new TopicMetadataRequest(topics);
      TopicMetadataResponse resp = consumer.send(req);

      List<TopicMetadata> topicMetadatas = resp.topicsMetadata();
      if (topicMetadatas.size() != 1) {
        throw new Exception("Expect to find 1 topic " + topic + ", but found " + topicMetadatas.size());
      }

      return topicMetadatas.get(0);
    }
  };
  return execute(findTopicOperation, retries);
}
 
Example #3
Source File: LegacyKafkaClient.java    From secor with Apache License 2.0
@Override
public int getNumPartitions(String topic) {
    SimpleConsumer consumer = null;
    try {
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "partitionLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topic);
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        if (response.topicsMetadata().size() != 1) {
            throw new RuntimeException("Expected one metadata for topic " + topic + " found " +
                response.topicsMetadata().size());
        }
        TopicMetadata topicMetadata = response.topicsMetadata().get(0);
        return topicMetadata.partitionsMetadata().size();
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
 
Example #4
Source File: Kafka08ConsumerClient.java    From incubator-gobblin with Apache License 2.0
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, String... selectedTopics) {
  log.info(String.format("Fetching topic metadata from broker %s", broker));
  SimpleConsumer consumer = null;
  try {
    consumer = getSimpleConsumer(broker);
    for (int i = 0; i < this.fetchTopicRetries; i++) {
      try {
        return consumer.send(new TopicMetadataRequest(Arrays.asList(selectedTopics))).topicsMetadata();
      } catch (Exception e) {
        log.warn(String.format("Fetching topic metadata from broker %s has failed %d times.", broker, i + 1), e);
        try {
          Thread.sleep((long) ((i + Math.random()) * 1000));
        } catch (InterruptedException e2) {
          log.warn("Caught InterruptedException: " + e2);
        }
      }
    }
  } finally {
    if (consumer != null) {
      consumer.close();
    }
  }
  return null;
}
 
Example #5
Source File: KafkaUtils.java    From kafka-monitor with Apache License 2.0
public TopicMetadataResponse topicMetadataRequest(BlockingChannel channel, String[] topics) {
    TopicMetadataRequest request = new TopicMetadataRequest((short) 0, 0, "kafkaMonitor", Arrays.asList(topics));
    channel.send(request);
    final kafka.api.TopicMetadataResponse underlyingResponse =
            kafka.api.TopicMetadataResponse.readFrom(channel.receive().payload());
    TopicMetadataResponse response = new TopicMetadataResponse(underlyingResponse);
    return response;
}
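Unlike the other examples, this one sends the request over a raw BlockingChannel rather than a SimpleConsumer, using the constructor that sets the version id, correlation id, and client id explicitly. A hedged usage sketch, assuming Kafka 0.8's kafka.network.BlockingChannel API and placeholder connection settings (kafkaUtils is a hypothetical instance of the class above):

BlockingChannel channel = new BlockingChannel("localhost", 9092,
        BlockingChannel.UseDefaultBufferSize(),
        BlockingChannel.UseDefaultBufferSize(),
        10000 /* read timeout ms */);
channel.connect();
try {
    TopicMetadataResponse response =
            kafkaUtils.topicMetadataRequest(channel, new String[] {"my-topic"});
    System.out.println(response.topicsMetadata());
} finally {
    channel.disconnect();
}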
 
Example #6
Source File: KafkaInputFormat.java    From HiveKa with Apache License 2.0
/**
 * Gets the topic metadata from Kafka.
 *
 * @param conf the job configuration, which must define kafka.brokers
 * @return the topic metadata fetched from the first broker that responds
 */
public List<TopicMetadata> getKafkaMetadata(JobConf conf) {
  // An empty topic list asks the broker for metadata on all topics.
  ArrayList<String> metaRequestTopics = new ArrayList<String>();
  String brokerString = getKafkaBrokers(conf);
  if (brokerString.isEmpty()) {
    throw new InvalidParameterException("kafka.brokers must contain at least one node");
  }
  List<String> brokers = Arrays.asList(brokerString.split("\\s*,\\s*"));
  Collections.shuffle(brokers);
  boolean fetchMetaDataSucceeded = false;
  int i = 0;
  List<TopicMetadata> topicMetadataList = null;
  Exception savedException = null;
  while (i < brokers.size() && !fetchMetaDataSucceeded) {
    log.info("Trying to connect to broker: " + brokers.get(i));
    SimpleConsumer consumer = createConsumer(conf, brokers.get(i));
    log.info(String.format("Fetching metadata from broker %s with client id %s for %d topic(s) %s",
        brokers.get(i), consumer.clientId(), metaRequestTopics.size(), metaRequestTopics));
    try {
      topicMetadataList = consumer.send(new TopicMetadataRequest(metaRequestTopics)).topicsMetadata();
      fetchMetaDataSucceeded = true;
    } catch (Exception e) {
      savedException = e;
      log.warn(String.format("Fetching topic metadata with client id %s for topics [%s] from broker [%s] failed",
          consumer.clientId(), metaRequestTopics, brokers.get(i)), e);
    } finally {
      consumer.close();
      i++;
    }
  }
  if (!fetchMetaDataSucceeded) {
    throw new RuntimeException("Failed to obtain metadata!", savedException);
  }
  return topicMetadataList;
}
 
Example #7
Source File: LegacyKafkaClient.java    From secor with Apache License 2.0
private HostAndPort findLeader(TopicPartition topicPartition) {
    SimpleConsumer consumer = null;
    try {
        LOG.debug("looking up leader for topic {} partition {}", topicPartition.getTopic(), topicPartition.getPartition());
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "leaderLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topicPartition.getTopic());
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);

        List<TopicMetadata> metaData = response.topicsMetadata();
        for (TopicMetadata item : metaData) {
            for (PartitionMetadata part : item.partitionsMetadata()) {
                if (part.partitionId() == topicPartition.getPartition()) {
                    return HostAndPort.fromParts(part.leader().host(), part.leader().port());
                }
            }
        }
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return null;
}
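The returned HostAndPort identifies the partition leader, which is where subsequent fetch and offset requests must be sent. A hypothetical follow-up sketch (hedged: HostAndPort is Guava's, and the accessor is getHostText() in older Guava releases rather than getHost()):

HostAndPort leader = findLeader(topicPartition);
if (leader != null) {
    // Connect a consumer directly to the leader; timeout/buffer/client id are placeholders.
    SimpleConsumer leaderConsumer = new SimpleConsumer(
        leader.getHost(), leader.getPort(), 10000, 64 * 1024, "leaderConsumer");
    // ... issue fetch/offset requests against the leader, then close() it.
}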
 
Example #8
Source File: KafkaTopicService.java    From Decision with Apache License 2.0
@Override
public Integer getNumPartitionsForTopic(String topic){
    TopicMetadataRequest topicRequest = new TopicMetadataRequest(Arrays.asList(topic));
    TopicMetadataResponse topicResponse = simpleConsumer.send(topicRequest);
    for (TopicMetadata topicMetadata : topicResponse.topicsMetadata()) {
        if (topic.equals(topicMetadata.topic())) {
            int partitionSize = topicMetadata.partitionsMetadata().size();
            logger.debug("Partition size found ({}) for {} topic", partitionSize, topic);
            return partitionSize;
        }
    }
    logger.warn("Metadata info not found!. TOPIC {}", topic);
    return null;
}
 
Example #9
Source File: KafkaPartitionLevelConsumerTest.java    From incubator-pinot with Apache License 2.0
@Override
public TopicMetadataResponse send(TopicMetadataRequest request) {
  java.util.List<String> topics = request.topics();
  TopicMetadata[] topicMetadataArray = new TopicMetadata[topics.size()];

  for (int i = 0; i < topicMetadataArray.length; i++) {
    String topic = topics.get(i);
    if (!topic.equals(topicName)) {
      topicMetadataArray[i] = new TopicMetadata(topic, null, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
    } else {
      PartitionMetadata[] partitionMetadataArray = new PartitionMetadata[partitionCount];
      for (int j = 0; j < partitionCount; j++) {
        java.util.List<BrokerEndPoint> emptyJavaList = Collections.emptyList();
        List<BrokerEndPoint> emptyScalaList = JavaConversions.asScalaBuffer(emptyJavaList).toList();
        partitionMetadataArray[j] =
            new PartitionMetadata(j, Some.apply(brokerArray[partitionLeaderIndices[j]]), emptyScalaList,
                emptyScalaList, Errors.NONE.code());
      }

      Seq<PartitionMetadata> partitionsMetadata = List.fromArray(partitionMetadataArray);
      topicMetadataArray[i] = new TopicMetadata(topic, partitionsMetadata, Errors.NONE.code());
    }
  }

  Seq<BrokerEndPoint> brokers = List.fromArray(brokerArray);
  Seq<TopicMetadata> topicsMetadata = List.fromArray(topicMetadataArray);

  return new TopicMetadataResponse(new kafka.api.TopicMetadataResponse(brokers, topicsMetadata, -1));
}
 
Example #10
Source File: ScribenginAM.java    From Scribengin with GNU Affero General Public License v3.0
private void getMetaData(String topic) {
  LOG.info("Fetching metadata for topic " + topic + " from seed brokers: " + this.brokerList);

  for (HostPort seed : brokerList) {
    SimpleConsumer consumer = new SimpleConsumer(
        seed.getHost(),
        seed.getPort(),
        10000,   // timeout
        64*1024, // bufferSize
        "metaLookup"  // clientId
        );
    try {
      List<String> topicList = Collections.singletonList(topic);

      TopicMetadataRequest req = new TopicMetadataRequest(topicList);
      kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
      List<TopicMetadata> metaDataList = resp.topicsMetadata();
      LOG.info("metaDataList: " + metaDataList);

      for (TopicMetadata m : metaDataList) {
        for (PartitionMetadata part : m.partitionsMetadata()) {
          storeMetadata(topic, part);
        }
      }
    } finally {
      // close the per-broker consumer even if the request fails
      consumer.close();
    }
  }
}
 
Example #11
Source File: KafkaStreamReader.java    From arcusplatform with Apache License 2.0
private List<TopicMetadata> getMetadata(SimpleConsumer consumer) {
   TopicMetadataRequest request = new TopicMetadataRequest(ImmutableList.copyOf(config.getTopics()));
   TopicMetadataResponse response = consumer.send(request);
   return response.topicsMetadata();
}
 
Example #12
Source File: OffsetMonitor.java    From uReplicator with Apache License 2.0
private void updateTopicList() {
  logger.info("Update topicList");
  topicList.clear();
  partitionLeader.clear();

  // update topicList
  topicList = helixMirrorMakerManager.getTopicLists();
  logger.debug("TopicList: {}", topicList);
  Set<String> topicSet = new HashSet<>(topicList);

  // update partitionLeader
  for (String broker : srcBrokerList) {
    try {
      SimpleConsumer consumer = getSimpleConsumer(broker);
      TopicMetadataRequest req = new TopicMetadataRequest(topicList);
      kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
      List<TopicMetadata> metaData = resp.topicsMetadata();

      for (TopicMetadata tmd : metaData) {
        for (PartitionMetadata pmd : tmd.partitionsMetadata()) {
          TopicAndPartition topicAndPartition = new TopicAndPartition(tmd.topic(),
              pmd.partitionId());
          if (topicSet.contains(tmd.topic())) {
            partitionLeader.put(topicAndPartition, pmd.leader());
          }
        }
      }
      Iterator<Entry<TopicAndPartition, TopicPartitionLag>> iter = noProgressMap.entrySet()
          .iterator();
      while (iter.hasNext()) {
        TopicAndPartition tp = iter.next().getKey();
        if (!topicSet.contains(tp.topic())) {
          iter.remove();
          logger.info("Remove non exist topic {} from noProgressMap", tp);
        }
      }
      break;
    } catch (Exception e) {
      logger.warn("Got exception to get metadata from broker=" + broker, e);
    }
  }
  logger.debug("partitionLeader: {}", partitionLeader);
}
 
Example #13
Source File: PulsarKafkaSimpleConsumer.java    From pulsar with Apache License 2.0
@Override
public PulsarTopicMetadataResponse send(TopicMetadataRequest request) {
    List<String> topics = request.topics();
    PulsarTopicMetadataResponse response = new PulsarTopicMetadataResponse(admin, host, port, topics);
    return response;
}
 
Example #14
Source File: Kafka08PartitionDiscoverer.java    From Flink-CEPplus with Apache License 2.0
@Override
protected List<String> getAllTopics() {
	List<String> topics = new LinkedList<>();

	retryLoop: for (int retry = 0; retry < numRetries; retry++) {
		brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
			LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

			try {
				// clear in case we have an incomplete list from previous tries
				topics.clear();

				for (TopicMetadata item : consumer.send(new TopicMetadataRequest(Collections.<String>emptyList())).topicsMetadata()) {
					if (item.errorCode() != ErrorMapping.NoError()) {
						// warn and try more brokers
						LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
							seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					topics.add(item.topic());
				}
				break retryLoop; // leave the loop through the brokers
			}
			catch (Exception e) {
				//validates seed brokers in case of a ClosedChannelException
				validateSeedBrokers(seedBrokerAddresses, e);
				LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
					seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
				LOG.debug("Detailed trace", e);

				// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
				try {
					Thread.sleep(500);
				} catch (InterruptedException e1) {
					// sleep shorter.
				}

				useNextAddressAsNewContactSeedBroker();
			}
		} // brokers loop
	} // retries loop

	return topics;
}
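Note the request construction above: passing an empty topic list asks the broker for metadata on every topic it knows about, which is how getAllTopics() discovers the full topic set without naming any topic up front:

// Empty list = "give me metadata for all topics".
TopicMetadataRequest allTopics = new TopicMetadataRequest(Collections.<String>emptyList());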
 
Example #15
Source File: Kafka08PartitionDiscoverer.java    From flink with Apache License 2.0
/**
 * Sends a request to Kafka to get the partition leaders for the given topics.
 *
 * @param topics the names of the topics
 * @return the leader for each partition of the given topics
 */
public List<KafkaTopicPartitionLeader> getPartitionLeadersForTopics(List<String> topics) {
	List<KafkaTopicPartitionLeader> partitions = new LinkedList<>();

	retryLoop: for (int retry = 0; retry < numRetries; retry++) {
		brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
			LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

			try {
				// clear in case we have an incomplete list from previous tries
				partitions.clear();

				for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) {
					if (item.errorCode() != ErrorMapping.NoError()) {
						// warn and try more brokers
						LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
							seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					if (!topics.contains(item.topic())) {
						LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					for (PartitionMetadata part : item.partitionsMetadata()) {
						Node leader = brokerToNode(part.leader());
						KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
						KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
						partitions.add(pInfo);
					}
				}
				break retryLoop; // leave the loop through the brokers
			}
			catch (Exception e) {
				//validates seed brokers in case of a ClosedChannelException
				validateSeedBrokers(seedBrokerAddresses, e);
				LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
					seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
				LOG.debug("Detailed trace", e);

				// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
				try {
					Thread.sleep(500);
				} catch (InterruptedException e1) {
					// sleep shorter.
				}

				useNextAddressAsNewContactSeedBroker();
			}
		} // brokers loop
	} // retries loop

	return partitions;
}
 
Example #16
Source File: Kafka08PartitionDiscoverer.java    From flink with Apache License 2.0
@Override
protected List<String> getAllTopics() {
	List<String> topics = new LinkedList<>();

	retryLoop: for (int retry = 0; retry < numRetries; retry++) {
		brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
			LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

			try {
				// clear in case we have an incomplete list from previous tries
				topics.clear();

				for (TopicMetadata item : consumer.send(new TopicMetadataRequest(Collections.<String>emptyList())).topicsMetadata()) {
					if (item.errorCode() != ErrorMapping.NoError()) {
						// warn and try more brokers
						LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
							seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					topics.add(item.topic());
				}
				break retryLoop; // leave the loop through the brokers
			}
			catch (Exception e) {
				//validates seed brokers in case of a ClosedChannelException
				validateSeedBrokers(seedBrokerAddresses, e);
				LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
					seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
				LOG.debug("Detailed trace", e);

				// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
				try {
					Thread.sleep(500);
				} catch (InterruptedException e1) {
					// sleep shorter.
				}

				useNextAddressAsNewContactSeedBroker();
			}
		} // brokers loop
	} // retries loop

	return topics;
}
 
Example #17
Source File: KafkaStreamMetadataProvider.java    From incubator-pinot with Apache License 2.0
/**
 * Fetches the number of partitions for this kafka stream.
 *
 * @param timeoutMillis maximum time to wait for the partition count, in milliseconds
 * @return the number of partitions for the topic
 */
@Override
public synchronized int fetchPartitionCount(long timeoutMillis) {
  int unknownTopicReplyCount = 0;
  final int MAX_UNKNOWN_TOPIC_REPLY_COUNT = 10;
  int kafkaErrorCount = 0;
  final int MAX_KAFKA_ERROR_COUNT = 10;

  final long endTime = System.currentTimeMillis() + timeoutMillis;

  while (System.currentTimeMillis() < endTime) {
    // Try to get into a state where we're connected to Kafka
    while (!_currentState.isConnectedToKafkaBroker() && System.currentTimeMillis() < endTime) {
      _currentState.process();
    }

    if (endTime <= System.currentTimeMillis() && !_currentState.isConnectedToKafkaBroker()) {
      throw new TimeoutException(
          "Failed to get the partition count for topic " + _topic + " within " + timeoutMillis + " ms");
    }

    // Send the metadata request to Kafka
    TopicMetadataResponse topicMetadataResponse = null;
    try {
      topicMetadataResponse = _simpleConsumer.send(new TopicMetadataRequest(Collections.singletonList(_topic)));
    } catch (Exception e) {
      _currentState.handleConsumerException(e);
      continue;
    }

    final TopicMetadata topicMetadata = topicMetadataResponse.topicsMetadata().get(0);
    final short errorCode = topicMetadata.errorCode();

    if (errorCode == Errors.NONE.code()) {
      return topicMetadata.partitionsMetadata().size();
    } else if (errorCode == Errors.LEADER_NOT_AVAILABLE.code()) {
      // If there is no leader, it'll take some time for a new leader to be elected, wait 100 ms before retrying
      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    } else if (errorCode == Errors.INVALID_TOPIC_EXCEPTION.code()) {
      throw new RuntimeException("Invalid topic name " + _topic);
    } else if (errorCode == Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) {
      if (MAX_UNKNOWN_TOPIC_REPLY_COUNT < unknownTopicReplyCount) {
        throw new RuntimeException("Topic " + _topic + " does not exist");
      } else {
        // Kafka topic creation can sometimes take some time, so we'll retry after a little bit
        unknownTopicReplyCount++;
        Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
      }
    } else {
      // Retry after a short delay
      kafkaErrorCount++;

      if (MAX_KAFKA_ERROR_COUNT < kafkaErrorCount) {
        throw exceptionForKafkaErrorCode(errorCode);
      }

      Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS);
    }
  }

  throw new TimeoutException();
}
 
Example #18
Source File: Kafka08PartitionDiscoverer.java    From Flink-CEPplus with Apache License 2.0
/**
 * Sends a request to Kafka to get the partition leaders for the given topics.
 *
 * @param topics the names of the topics
 * @return the leader for each partition of the given topics
 */
public List<KafkaTopicPartitionLeader> getPartitionLeadersForTopics(List<String> topics) {
	List<KafkaTopicPartitionLeader> partitions = new LinkedList<>();

	retryLoop: for (int retry = 0; retry < numRetries; retry++) {
		brokersLoop: for (int arrIdx = 0; arrIdx < seedBrokerAddresses.length; arrIdx++) {
			LOG.info("Trying to get topic metadata from broker {} in try {}/{}", seedBrokerAddresses[currentContactSeedBrokerIndex], retry, numRetries);

			try {
				// clear in case we have an incomplete list from previous tries
				partitions.clear();

				for (TopicMetadata item : consumer.send(new TopicMetadataRequest(topics)).topicsMetadata()) {
					if (item.errorCode() != ErrorMapping.NoError()) {
						// warn and try more brokers
						LOG.warn("Error while getting metadata from broker {} to find partitions for {}. Error: {}.",
							seedBrokerAddresses[currentContactSeedBrokerIndex], topics.toString(), ErrorMapping.exceptionFor(item.errorCode()).getMessage());

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					if (!topics.contains(item.topic())) {
						LOG.warn("Received metadata from topic " + item.topic() + " even though it was not requested. Skipping ...");

						useNextAddressAsNewContactSeedBroker();
						continue brokersLoop;
					}

					for (PartitionMetadata part : item.partitionsMetadata()) {
						Node leader = brokerToNode(part.leader());
						KafkaTopicPartition ktp = new KafkaTopicPartition(item.topic(), part.partitionId());
						KafkaTopicPartitionLeader pInfo = new KafkaTopicPartitionLeader(ktp, leader);
						partitions.add(pInfo);
					}
				}
				break retryLoop; // leave the loop through the brokers
			}
			catch (Exception e) {
				//validates seed brokers in case of a ClosedChannelException
				validateSeedBrokers(seedBrokerAddresses, e);
				LOG.warn("Error communicating with broker {} to find partitions for {}. {} Message: {}",
					seedBrokerAddresses[currentContactSeedBrokerIndex], topics, e.getClass().getName(), e.getMessage());
				LOG.debug("Detailed trace", e);

				// we sleep a bit. Retrying immediately doesn't make sense in cases where Kafka is reorganizing the leader metadata
				try {
					Thread.sleep(500);
				} catch (InterruptedException e1) {
					// sleep shorter.
				}

				useNextAddressAsNewContactSeedBroker();
			}
		} // brokers loop
	} // retries loop

	return partitions;
}