kafka.javaapi.TopicMetadata Java Examples

The following examples show how to use kafka.javaapi.TopicMetadata. Each example is drawn from an open source project; the source file, project, and license are listed above each snippet.
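All of the snippets below follow the same basic pattern: open a kafka.javaapi.consumer.SimpleConsumer against a broker, send a kafka.javaapi.TopicMetadataRequest, and walk the TopicMetadata and PartitionMetadata objects in the response. As a point of reference, here is a minimal, self-contained sketch of that pattern; the broker address localhost:9092, the client id "metadataLookup", and the topic name "my-topic" are placeholders, not values taken from any of the projects below.

import java.util.Collections;

import kafka.javaapi.PartitionMetadata;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.TopicMetadataRequest;
import kafka.javaapi.TopicMetadataResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class TopicMetadataLookup {
  public static void main(String[] args) {
    // Placeholder broker host/port, socket timeout, buffer size, and client id.
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "metadataLookup");
    try {
      // Ask the broker for metadata about a single topic.
      TopicMetadataRequest request = new TopicMetadataRequest(Collections.singletonList("my-topic"));
      TopicMetadataResponse response = consumer.send(request);
      for (TopicMetadata topicMetadata : response.topicsMetadata()) {
        System.out.println("Topic: " + topicMetadata.topic());
        for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
          // leader() can be null while a leader election is in progress.
          String leader = partitionMetadata.leader() == null ? "none"
              : partitionMetadata.leader().host() + ":" + partitionMetadata.leader().port();
          System.out.println("  partition " + partitionMetadata.partitionId() + ", leader " + leader);
        }
      }
    } finally {
      consumer.close();
    }
  }
}

Several of the project examples below apply the same null check on leader() before reading the host and port, since a partition can be temporarily without a leader.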
Example #1
Source File: KafkaPartitionReaderUnitTest.java    From Scribengin with GNU Affero General Public License v3.0
private void readFromPartition(String consumerName, int partition, int maxRead) throws Exception {
  KafkaTool kafkaTool = new KafkaTool(consumerName, cluster.getZKConnect());
  kafkaTool.connect();
  TopicMetadata topicMetadata = kafkaTool.findTopicMetadata("hello");
  PartitionMetadata partitionMetadata = findPartition(topicMetadata.partitionsMetadata(), partition);
  KafkaPartitionReader partitionReader = 
      new KafkaPartitionReader(consumerName, cluster.getZKConnect(), "hello", partitionMetadata);
  List<byte[]> messages = partitionReader.fetch(10000, maxRead);
  for(int i = 0; i < messages.size(); i++) {
    byte[] message = messages.get(i);
    System.out.println((i + 1) + ". " + new String(message));
  }
  partitionReader.commit();
  partitionReader.close();
  kafkaTool.close();
}
 
Example #2
Source File: Kafka08ConsumerClient.java    From incubator-gobblin with Apache License 2.0
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, String... selectedTopics) {
  log.info(String.format("Fetching topic metadata from broker %s", broker));
  SimpleConsumer consumer = null;
  try {
    consumer = getSimpleConsumer(broker);
    for (int i = 0; i < this.fetchTopicRetries; i++) {
      try {
        return consumer.send(new TopicMetadataRequest(Arrays.asList(selectedTopics))).topicsMetadata();
      } catch (Exception e) {
        log.warn(String.format("Fetching topic metadata from broker %s has failed %d times.", broker, i + 1), e);
        try {
          Thread.sleep((long) ((i + Math.random()) * 1000));
        } catch (InterruptedException e2) {
          log.warn("Caught InterruptedException: " + e2);
        }
      }
    }
  } finally {
    if (consumer != null) {
      consumer.close();
    }
  }
  return null;
}
 
Example #3
Source File: Kafka08ConsumerClient.java    From incubator-gobblin with Apache License 2.0
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
  List<KafkaPartition> partitions = Lists.newArrayList();

  for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (null == partitionMetadata) {
      log.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
      return Collections.emptyList();
    }
    if (null == partitionMetadata.leader()) {
      log.error("Ignoring topic with null partition leader " + topicMetadata.topic() + " metatada="
          + partitionMetadata);
      return Collections.emptyList();
    }
    partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
        .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
        .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
  }
  return partitions;
}
 
Example #4
Source File: Kafka08ConsumerClient.java    From incubator-gobblin with Apache License 2.0
private void refreshTopicMetadata(KafkaPartition partition) {
  for (String broker : this.brokers) {
    List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
    if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
      TopicMetadata topicMetadata = topicMetadataList.get(0);
      for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (partitionMetadata.partitionId() == partition.getId()) {
          partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(), partitionMetadata
              .leader().port());
          break;
        }
      }
      break;
    }
  }
}
 
Example #5
Source File: KafkaWrapper.java    From incubator-gobblin with Apache License 2.0
private void refreshTopicMetadata(KafkaPartition partition) {
  for (String broker : KafkaWrapper.this.getBrokers()) {
    List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker, partition.getTopicName());
    if (topicMetadataList != null && !topicMetadataList.isEmpty()) {
      TopicMetadata topicMetadata = topicMetadataList.get(0);
      for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
        if (partitionMetadata.partitionId() == partition.getId()) {
          partition.setLeader(partitionMetadata.leader().id(), partitionMetadata.leader().host(),
              partitionMetadata.leader().port());
          break;
        }
      }
      break;
    }
  }
}
 
Example #6
Source File: KafkaWrapper.java    From incubator-gobblin with Apache License 2.0
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, String... selectedTopics) {
  LOG.info(String.format("Fetching topic metadata from broker %s", broker));
  SimpleConsumer consumer = null;
  try {
    consumer = getSimpleConsumer(broker);
    for (int i = 0; i < this.fetchTopicRetries; i++) {
      try {
        return consumer.send(new TopicMetadataRequest(Arrays.asList(selectedTopics))).topicsMetadata();
      } catch (Exception e) {
        LOG.warn(String.format("Fetching topic metadata from broker %s has failed %d times.", broker, i + 1), e);
        try {
          Thread.sleep((long) ((i + Math.random()) * 1000));
        } catch (InterruptedException e2) {
          LOG.warn("Caught InterruptedException: " + e2);
        }
      }
    }
  } finally {
    if (consumer != null) {
      consumer.close();
    }
  }
  return null;
}
 
Example #7
Source File: KafkaWrapper.java    From incubator-gobblin with Apache License 2.0
private List<TopicMetadata> fetchTopicMetadataFromBroker(String broker, List<Pattern> blacklist,
    List<Pattern> whitelist) {

  List<TopicMetadata> topicMetadataList = fetchTopicMetadataFromBroker(broker);
  if (topicMetadataList == null) {
    return null;
  }

  List<TopicMetadata> filteredTopicMetadataList = Lists.newArrayList();
  for (TopicMetadata topicMetadata : topicMetadataList) {
    if (DatasetFilterUtils.survived(topicMetadata.topic(), blacklist, whitelist)) {
      filteredTopicMetadataList.add(topicMetadata);
    }
  }
  return filteredTopicMetadataList;
}
 
Example #8
Source File: KafkaWrapper.java    From incubator-gobblin with Apache License 2.0
private List<KafkaPartition> getPartitionsForTopic(TopicMetadata topicMetadata) {
  List<KafkaPartition> partitions = Lists.newArrayList();

  for (PartitionMetadata partitionMetadata : topicMetadata.partitionsMetadata()) {
    if (null == partitionMetadata) {
      LOG.error("Ignoring topic with null partition metadata " + topicMetadata.topic());
      return Collections.emptyList();
    }
    if (null == partitionMetadata.leader()) {
      LOG.error(
          "Ignoring topic with null partition leader " + topicMetadata.topic() + " metadata=" + partitionMetadata);
      return Collections.emptyList();
    }
    partitions.add(new KafkaPartition.Builder().withId(partitionMetadata.partitionId())
        .withTopicName(topicMetadata.topic()).withLeaderId(partitionMetadata.leader().id())
        .withLeaderHostAndPort(partitionMetadata.leader().host(), partitionMetadata.leader().port()).build());
  }
  return partitions;
}
 
Example #9
Source File: LegacyKafkaClient.java    From secor with Apache License 2.0
@Override
public int getNumPartitions(String topic) {
    SimpleConsumer consumer = null;
    try {
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "partitionLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topic);
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);
        if (response.topicsMetadata().size() != 1) {
            throw new RuntimeException("Expected one metadata for topic " + topic + " found " +
                response.topicsMetadata().size());
        }
        TopicMetadata topicMetadata = response.topicsMetadata().get(0);
        return topicMetadata.partitionsMetadata().size();
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
}
 
Example #10
Source File: KafkaTool.java    From Scribengin with GNU Affero General Public License v3.0
public TopicMetadata findTopicMetadata(final String topic, int retries) throws Exception {
  Operation<TopicMetadata> findTopicOperation = new Operation<TopicMetadata>() {
    @Override
    public TopicMetadata execute() throws Exception {
      List<String> topics = Collections.singletonList(topic);
      TopicMetadataRequest req = new TopicMetadataRequest(topics);
      TopicMetadataResponse resp = consumer.send(req);

      List<TopicMetadata> topicMetadatas = resp.topicsMetadata();
      if (topicMetadatas.size() != 1) {
        throw new Exception("Expect to find 1 topic " + topic + ", but found " + topicMetadatas.size());
      }

      return topicMetadatas.get(0);
    }
  };
  return execute(findTopicOperation, retries);
}
 
Example #11
Source File: PulsarTopicMetadataResponse.java    From pulsar with Apache License 2.0
@Override
public List<TopicMetadata> topicsMetadata() {
    List<TopicMetadata> metadataList = Lists.newArrayList();
    topics.forEach(topic -> {
        try {
            int partitions;
            partitions = admin.topics().getPartitionedTopicMetadata(topic).partitions;
            if (partitions > 0) {
                for (int partition = 0; partition < partitions; partition++) {
                    String topicName = TopicName.get(topic).getPartition(partition).toString();
                    metadataList.add(new PulsarTopicMetadata(hostUrl, port, topicName));
                }
            } else {
                metadataList.add(new PulsarTopicMetadata(hostUrl, port, topic));
            }
        } catch (PulsarAdminException e) {
            log.error("Failed to get partitioned metadata for {}", topic, e);
            throw new RuntimeException("Failed to get partitioned-metadata", e);
        }
    });
    return metadataList;
}
 
Example #12
Source File: AckKafkaWriterTestRunner.java    From Scribengin with GNU Affero General Public License v3.0
public void run() {
  try {
    while (!exit) {
      KafkaTool kafkaTool = new KafkaTool(topic, cluster.getZKConnect());
      kafkaTool.connect();
      TopicMetadata topicMeta = kafkaTool.findTopicMetadata(topic);
      PartitionMetadata partitionMeta = findPartition(topicMeta, partition);
      Broker partitionLeader = partitionMeta.leader();
      Server kafkaServer = cluster.findKafkaServerByPort(partitionLeader.port());
      System.out.println("Shutdown kafka server " + kafkaServer.getPort());
      kafkaServer.shutdown();
      failureCount++;
      Thread.sleep(sleepBeforeRestart);
      kafkaServer.start();
      kafkaTool.close();
      Thread.sleep(10000); //wait to make sure that the kafka server has started
    }
  } catch (Exception e) {
    e.printStackTrace();
  }
  synchronized (this) {
    notify();
  }
}
 
Example #13
Source File: KafkaSource.java    From Scribengin with GNU Affero General Public License v3.0
void init(StorageDescriptor descriptor) throws Exception {
  this.descriptor = descriptor;
  KafkaTool kafkaTool = new KafkaTool(descriptor.attribute("name"), descriptor.attribute("zk.connect"));
  kafkaTool.connect();
  TopicMetadata topicMetadata = kafkaTool.findTopicMetadata(descriptor.attribute("topic"));
  List<PartitionMetadata> partitionMetadatas = topicMetadata.partitionsMetadata();
  for(int i = 0; i < partitionMetadatas.size(); i++) {
    PartitionMetadata partitionMetadata = partitionMetadatas.get(i);
    KafkaSourceStream sourceStream = new KafkaSourceStream(descriptor, partitionMetadata);
    sourceStreams.put(sourceStream.getId(), sourceStream);
  }
  kafkaTool.close();
}
 
Example #14
Source File: ScribenginAM.java    From Scribengin with GNU Affero General Public License v3.0
private void getMetaData(String topic) {
  LOG.info("inside getMetaData"); //xxx
  LOG.info("seedBrokerList" + this.brokerList); //xxx

  for (HostPort seed: brokerList) {
    SimpleConsumer consumer = new SimpleConsumer(
        seed.getHost(),
        seed.getPort(),
        10000,   // timeout
        64*1024, // bufferSize
        "metaLookup"  // clientId
        );
    List<String> topicList = Collections.singletonList(topic);

    TopicMetadataRequest req = new TopicMetadataRequest(topicList);
    kafka.javaapi.TopicMetadataResponse resp = consumer.send(req);
    List<TopicMetadata> metaDataList = resp.topicsMetadata();
    LOG.info("metaDataList: " + metaDataList); //xxxx

    for (TopicMetadata m: metaDataList) {
      LOG.info("inside the metadatalist loop"); //xxx
      LOG.info("m partitionsMetadata: " + m.partitionsMetadata()); //xxx
      for (PartitionMetadata part : m.partitionsMetadata()) {
        LOG.info("inside the partitionmetadata loop"); //xxx
        storeMetadata(topic, part);
      }
    }
  }
}
 
Example #15
Source File: KafkaTopicService.java    From Decision with Apache License 2.0
@Override
public Integer getNumPartitionsForTopic(String topic){
    TopicMetadataRequest topicRequest = new TopicMetadataRequest(Arrays.asList(topic));
    TopicMetadataResponse topicResponse = simpleConsumer.send(topicRequest);
    for (TopicMetadata topicMetadata : topicResponse.topicsMetadata()) {
        if (topic.equals(topicMetadata.topic())) {
            int partitionSize = topicMetadata.partitionsMetadata().size();
            logger.debug("Partition size found ({}) for {} topic", partitionSize, topic);
            return partitionSize;
        }
    }
    logger.warn("Metadata info not found!. TOPIC {}", topic);
    return null;
}
 
Example #16
Source File: KafkaTool.java    From Scribengin with GNU Affero General Public License v3.0
public PartitionMetadata findPartitionMetadata(String topic, int partition) throws Exception {
  TopicMetadata topicMetadata = findTopicMetadata(topic);
  for (PartitionMetadata sel : topicMetadata.partitionsMetadata()) {
    if (sel.partitionId() == partition)
      return sel;
  }
  return null;
}
 
Example #17
Source File: KafkaClusterTool.java    From Scribengin with GNU Affero General Public License v3.0
Server findLeader(String topic, int partition) throws Exception {
  KafkaTool kafkaTool = new KafkaTool("KafkaPartitionLeaderKiller", cluster.getZKConnect());
  kafkaTool.connect();
  TopicMetadata topicMeta = kafkaTool.findTopicMetadata(topic);
  PartitionMetadata partitionMeta = findPartition(topicMeta, partition);
  Broker partitionLeader = partitionMeta.leader();
  Server kafkaServer = cluster.findKafkaServerByPort(partitionLeader.port());
  System.out.println("Shutdown kafka server " + kafkaServer.getPort());
  kafkaTool.close();
  return kafkaServer;
}
 
Example #18
Source File: LegacyKafkaClient.java    From secor with Apache License 2.0
private HostAndPort findLeader(TopicPartition topicPartition) {
    SimpleConsumer consumer = null;
    try {
        LOG.debug("looking up leader for topic {} partition {}", topicPartition.getTopic(), topicPartition.getPartition());
        consumer = createConsumer(
            mConfig.getKafkaSeedBrokerHost(),
            mConfig.getKafkaSeedBrokerPort(),
            "leaderLookup");
        List<String> topics = new ArrayList<String>();
        topics.add(topicPartition.getTopic());
        TopicMetadataRequest request = new TopicMetadataRequest(topics);
        TopicMetadataResponse response = consumer.send(request);

        List<TopicMetadata> metaData = response.topicsMetadata();
        for (TopicMetadata item : metaData) {
            for (PartitionMetadata part : item.partitionsMetadata()) {
                if (part.partitionId() == topicPartition.getPartition()) {
                    return HostAndPort.fromParts(part.leader().host(), part.leader().port());
                }
            }
        }
    } finally {
        if (consumer != null) {
            consumer.close();
        }
    }
    return null;
}
 
Example #19
Source File: KafkaClusterTool.java    From Scribengin with GNU Affero General Public License v3.0
PartitionMetadata findPartition(TopicMetadata topicMetadata, int partition) {
  for (PartitionMetadata sel : topicMetadata.partitionsMetadata()) {
    if (sel.partitionId() == partition)
      return sel;
  }
  throw new RuntimeException("Cannot find the partition " + partition);
}
 
Example #20
Source File: KafkaMessageSendTool.java    From Scribengin with GNU Affero General Public License v3.0
public void doSend() throws Exception {
  System.out.println("KafkaMessageSendTool: Start sending the message to kafka");
  runDuration.start();
  ExecutorService writerService = Executors.newFixedThreadPool(topicConfig.numberOfPartition);
  KafkaTool kafkaTool = new KafkaTool("KafkaTool", topicConfig.zkConnect);
  kafkaTool.connect();
  String kafkaConnects = kafkaTool.getKafkaBrokerList();
  //TODO: add option to delete topic if it exists
  //kafkaTool.deleteTopic(topicConfig.topic);
  if(!kafkaTool.topicExits(topicConfig.topic)) {
    kafkaTool.createTopic(topicConfig.topic, topicConfig.replication, topicConfig.numberOfPartition);
  }
 
  TopicMetadata topicMetadata = kafkaTool.findTopicMetadata(topicConfig.topic);
  List<PartitionMetadata> partitionMetadataHolder = topicMetadata.partitionsMetadata();
  for (PartitionMetadata sel : partitionMetadataHolder) {
    PartitionMessageWriter writer = new PartitionMessageWriter(sel, kafkaConnects);
    writers.put(sel.partitionId(), writer);
    writerService.submit(writer);
  }

  writerService.shutdown();
  writerService.awaitTermination(topicConfig.producerConfig.maxDuration, TimeUnit.MILLISECONDS);
  if (!writerService.isTerminated()) {
    writerService.shutdownNow();
  }
  kafkaTool.close();
  runDuration.stop();
}
 
Example #21
Source File: AckKafkaWriterTestRunner.java    From Scribengin with GNU Affero General Public License v3.0
PartitionMetadata findPartition(TopicMetadata topicMetadata, int partition) {
  for (PartitionMetadata sel : topicMetadata.partitionsMetadata()) {
    if (sel.partitionId() == partition)
      return sel;
  }
  throw new RuntimeException("Cannot find the partition " + partition);
}
 
Example #22
Source File: Kafka08ConsumerClient.java    From incubator-gobblin with Apache License 2.0
@Override
public List<KafkaTopic> getTopics() {
  List<TopicMetadata> topicMetadataList = getFilteredMetadataList();

  List<KafkaTopic> filteredTopics = Lists.newArrayList();
  for (TopicMetadata topicMetadata : topicMetadataList) {
    List<KafkaPartition> partitions = getPartitionsForTopic(topicMetadata);
    filteredTopics.add(new KafkaTopic(topicMetadata.topic(), partitions));
  }
  return filteredTopics;
}
 
Example #23
Source File: KafkaWrapper.java    From incubator-gobblin with Apache License 2.0
private List<TopicMetadata> getFilteredMetadataList(List<Pattern> blacklist, List<Pattern> whitelist) {
  List<TopicMetadata> filteredTopicMetadataList = Lists.newArrayList();

  //Try all brokers one by one, until successfully retrieved topic metadata (topicMetadataList is non-null)
  for (String broker : KafkaWrapper.this.getBrokers()) {
    filteredTopicMetadataList = fetchTopicMetadataFromBroker(broker, blacklist, whitelist);
    if (filteredTopicMetadataList != null) {
      return filteredTopicMetadataList;
    }
  }

  throw new RuntimeException(
      "Fetching topic metadata from all brokers failed. See log warning for more information.");
}
 
Example #24
Source File: KafkaWrapper.java    From incubator-gobblin with Apache License 2.0
@Override
public List<KafkaTopic> getFilteredTopics(List<Pattern> blacklist, List<Pattern> whitelist) {
  List<TopicMetadata> topicMetadataList = getFilteredMetadataList(blacklist, whitelist);

  List<KafkaTopic> filteredTopics = Lists.newArrayList();
  for (TopicMetadata topicMetadata : topicMetadataList) {
    List<KafkaPartition> partitions = getPartitionsForTopic(topicMetadata);
    filteredTopics.add(new KafkaTopic(topicMetadata.topic(), partitions));
  }
  return filteredTopics;
}
 
Example #25
Source File: KafkaInputFormat.java    From HiveKa with Apache License 2.0
public List<TopicMetadata> filterWhitelistTopics(
		List<TopicMetadata> topicMetadataList,
		HashSet<String> whiteListTopics) {
	ArrayList<TopicMetadata> filteredTopics = new ArrayList<TopicMetadata>();
	String regex = createTopicRegEx(whiteListTopics);
	for (TopicMetadata topicMetadata : topicMetadataList) {
		if (Pattern.matches(regex, topicMetadata.topic())) {
			filteredTopics.add(topicMetadata);
		} else {
			log.info("Discarding topic : " + topicMetadata.topic());
		}
	}
	return filteredTopics;
}
 
Example #26
Source File: KafkaInputFormat.java    From HiveKa with Apache License 2.0
/**
 * Gets the topic metadata from Kafka, trying the configured brokers in random order.
 *
 * @param conf the job configuration; kafka.brokers must contain at least one broker
 * @return the topic metadata list fetched from the first broker that responds
 */
public List<TopicMetadata> getKafkaMetadata(JobConf conf) {
  ArrayList<String> metaRequestTopics = new ArrayList<String>();
  String brokerString = getKafkaBrokers(conf);
  if (brokerString.isEmpty())
    throw new InvalidParameterException("kafka.brokers must contain at least one node");
  List<String> brokers = Arrays.asList(brokerString.split("\\s*,\\s*"));
  Collections.shuffle(brokers);
  boolean fetchMetaDataSucceeded = false;
  int i = 0;
  List<TopicMetadata> topicMetadataList = null;
  Exception savedException = null;
  while (i < brokers.size() && !fetchMetaDataSucceeded) {
    log.info("Trying to connect to broker: " + brokers.get(i));
    SimpleConsumer consumer = createConsumer(conf, brokers.get(i));
    log.info(String.format("Fetching metadata from broker %s with client id %s for %d topic(s) %s",
        brokers.get(i), consumer.clientId(), metaRequestTopics.size(), metaRequestTopics));
    try {
      topicMetadataList = consumer.send(new TopicMetadataRequest(metaRequestTopics)).topicsMetadata();
      fetchMetaDataSucceeded = true;
    } catch (Exception e) {
      savedException = e;
      log.warn(String.format("Fetching topic metadata with client id %s for topics [%s] from broker [%s] failed",
          consumer.clientId(), metaRequestTopics, brokers.get(i)), e);
    } finally {
      consumer.close();
      i++;
    }
  }
  if (!fetchMetaDataSucceeded) {
    throw new RuntimeException("Failed to obtain metadata!", savedException);
  }
  return topicMetadataList;
}
 
Example #27
Source File: Kafka08ConsumerClient.java    From incubator-gobblin with Apache License 2.0
private List<TopicMetadata> getFilteredMetadataList() {
  //Try all brokers one by one, until successfully retrieved topic metadata (topicMetadataList is non-null)
  for (String broker : this.brokers) {
    List<TopicMetadata> filteredTopicMetadataList = fetchTopicMetadataFromBroker(broker);
    if (filteredTopicMetadataList != null) {
      return filteredTopicMetadataList;
    }
  }

  throw new RuntimeException("Fetching topic metadata from all brokers failed. See log warning for more information.");
}
 
Example #28
Source File: KafkaValidationUtil08.java    From datacollector with Apache License 2.0
@Override
public int getPartitionCount(
    String metadataBrokerList,
    String topic,
    Map<String, Object> kafkaClientConfigs,
    int messageSendMaxRetries,
    long retryBackoffMs
) throws StageException {
  List<HostAndPort> kafkaBrokers = getKafkaBrokers(metadataBrokerList);
  TopicMetadata topicMetadata;
  try {
    topicMetadata = KafkaValidationUtil08.getTopicMetadata(
        kafkaBrokers,
        topic,
        messageSendMaxRetries,
        retryBackoffMs
    );
    if (topicMetadata == null) {
      // Could not get topic metadata from any of the supplied brokers
      throw new StageException(KafkaErrors.KAFKA_03, topic, metadataBrokerList);
    }
    if (topicMetadata.errorCode() == ErrorMapping.UnknownTopicOrPartitionCode()) {
      // Topic does not exist
      throw new StageException(KafkaErrors.KAFKA_04, topic);
    }
    if (topicMetadata.errorCode() != 0) {
      // Topic metadata returned error code other than ErrorMapping.UnknownTopicOrPartitionCode()
      throw new StageException(KafkaErrors.KAFKA_03, topic, metadataBrokerList);
    }
  } catch (IOException e) {
    LOG.error(KafkaErrors.KAFKA_11.getMessage(), topic, kafkaBrokers, e.toString(), e);
    throw new StageException(KafkaErrors.KAFKA_11, topic, kafkaBrokers, e.toString());
  }
  return topicMetadata.partitionsMetadata().size();
}
 
Example #29
Source File: AbstractExactlyOnceKafkaOutputOperator.java    From attic-apex-malhar with Apache License 2.0
private void initializeLastProcessingOffset()
{
  // read last received kafka message
  TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());

  if (tm == null) {
    throw new RuntimeException("Failed to retrieve topic metadata");
  }

  partitionNum = tm.partitionsMetadata().size();

  lastMsgs = new HashMap<Integer, Pair<byte[],byte[]>>(partitionNum);

  for (PartitionMetadata pm : tm.partitionsMetadata()) {

    String leadBroker = pm.leader().host();
    int port = pm.leader().port();
    String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
    SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);

    long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);

    FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();

    FetchResponse fetchResponse = consumer.fetch(req);
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {

      Message m = messageAndOffset.message();

      ByteBuffer payload = m.payload();
      ByteBuffer key = m.key();
      byte[] valueBytes = new byte[payload.limit()];
      byte[] keyBytes = new byte[key.limit()];
      payload.get(valueBytes);
      key.get(keyBytes);
      lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
    }
  }
}
 
Example #30
Source File: KafkaMetadataUtil.java    From attic-apex-malhar with Apache License 2.0
/**
 * @param brokerList brokers in same cluster
 * @param topic
 * @return Get the partition metadata list for the specific topic via the brokerList <br>
 * null if topic is not found
 */
public static List<PartitionMetadata> getPartitionsForTopic(Set<String> brokerList, String topic)
{
  TopicMetadata tmd = getTopicMetadata(brokerList, topic);
  if (tmd == null) {
    return null;
  }
  return tmd.partitionsMetadata();
}