Java Code Examples for org.apache.kafka.common.KafkaFuture#get()

The following examples show how to use org.apache.kafka.common.KafkaFuture#get(). Each example is taken from an open source project; the source file, originating project, and license are noted above it.
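KafkaFuture#get() blocks until the asynchronous admin operation behind the future completes, returning its result or throwing InterruptedException or ExecutionException (or TimeoutException when the timed overload is used). As a minimal sketch of the pattern the examples below follow, assuming an already configured AdminClient held in a variable named client (the variable name is hypothetical):

// Minimal sketch (not taken from the projects below): block on the future
// returned by an AdminClient call, with a bounded wait.
KafkaFuture<Set<String>> namesFuture = client.listTopics().names();
try {
  Set<String> topicNames = namesFuture.get(5, TimeUnit.SECONDS);
  System.out.println("Existing topics: " + topicNames);
} catch (InterruptedException | ExecutionException | TimeoutException e) {
  // get() wraps the underlying failure in an ExecutionException;
  // rethrow or handle as appropriate for the application.
  throw new RuntimeException(e);
}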
Example 1
Source File: TopicServiceImpl.java    From kafka-helmsman with MIT License
/**
 * Transform a TopicDescription instance to ConfiguredTopic instance.
 *
 * @param td  an instance of TopicDescription
 * @param ktc a topic config future
 * @return an instance of ConfiguredTopic
 */
static ConfiguredTopic configuredTopic(TopicDescription td, KafkaFuture<Config> ktc) {
  int partitions = td.partitions().size();
  short replication = (short) td.partitions().iterator().next().replicas().size();
  try {
    Config tc = ktc.get();
    Map<String, String> configMap = tc
        .entries()
        .stream()
        .filter(TopicServiceImpl::isNonDefault)
        .collect(toMap(ConfigEntry::name, ConfigEntry::value));
    return new ConfiguredTopic(td.name(), partitions, replication, configMap);
  } catch (InterruptedException | ExecutionException e) {
    // TODO: FA-10109: Improve exception handling
    throw new RuntimeException(e);
  }
}
 
Example 2
Source File: KafkaStorage.java    From zipkin-storage-kafka with Apache License 2.0
@Override public CheckResult check() {
  try {
    KafkaFuture<String> maybeClusterId = getAdminClient().describeCluster().clusterId();
    maybeClusterId.get(1, TimeUnit.SECONDS);
    KafkaStreams.State state = getAggregationStream().state();
    if (!state.isRunning()) {
      return CheckResult.failed(
          new IllegalStateException("Aggregation stream not running. " + state));
    }
    KafkaStreams.State traceStateStore = getTraceStorageStream().state();
    if (!traceStateStore.isRunning()) {
      return CheckResult.failed(
          new IllegalStateException("Store stream not running. " + traceStateStore));
    }
    KafkaStreams.State dependencyStateStore = getDependencyStorageStream().state();
    if (!dependencyStateStore.isRunning()) {
      return CheckResult.failed(
          new IllegalStateException("Store stream not running. " + dependencyStateStore));
    }
    return CheckResult.OK;
  } catch (Exception e) {
    return CheckResult.failed(e);
  }
}
 
Example 3
Source File: CreateTopics.java    From phoebus with Eclipse Public License 1.0
/**
 * Discover the currently existing Kafka topics and return a list of any requested topics that still need to be created.
 * @param client {@link AdminClient}
 * @param topics_to_discover topics whose existence should be checked
 * @return <code>List</code> of <code>String</code>s with all the topic names that need to be created.
 *         Returns an empty list if none need to be created.
 */
private static List<String> discoverTopics(final AdminClient client, final List<String> topics_to_discover)
{
    final List<String> topics_to_create = new ArrayList<>();

    // Discover what topics currently exist.
    try
    {
        final ListTopicsResult res = client.listTopics();
        final KafkaFuture<Set<String>> topics = res.names();
        final Set<String> topic_names = topics.get();

        for (String topic : topics_to_discover)
        {
            if ( ! topic_names.contains(topic))
                topics_to_create.add(topic);
        }
    }
    catch (Exception ex)
    {
        logger.log(Level.WARNING, "Unable to list topics. Automatic topic detection failed.", ex);
    }

    return topics_to_create;
}
 
Example 4
Source File: CreateTopics.java    From phoebus with Eclipse Public License 1.0
/** Create a topic for each of the topics in the passed list.
 *  @param client {@link AdminClient}
 *  @param compact If the topics should be compacted.
 *  @param topics_to_create {@link List} of {@link String}s filled with the names of topics to create.
 */
private static void createTopics(final AdminClient client, final boolean compact, final List<String> topics_to_create)
{
    // Create the new topics locally.
    final List<NewTopic> new_topics = new ArrayList<>();
    for (String topic : topics_to_create)
    {
        logger.info("Creating topic '" + topic + "'");
        new_topics.add(createTopic(client, compact, topic));
    }
    // Create the new topics in the Kafka server.
    try
    {
        final CreateTopicsResult res = client.createTopics(new_topics);
        final KafkaFuture<Void> future = res.all();
        future.get();
    }
    catch (Exception ex)
    {
        logger.log(Level.WARNING, "Attempt to create topics failed", ex);
    }
}
 
Example 5
Source File: ConsumeService.java    From kafka-monitor with Apache License 2.0
@Override
public synchronized void start() {
  if (_running.compareAndSet(false, true)) {
    _consumeThread.start();
    LOG.info("{}/ConsumeService started.", _name);

    Sensor topicPartitionCount = metrics.sensor("topic-partitions");
    DescribeTopicsResult describeTopicsResult = _adminClient.describeTopics(Collections.singleton(_topic));
    Map<String, KafkaFuture<TopicDescription>> topicResultValues = describeTopicsResult.values();
    KafkaFuture<TopicDescription> topicDescriptionKafkaFuture = topicResultValues.get(_topic);
    TopicDescription topicDescription = null;
    try {
      topicDescription = topicDescriptionKafkaFuture.get();
    } catch (InterruptedException | ExecutionException e) {
      LOG.error("Exception occurred while getting the topicDescriptionKafkaFuture for topic: {}", _topic, e);
    }
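    // If get() failed above, topicDescription is still null at this point; the
    // annotation below suppresses the IDE's constant-conditions warning about that dereference.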
    @SuppressWarnings("ConstantConditions")
    double partitionCount = topicDescription.partitions().size();
    topicPartitionCount.add(
        new MetricName("topic-partitions-count", METRIC_GROUP_NAME, "The total number of partitions for the topic.", tags), new Total(partitionCount));
  }
}
 
Example 6
Source File: KafkaTopicClientImplTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0
@SuppressWarnings("unchecked")
private static <T> KafkaFuture<T> failedFuture(final Exception cause) {
  try {
    final KafkaFuture<T> future = mock(KafkaFuture.class);
    future.get();
    expectLastCall().andThrow(new ExecutionException(cause));
    replay(future);
    return future;
  } catch (final Exception e) {
    throw new RuntimeException(e);
  }
}
 
Example 7
Source File: DefaultCollector.java    From paraflow with Apache License 2.0
@Override
public void createTopic(String topicName, int partitionsNum, short replicationFactor)
{
    NewTopic newTopic = new NewTopic(topicName, partitionsNum, replicationFactor);
    CreateTopicsResult result = kafkaAdminClient.createTopics(Collections.singletonList(newTopic));
    KafkaFuture<Void> future = result.values().get(topicName);
    try {
        future.get();
    }
    catch (InterruptedException | ExecutionException e) {
        e.printStackTrace();
    }
}
 
Example 8
Source File: KafkaBinderTests.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
private int invokePartitionSize(String topic) throws Throwable {
    DescribeTopicsResult describeTopicsResult = adminClient
            .describeTopics(Collections.singletonList(topic));
    KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult.all();
    Map<String, TopicDescription> stringTopicDescriptionMap = all
            .get(DEFAULT_OPERATION_TIMEOUT, TimeUnit.SECONDS);
    TopicDescription topicDescription = stringTopicDescriptionMap.get(topic);
    return topicDescription.partitions().size();
}
 
Example 9
Source File: ClusterTopicManipulationService.java    From kafka-monitor with Apache License 2.0
/**
 * Waits if necessary for the describe-topics future to complete and gets its result in a blocking fashion.
 * The call succeeds only if all of the topic descriptions are successful.
 * @param adminClient administrative client for Kafka, supporting managing and inspecting topics, brokers, configurations and ACLs
 * @param topicNames collection of topic names to describe
 * @return Map<String, TopicDescription> if describing the topics succeeds
 */
private static Map<String, TopicDescription> describeTopics(AdminClient adminClient, Collection<String> topicNames)
    throws InterruptedException, ExecutionException {
  KafkaFuture<Map<String, TopicDescription>> mapKafkaFuture = adminClient.describeTopics(topicNames).all();
  LOGGER.debug("describeTopics future: {}", mapKafkaFuture);

  Map<String, TopicDescription> topicDescriptions = mapKafkaFuture.get();
  LOGGER.debug("describeTopics: {}", topicDescriptions);
  return topicDescriptions;
}
 
Example 10
Source File: ProduceService.java    From kafka-monitor with Apache License 2.0
@Override
public synchronized void start() {
  if (_running.compareAndSet(false, true)) {
    try {
      KafkaFuture<Map<String, TopicDescription>> topicDescriptionsFuture = _adminClient.describeTopics(Collections.singleton(_topic)).all();
      Map<String, TopicDescription> topicDescriptions = topicDescriptionsFuture.get();
      int partitionNum = topicDescriptions.get(_topic).partitions().size();
      initializeStateForPartitions(partitionNum);
      _handleNewPartitionsExecutor.scheduleWithFixedDelay(new NewPartitionHandler(), 1, 30, TimeUnit.SECONDS);
      LOG.info("{}/ProduceService started", _name);
    } catch (InterruptedException | UnknownTopicOrPartitionException | ExecutionException e) {
      LOG.error("Exception occurred while starting produce service for topic: {}", _topic, e);
    }
  }
}
 
Example 11
Source File: KafkaSender.java    From zipkin-reporter-java with Apache License 2.0
/** Ensures there are no problems reading metadata about the topic. */
@Override public CheckResult check() {
  try {
    KafkaFuture<String> maybeClusterId = getAdminClient().describeCluster().clusterId();
    maybeClusterId.get(1, TimeUnit.SECONDS);
    return CheckResult.OK;
  } catch (Exception e) {
    return CheckResult.failed(e);
  }
}
 
Example 12
Source File: KafkaFutures.java    From data-highway with Apache License 2.0
@SneakyThrows({ InterruptedException.class, ExecutionException.class })
static <T> T join(KafkaFuture<T> future) {
  return future.get();
}
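Because Lombok's @SneakyThrows rethrows the checked InterruptedException and ExecutionException without declaring them, callers (within the same package, since join is package-private) can block on a future without a try/catch. A hypothetical call site:

// Hypothetical call site; adminClient is an already configured AdminClient.
Set<String> topicNames = KafkaFutures.join(adminClient.listTopics().names());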
 
Example 13
Source File: KafkaTopicProvisioner.java    From spring-cloud-stream-binder-kafka with Apache License 2.0
private ConsumerDestination doProvisionConsumerDestination(final String name,
		final String group,
		ExtendedConsumerProperties<KafkaConsumerProperties> properties) {

	if (properties.getExtension().isDestinationIsPattern()) {
		Assert.isTrue(!properties.getExtension().isEnableDlq(),
				"enableDLQ is not allowed when listening to topic patterns");
		if (logger.isDebugEnabled()) {
			logger.debug("Listening to a topic pattern - " + name
					+ " - no provisioning performed");
		}
		return new KafkaConsumerDestination(name);
	}
	KafkaTopicUtils.validateTopicName(name);
	boolean anonymous = !StringUtils.hasText(group);
	Assert.isTrue(!anonymous || !properties.getExtension().isEnableDlq(),
			"DLQ support is not available for anonymous subscriptions");
	if (properties.getInstanceCount() == 0) {
		throw new IllegalArgumentException("Instance count cannot be zero");
	}
	int partitionCount = properties.getInstanceCount() * properties.getConcurrency();
	ConsumerDestination consumerDestination = new KafkaConsumerDestination(name);
	try (AdminClient adminClient = createAdminClient()) {
		createTopic(adminClient, name, partitionCount,
				properties.getExtension().isAutoRebalanceEnabled(),
				properties.getExtension().getTopic());
		if (this.configurationProperties.isAutoCreateTopics()) {
			DescribeTopicsResult describeTopicsResult = adminClient
					.describeTopics(Collections.singletonList(name));
			KafkaFuture<Map<String, TopicDescription>> all = describeTopicsResult
					.all();
			try {
				Map<String, TopicDescription> topicDescriptions = all
						.get(this.operationTimeout, TimeUnit.SECONDS);
				TopicDescription topicDescription = topicDescriptions.get(name);
				int partitions = topicDescription.partitions().size();
				consumerDestination = createDlqIfNeedBe(adminClient, name, group,
						properties, anonymous, partitions);
				if (consumerDestination == null) {
					consumerDestination = new KafkaConsumerDestination(name,
							partitions);
				}
			}
			catch (Exception ex) {
				throw new ProvisioningException("provisioning exception", ex);
			}
		}
	}
	return consumerDestination;
}
 
Example 14
Source File: AdminClientTest.java    From javabase with Apache License 2.0
/**
 * Delete the test topic (TEST_TOPIC).
 *
 * @param client the {@link AdminClient} used to issue the delete request
 */
public static void deleteTopics(AdminClient client)
        throws ExecutionException, InterruptedException {
    KafkaFuture<Void> future = client.deleteTopics(Arrays.asList(TEST_TOPIC)).all();
    future.get();
}