org.apache.kafka.clients.admin.DescribeClusterOptions Java Examples

The following examples show how to use org.apache.kafka.clients.admin.DescribeClusterOptions. Each example is taken from an open-source project; the source file and license are noted above each snippet.
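Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all share: build a DescribeClusterOptions, bound the request with timeoutMs, and pass it to AdminClient.describeCluster. The class name, bootstrap address, and 5-second timeout below are placeholder assumptions; adjust them for your own cluster.

import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.DescribeClusterOptions;
import org.apache.kafka.clients.admin.DescribeClusterResult;

public class DescribeClusterExample {

    public static void main(String[] args) throws ExecutionException, InterruptedException {
        Properties props = new Properties();
        // Placeholder bootstrap address; point this at your own brokers.
        props.put("bootstrap.servers", "localhost:9092");

        try (AdminClient adminClient = AdminClient.create(props)) {
            // Bound the cluster metadata request to 5 seconds instead of the client default.
            DescribeClusterOptions options = new DescribeClusterOptions().timeoutMs(5000);
            DescribeClusterResult result = adminClient.describeCluster(options);

            // Each accessor returns a KafkaFuture; get() blocks until the result (or timeout) arrives.
            System.out.println("Cluster id: " + result.clusterId().get());
            System.out.println("Controller: " + result.controller().get());
            System.out.println("Nodes:      " + result.nodes().get());
        }
    }
}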
Example #1
Source File: KafkaHealthIndicator.java    From kafka-graphs with Apache License 2.0
@Override
public Mono<Health> health() {
    Health.Builder builder = new Health.Builder();
    Properties properties = new Properties();
    properties.put("bootstrap.servers", props.getBootstrapServers());
    try (AdminClient adminClient = AdminClient.create(properties)) {
        // Describe the cluster with a 3-second timeout; any failure marks the indicator DOWN.
        DescribeClusterResult result = adminClient.describeCluster(new DescribeClusterOptions().timeoutMs(3000));
        builder.withDetail("clusterId", result.clusterId().get());
        builder.up();
    } catch (Exception e) {
        builder.down();
    }
    return Mono.just(builder.build());
}
 
Example #2
Source File: KafkaAdmin.java    From feeyo-redisproxy with BSD 3-Clause "New" or "Revised" License
/**
 * Fetch the Kafka cluster's node (broker) information
 */
public Collection<Node> getClusterNodes() {
	try {
		// Bound the describeCluster request to 5 seconds.
		DescribeClusterOptions dco = new DescribeClusterOptions();
		dco.timeoutMs(5 * 1000);
		DescribeClusterResult dcr = adminClient.describeCluster(dco);
		return dcr.nodes().get();
	} catch (Exception e) {
		// Callers must handle a null return when the cluster cannot be reached in time.
		return null;
	}
}
 
Example #3
Source File: KafkaHealthIndicator.java    From micronaut-kafka with Apache License 2.0
@Override
public Flowable<HealthResult> getResult() {
    DescribeClusterResult result = adminClient.describeCluster(
            new DescribeClusterOptions().timeoutMs(
                    (int) defaultConfiguration.getHealthTimeout().toMillis()
            )
    );

    Flowable<String> clusterId = Flowable.fromFuture(result.clusterId());
    Flowable<Collection<Node>> nodes = Flowable.fromFuture(result.nodes());
    Flowable<Node> controller = Flowable.fromFuture(result.controller());

    return controller.switchMap(node -> {
        String brokerId = node.idString();
        ConfigResource configResource = new ConfigResource(ConfigResource.Type.BROKER, brokerId);
        DescribeConfigsResult configResult = adminClient.describeConfigs(Collections.singletonList(configResource));
        Flowable<Map<ConfigResource, Config>> configs = Flowable.fromFuture(configResult.all());
        return configs.switchMap(resources -> {
            Config config = resources.get(configResource);
            // REPLICATION_PROPERTY names the broker's replication-factor setting; the cluster
            // is reported UP only while at least that many brokers are available.
            ConfigEntry ce = config.get(REPLICATION_PROPERTY);
            int replicationFactor = Integer.parseInt(ce.value());
            return nodes.switchMap(nodeList -> clusterId.map(clusterIdString -> {
                int nodeCount = nodeList.size();
                HealthResult.Builder builder;
                if (nodeCount >= replicationFactor) {
                    builder = HealthResult.builder(ID, HealthStatus.UP);
                } else {
                    builder = HealthResult.builder(ID, HealthStatus.DOWN);
                }
                return builder
                        .details(CollectionUtils.mapOf(
                                "brokerId", brokerId,
                                "clusterId", clusterIdString,
                                "nodes", nodeCount
                        )).build();
            }));
        });
    }).onErrorReturn(throwable ->
            HealthResult.builder(ID, HealthStatus.DOWN)
                    .exception(throwable).build()
    );
}
 
Example #4
Source File: ClusterStatus.java    From common-docker with Apache License 2.0
/**
 * Checks if the Kafka cluster is accepting client requests and
 * has at least minBrokerCount brokers.
 *
 * @param config AdminClient configuration, e.g. bootstrap.servers
 * @param minBrokerCount Expected number of brokers
 * @param timeoutMs Timeout in milliseconds
 * @return true if the cluster is ready, false otherwise.
 */
public static boolean isKafkaReady(
    Map<String, String> config,
    int minBrokerCount,
    int timeoutMs
) {

  log.debug("Check if Kafka is ready: {}", config);

  // Need to copy because `config` is Map<String, String> and `create` expects Map<String, Object>.
  // try-with-resources ensures the AdminClient is closed on every exit path.
  try (AdminClient adminClient = AdminClient.create(new HashMap<String, Object>(config))) {

    long begin = System.currentTimeMillis();
    long remainingWaitMs = timeoutMs;
    Collection<Node> brokers = new ArrayList<>();
    while (remainingWaitMs > 0) {

      // describeCluster does not wait for all brokers to be ready before returning the brokers.
      // So, wait until the expected brokers are present or the timeout expires.
      try {
        brokers = adminClient.describeCluster(new DescribeClusterOptions().timeoutMs(
                (int) Math.min(Integer.MAX_VALUE, remainingWaitMs))).nodes().get();
        log.debug("Broker list: {}", (brokers != null ? brokers : "[]"));
        if ((brokers != null) && (brokers.size() >= minBrokerCount)) {
          return true;
        }
      } catch (Exception e) {
        log.error("Error while getting broker list.", e);
        // Swallow exceptions because we want to retry until timeoutMs expires.
      }

      sleep(Math.min(BROKER_METADATA_REQUEST_BACKOFF_MS, remainingWaitMs));

      log.info(
          "Expected {} brokers but found only {}. Trying to query Kafka for metadata again ...",
          minBrokerCount,
          brokers == null ? 0 : brokers.size()
      );
      long elapsed = System.currentTimeMillis() - begin;
      remainingWaitMs = timeoutMs - elapsed;
    }

    log.error(
        "Expected {} brokers but found only {}. Brokers found {}.",
        minBrokerCount,
        brokers == null ? 0 : brokers.size(),
        brokers != null ? brokers : "[]"
    );

    return false;
  }
}