Java Code Examples for org.apache.kafka.common.Node#id()

The following examples show how to use org.apache.kafka.common.Node#id(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
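For reference, Node#id() returns the integer id of the broker that the Node represents. A minimal, self-contained sketch (the id, host, and port below are made-up values; in practice a Node comes from cluster metadata):

import org.apache.kafka.common.Node;

public class NodeIdExample {
    public static void main(String[] args) {
        // Illustrative values only; real Node instances normally come from cluster metadata.
        Node node = new Node(42, "broker-1.example.com", 9092);
        System.out.println(node.id()); // prints 42
    }
}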
Example 1
Source File: KafkaPartitionMetricSampleAggregator.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * This is a simple sanity check on the sample data. We only verify that:
 * <p>
 * 1. The sampled data comes from the broker that holds the leader replica. If it does not, we simply
 * discard the data because a leader migration may have occurred, so the metrics in the old data might not be
 * accurate anymore.
 * <p>
 * 2. The sample contains metrics for all the resources.
 *
 * @param sample the sample to sanity check.
 * @param leaderValidation whether to perform leader validation.
 * @return {@code true} if the sample is valid.
 */
private boolean isValidSample(PartitionMetricSample sample, boolean leaderValidation) {
  boolean validLeader = true;
  if (leaderValidation) {
    Node leader = _metadata.fetch().leaderFor(sample.entity().tp());
    validLeader = (leader != null) && (sample.brokerId() == leader.id());
    if (!validLeader) {
      LOG.warn("The metric sample is discarded due to invalid leader. Current leader {}, Sample: {}", leader, sample);
    }
  }

  // TODO: We do not have the replication bytes rate at this point. Use the default validation after they are available.
  boolean completeMetrics = sample.isValid(_metricDef) || (sample.allMetricValues().size() == _metricDef.size() - 2
                                                           && sample.allMetricValues().containsKey(_metricDef.metricInfo(
                                                               KafkaMetricDef.REPLICATION_BYTES_IN_RATE.name()).id())
                                                           && sample.allMetricValues().containsKey(_metricDef.metricInfo(
                                                               KafkaMetricDef.REPLICATION_BYTES_OUT_RATE.name()).id()));

  if (!completeMetrics) {
    LOG.warn("The metric sample is discarded due to missing metrics. Sample: {}", sample);
  }
  return validLeader && completeMetrics;
}
 
Example 2
Source File: KafkaTopicPartitionLeader.java    From Flink-CEPplus with Apache License 2.0
public KafkaTopicPartitionLeader(KafkaTopicPartition topicPartition, Node leader) {
	this.topicPartition = topicPartition;
	if (leader == null) {
		this.leaderId = -1;
		this.leaderHost = null;
		this.leaderPort = -1;
	} else {
		this.leaderId = leader.id();
		this.leaderPort = leader.port();
		this.leaderHost = leader.host();
	}
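	// Precompute the hash; 14 is an arbitrary constant used in place of a null leader's hash.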
	int cachedHash = (leader == null) ? 14 : leader.hashCode();
	this.cachedHash = 31 * cachedHash + topicPartition.hashCode();
}
 
Example 3
Source File: KafkaAvailability.java    From strimzi-kafka-operator with Apache License 2.0
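// Collect every topic that has at least one replica hosted on the broker whose id matches podId.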
private Set<TopicDescription> groupTopicsByBroker(Collection<TopicDescription> tds, int podId) {
    Set<TopicDescription> topicPartitionInfos = new HashSet<>();
    for (TopicDescription td : tds) {
        log.trace("{}", td);
        for (TopicPartitionInfo pd : td.partitions()) {
            for (Node broker : pd.replicas()) {
                if (podId == broker.id()) {
                    topicPartitionInfos.add(td);
                }
            }
        }
    }
    return topicPartitionInfos;
}
 
Example 4
Source File: MultiClusterTopicManagementService.java    From kafka-monitor with Apache License 2.0
void maybeAddPartitions(int minPartitionNum) throws ExecutionException, InterruptedException {
  Map<String, KafkaFuture<TopicDescription>> kafkaFutureMap =
      _adminClient.describeTopics(Collections.singleton(_topic)).values();
  KafkaFuture<TopicDescription> topicDescriptions = kafkaFutureMap.get(_topic);
  List<TopicPartitionInfo> partitions = topicDescriptions.get().partitions();

  int partitionNum = partitions.size();
  if (partitionNum < minPartitionNum) {
    LOGGER.info("{} will increase partition of the topic {} in the cluster from {}"
        + " to {}.", this.getClass().toString(), _topic, partitionNum, minPartitionNum);
    Set<Integer> blackListedBrokers = _topicFactory.getBlackListedBrokers(_zkConnect);
    Set<BrokerMetadata> brokers = new HashSet<>();
    for (Node broker : _adminClient.describeCluster().nodes().get()) {
      BrokerMetadata brokerMetadata = new BrokerMetadata(
          broker.id(), null
      );
      brokers.add(brokerMetadata);
    }

    if (!blackListedBrokers.isEmpty()) {
      brokers.removeIf(broker -> blackListedBrokers.contains(broker.id()));
    }

    List<List<Integer>> newPartitionAssignments = newPartitionAssignments(minPartitionNum, partitionNum, brokers, _replicationFactor);

    NewPartitions newPartitions = NewPartitions.increaseTo(minPartitionNum, newPartitionAssignments);

    Map<String, NewPartitions> newPartitionsMap = new HashMap<>();
    newPartitionsMap.put(_topic, newPartitions);
    CreatePartitionsResult createPartitionsResult = _adminClient.createPartitions(newPartitionsMap);
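    // Note: the CreatePartitionsResult is not awaited here; callers could block on
    // createPartitionsResult.all().get() to surface any failure.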

  }
}
 
Example 5
Source File: KafkaTopicPartitionLeader.java    From flink with Apache License 2.0
public KafkaTopicPartitionLeader(KafkaTopicPartition topicPartition, Node leader) {
	this.topicPartition = topicPartition;
	if (leader == null) {
		this.leaderId = -1;
		this.leaderHost = null;
		this.leaderPort = -1;
	} else {
		this.leaderId = leader.id();
		this.leaderPort = leader.port();
		this.leaderHost = leader.host();
	}
	int cachedHash = (leader == null) ? 14 : leader.hashCode();
	this.cachedHash = 31 * cachedHash + topicPartition.hashCode();
}
 
Example 6
Source File: Executor.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * The completeness of leadership movement depends on the task state:
 * IN_PROGRESS: done when the destination broker has become the leader.
 * ABORTING or DEAD: always considered done because the destination can no longer become the leader.
 *
 * No other task state should be seen here.
 */
private boolean isLeadershipMovementDone(Cluster cluster, TopicPartition tp, ExecutionTask task) {
  Node leader = cluster.leaderFor(tp);
  switch (task.state()) {
    case IN_PROGRESS:
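      // Done if the destination already leads, the partition currently has no leader, or the
      // destination fell out of the ISR and thus can no longer become the leader.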
      return (leader != null && leader.id() == task.proposal().newLeader().brokerId())
             || leader == null
             || !isInIsr(task.proposal().newLeader().brokerId(), cluster, tp);
    case ABORTING:
    case DEAD:
      return true;
    default:
      throw new IllegalStateException("Should never be here.");
  }
}
 
Example 7
Source File: ExecutionTaskPlanner.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * For each proposal, create a leader action task if the leadership needs to move to reach the expected final proposal state.
 *
 * @param proposals Execution proposals.
 * @param cluster Kafka cluster state.
 */
private void maybeAddLeaderChangeTasks(Collection<ExecutionProposal> proposals, Cluster cluster) {
  for (ExecutionProposal proposal : proposals) {
    if (proposal.hasLeaderAction()) {
      Node currentLeader = cluster.leaderFor(proposal.topicPartition());
      if (currentLeader != null && currentLeader.id() != proposal.newLeader().brokerId()) {
        // Get the execution Id for the leader action proposal execution;
        long leaderActionExecutionId = _executionId++;
        ExecutionTask leaderActionTask = new ExecutionTask(leaderActionExecutionId, proposal, LEADER_ACTION, _taskExecutionAlertingThresholdMs);
        _remainingLeadershipMovements.put(leaderActionExecutionId, leaderActionTask);
        LOG.trace("Added action {} as leader proposal {}", leaderActionExecutionId, proposal);
      }
    }
  }
}
 
Example 8
Source File: BrokerNodeFunction.java    From data-highway with Apache License 2.0
public BrokerNode apply(Predicate<String> hostNamePredicate) {
  Collection<Node> nodes = KafkaFutures.join(client.describeCluster().nodes());

  try {
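    // find is assumed here to be a helper such as Guava's Iterables.find, which throws
    // NoSuchElementException when no element matches the predicate.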
    Node node = find(nodes, n -> hostNamePredicate.test(n.host()));
    log.debug("Using broker {}", node);
    return new BrokerNode(node.id(), ofNullable(node.rack()).orElse("none"), node.host());
  } catch (NoSuchElementException e) {
    throw new RuntimeException("No broker found matching the host name predicate!");
  }
}
 
Example 9
Source File: SamplingUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Create a {@link BrokerMetricSample}, record the relevant metrics for the given broker, and return the sample.
 *
 * @param node Node hosting the broker.
 * @param brokerLoadById Load information for brokers by the broker id.
 * @param maxMetricTimestamp Maximum timestamp of the sampled metric during the sampling process.
 * @return Metric sample populated with broker metrics, or {@code null} if sample generation is skipped.
 */
static BrokerMetricSample buildBrokerMetricSample(Node node,
                                                  Map<Integer, BrokerLoad> brokerLoadById,
                                                  long maxMetricTimestamp) throws UnknownVersionException {
  BrokerLoad brokerLoad = brokerLoadById.get(node.id());
  if (skipBuildingBrokerMetricSample(brokerLoad, node.id())) {
    return null;
  }
  MetricDef brokerMetricDef = KafkaMetricDef.brokerMetricDef();
  BrokerMetricSample bms = new BrokerMetricSample(node.host(), node.id(), brokerLoad.brokerSampleDeserializationVersion());
  for (Map.Entry<Byte, Set<RawMetricType>> entry : RawMetricType.brokerMetricTypesDiffByVersion().entrySet()) {
    for (RawMetricType rawBrokerMetricType : entry.getValue()) {
      // We require the broker to report all the metric types (including nullable values). Otherwise we skip the broker.
      if (!brokerLoad.brokerMetricAvailable(rawBrokerMetricType)) {
        LOG.warn("{}broker {} because it does not have {} metrics (serde version {}) or the metrics are inconsistent.",
                 SKIP_BUILDING_SAMPLE_PREFIX, node.id(), rawBrokerMetricType, entry.getKey());
        return null;
      } else {
        MetricInfo metricInfo = brokerMetricDef.metricInfo(KafkaMetricDef.forRawMetricType(rawBrokerMetricType).name());
        double metricValue = brokerLoad.brokerMetric(rawBrokerMetricType);
        bms.record(metricInfo, metricValue);
      }
    }
  }

  // Disk usage is not one of the broker raw metric types.
  bms.record(brokerMetricDef.metricInfo(KafkaMetricDef.DISK_USAGE.name()), brokerLoad.diskUsage());
  bms.close(maxMetricTimestamp);
  return bms;
}
 
Example 10
Source File: SamplingUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Create a {@link PartitionMetricSample}, record the relevant metrics for the given partition of the given topic on
 * the broker that hosts its leader replica, and return the sample.
 *
 * @param cluster Kafka cluster.
 * @param leaderDistribution The leader count per topic/broker.
 * @param tpDotNotHandled The original topic name that may contain dots.
 * @param brokerLoadById Load information for brokers by the broker id.
 * @param maxMetricTimestamp Maximum timestamp of the sampled metric during the sampling process.
 * @param cachedNumCoresByBroker Cached number of cores by broker.
 * @param skippedPartitionByBroker Number of skipped partition samples by broker ids.
 * @return Metric sample populated with topic and partition metrics, or {@code null} if sample generation is skipped.
 */
static PartitionMetricSample buildPartitionMetricSample(Cluster cluster,
                                                        Map<Integer, Map<String, Integer>> leaderDistribution,
                                                        TopicPartition tpDotNotHandled,
                                                        Map<Integer, BrokerLoad> brokerLoadById,
                                                        long maxMetricTimestamp,
                                                        Map<Integer, Short> cachedNumCoresByBroker,
                                                        Map<Integer, Integer> skippedPartitionByBroker) {
  Node leaderNode = cluster.leaderFor(tpDotNotHandled);
  if (leaderNode == null) {
    LOG.trace("Partition {} has no current leader.", tpDotNotHandled);
    skippedPartitionByBroker.merge(UNRECOGNIZED_BROKER_ID, 1, Integer::sum);
    return null;
  }
  int leaderId = leaderNode.id();
  //TODO: switch to linear regression model without computing partition level CPU usage.
  BrokerLoad brokerLoad = brokerLoadById.get(leaderId);
  TopicPartition tpWithDotHandled = partitionHandleDotInTopicName(tpDotNotHandled);
  if (skipBuildingPartitionMetricSample(tpDotNotHandled, tpWithDotHandled, leaderId, brokerLoad, cachedNumCoresByBroker)) {
    skippedPartitionByBroker.merge(leaderId, 1, Integer::sum);
    return null;
  }

  // Fill in all the common metrics.
  MetricDef commonMetricDef = KafkaMetricDef.commonMetricDef();
  PartitionMetricSample pms = new PartitionMetricSample(leaderId, tpDotNotHandled);
  int numLeaders = leaderDistribution.get(leaderId).get(tpDotNotHandled.topic());
  for (RawMetricType rawMetricType : RawMetricType.topicMetricTypes()) {
    double sampleValue = numLeaders == 0 ? 0 : (brokerLoad.topicMetrics(tpWithDotHandled.topic(), rawMetricType)) / numLeaders;
    MetricInfo metricInfo = commonMetricDef.metricInfo(KafkaMetricDef.forRawMetricType(rawMetricType).name());
    pms.record(metricInfo, sampleValue);
  }
  // Fill in disk and CPU utilization, which are not topic metric types.
  Double partitionSize = brokerLoad.partitionMetric(tpWithDotHandled.topic(), tpWithDotHandled.partition(), PARTITION_SIZE);
  if (partitionSize == null) {
    skippedPartitionByBroker.merge(leaderId, 1, Integer::sum);
    return null;
  }
  pms.record(commonMetricDef.metricInfo(KafkaMetricDef.DISK_USAGE.name()), partitionSize);
  Double estimatedLeaderCpuUtil = estimateLeaderCpuUtil(pms, brokerLoad, commonMetricDef, cachedNumCoresByBroker.get(leaderId));
  if (estimatedLeaderCpuUtil == null) {
    skippedPartitionByBroker.merge(leaderId, 1, Integer::sum);
    return null;
  }
  pms.record(commonMetricDef.metricInfo(KafkaMetricDef.CPU_USAGE.name()), estimatedLeaderCpuUtil);
  pms.close(maxMetricTimestamp);
  return pms;
}
 
Example 11
Source File: FlinkKafkaProducer.java    From flink with Apache License 2.0
@VisibleForTesting
public int getTransactionCoordinatorId() {
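	// Reflectively read the producer's private TransactionManager and ask it for the transaction coordinator node.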
	Object transactionManager = getValue(kafkaProducer, "transactionManager");
	Node node = (Node) invoke(transactionManager, "coordinator", FindCoordinatorRequest.CoordinatorType.TRANSACTION);
	return node.id();
}
 
Example 12
Source File: FlinkKafkaInternalProducer.java    From flink with Apache License 2.0
@VisibleForTesting
public int getTransactionCoordinatorId() {
	Object transactionManager = getField(kafkaProducer, "transactionManager");
	Node node = (Node) invoke(transactionManager, "coordinator", FindCoordinatorRequest.CoordinatorType.TRANSACTION);
	return node.id();
}
 
Example 13
Source File: MonitorUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Create replicas of the partition with the given (1) identifier and (2) load information to populate the given cluster model.
 * If partition with the given identifier does not exist in the given cluster, do nothing.
 *
 * @param cluster Kafka cluster.
 * @param clusterModel The cluster model to populate load information.
 * @param tp Topic partition that identifies the partition to populate the load for.
 * @param valuesAndExtrapolations The values and extrapolations of the leader replica.
 * @param replicaPlacementInfo The distribution of replicas over broker logdirs if available, {@code null} otherwise.
 * @param brokerCapacityConfigResolver The resolver for retrieving broker capacities.
 * @param allowCapacityEstimation Whether to allow capacity estimation in the cluster model if the underlying live broker capacity is unavailable.
 */
static void populatePartitionLoad(Cluster cluster,
                                  ClusterModel clusterModel,
                                  TopicPartition tp,
                                  ValuesAndExtrapolations valuesAndExtrapolations,
                                  Map<TopicPartition, Map<Integer, String>> replicaPlacementInfo,
                                  BrokerCapacityConfigResolver brokerCapacityConfigResolver,
                                  boolean allowCapacityEstimation)
    throws TimeoutException {
  PartitionInfo partitionInfo = cluster.partition(tp);
  // If partition info does not exist, the topic may have been deleted.
  if (partitionInfo != null) {
    Set<Integer> aliveBrokers = cluster.nodes().stream().mapToInt(Node::id).boxed().collect(Collectors.toSet());
    boolean needToAdjustCpuUsage = true;
    Set<Integer> deadBrokersWithUnknownCapacity = new HashSet<>();
    for (int index = 0; index < partitionInfo.replicas().length; index++) {
      Node replica = partitionInfo.replicas()[index];
      String rack = getRackHandleNull(replica);
      BrokerCapacityInfo brokerCapacity;
      try {
        // Do not allow capacity estimation for dead brokers.
        brokerCapacity = brokerCapacityConfigResolver.capacityForBroker(rack, replica.host(), replica.id(), BROKER_CAPACITY_FETCH_TIMEOUT_MS,
                                                                        aliveBrokers.contains(replica.id()) && allowCapacityEstimation);
      } catch (TimeoutException | BrokerCapacityResolutionException e) {
        // Capacity resolver may not be able to return the capacity information of dead brokers.
        if (!aliveBrokers.contains(replica.id())) {
          brokerCapacity = new BrokerCapacityInfo(EMPTY_BROKER_CAPACITY);
          deadBrokersWithUnknownCapacity.add(replica.id());
        } else {
          String errorMessage = String.format("Unable to retrieve capacity for broker %d. This may be caused by churn in "
                                              + "the cluster, please retry.", replica.id());
          LOG.warn(errorMessage, e);
          throw new TimeoutException(errorMessage);
        }
      }
      clusterModel.handleDeadBroker(rack, replica.id(), brokerCapacity);
      boolean isLeader;
      if (partitionInfo.leader() == null) {
        LOG.warn("Detected offline partition {}-{}, skipping", partitionInfo.topic(), partitionInfo.partition());
        continue;
      } else {
        isLeader = replica.id() == partitionInfo.leader().id();
      }
      boolean isOffline = Arrays.stream(partitionInfo.offlineReplicas())
                                .anyMatch(offlineReplica -> offlineReplica.id() == replica.id());

      String logdir = replicaPlacementInfo == null ? null : replicaPlacementInfo.get(tp).get(replica.id());
      // If the replica's logdir is null, it is either because replica placement information is not populated for the cluster
      // model or this replica is hosted on a dead disk and is not considered for intra-broker replica operations.
      clusterModel.createReplica(rack, replica.id(), tp, index, isLeader, isOffline, logdir, false);
      clusterModel.setReplicaLoad(rack,
                                  replica.id(),
                                  tp,
                                  getAggregatedMetricValues(valuesAndExtrapolations,
                                                            cluster.partition(tp),
                                                            isLeader,
                                                            needToAdjustCpuUsage),
                                  valuesAndExtrapolations.windows());
      needToAdjustCpuUsage = false;
    }
    if (!deadBrokersWithUnknownCapacity.isEmpty()) {
      LOG.info("Assign empty capacity to brokers {} because they are dead and capacity resolver is unable to fetch their capacity.",
               deadBrokersWithUnknownCapacity);
    }
  }
}
 
Example 14
Source File: MonitorUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
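// The leader changed unless both snapshots are leaderless, or both have a leader with the same broker id.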
private static boolean leaderChanged(PartitionInfo prevPartInfo, PartitionInfo currPartInfo) {
  Node prevLeader = prevPartInfo.leader();
  Node currLeader = currPartInfo.leader();
  return !(prevLeader == null && currLeader == null) && !(prevLeader != null && currLeader != null
                                                          && prevLeader.id() == currLeader.id());
}
 
Example 15
Source File: ClusterBrokerState.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Gather the Kafka broker state into the given per-broker leader, out-of-sync, replica, offline-replica, and controller maps.
 *
 * @param leaderCountByBrokerId Leader count by broker id.
 * @param outOfSyncCountByBrokerId Out of sync replica count by broker id.
 * @param replicaCountByBrokerId Replica count by broker id.
 * @param offlineReplicaCountByBrokerId Offline replica count by broker id.
 * @param isControllerByBrokerId Controller information by broker id.
 */
protected void populateKafkaBrokerState(Map<Integer, Integer> leaderCountByBrokerId,
                                        Map<Integer, Integer> outOfSyncCountByBrokerId,
                                        Map<Integer, Integer> replicaCountByBrokerId,
                                        Map<Integer, Integer> offlineReplicaCountByBrokerId,
                                        Map<Integer, Boolean> isControllerByBrokerId) {
  // Part-1: Gather the states of brokers with replicas.
  for (String topic : _kafkaCluster.topics()) {
    for (PartitionInfo partitionInfo : _kafkaCluster.partitionsForTopic(topic)) {
      if (partitionInfo.leader() == null) {
        continue;
      }
      leaderCountByBrokerId.merge(partitionInfo.leader().id(), 1, Integer::sum);

      Set<Integer> replicas = Arrays.stream(partitionInfo.replicas()).map(Node::id).collect(Collectors.toSet());
      Set<Integer> inSyncReplicas = Arrays.stream(partitionInfo.inSyncReplicas()).map(Node::id).collect(Collectors.toSet());
      Set<Integer> outOfSyncReplicas = new HashSet<>(replicas);
      outOfSyncReplicas.removeAll(inSyncReplicas);
      Set<Integer> offlineReplicas = Arrays.stream(partitionInfo.offlineReplicas()).map(Node::id).collect(Collectors.toSet());

      outOfSyncReplicas.forEach(brokerId -> outOfSyncCountByBrokerId.merge(brokerId, 1, Integer::sum));
      offlineReplicas.forEach(brokerId -> offlineReplicaCountByBrokerId.merge(brokerId, 1, Integer::sum));
      replicas.forEach(brokerId -> replicaCountByBrokerId.merge(brokerId, 1, Integer::sum));
    }
  }
  // Part-2: Gather the states of brokers without replicas.
  for (Node node : _kafkaCluster.nodes()) {
    int nodeId = node.id();
    if (replicaCountByBrokerId.get(nodeId) == null) {
      offlineReplicaCountByBrokerId.put(nodeId, 0);
      replicaCountByBrokerId.put(nodeId, 0);
      outOfSyncCountByBrokerId.put(nodeId, 0);
      leaderCountByBrokerId.put(nodeId, 0);
    }
  }
  // Part-3: Gather controller information.
  replicaCountByBrokerId.keySet().forEach(brokerId -> isControllerByBrokerId.put(brokerId, false));
  Node controller = _kafkaCluster.controller();
  if (controller != null) {
    isControllerByBrokerId.put(controller.id(), true);
  }
}
 
Example 16
Source File: FlinkKafkaInternalProducer.java    From flink with Apache License 2.0
@VisibleForTesting
public int getTransactionCoordinatorId() {
	Object transactionManager = getValue(kafkaProducer, "transactionManager");
	Node node = (Node) invoke(transactionManager, "coordinator", FindCoordinatorRequest.CoordinatorType.TRANSACTION);
	return node.id();
}
 
Example 17
Source File: FlinkKafkaProducer.java    From Flink-CEPplus with Apache License 2.0
@VisibleForTesting
public int getTransactionCoordinatorId() {
	Object transactionManager = getValue(kafkaProducer, "transactionManager");
	Node node = (Node) invoke(transactionManager, "coordinator", FindCoordinatorRequest.CoordinatorType.TRANSACTION);
	return node.id();
}
 
Example 18
Source File: FlinkKafkaInternalProducer.java    From Flink-CEPplus with Apache License 2.0
@VisibleForTesting
public int getTransactionCoordinatorId() {
	Object transactionManager = getValue(kafkaProducer, "transactionManager");
	Node node = (Node) invoke(transactionManager, "coordinator", FindCoordinatorRequest.CoordinatorType.TRANSACTION);
	return node.id();
}