org.apache.kafka.common.Cluster Java Examples

The following examples show how to use org.apache.kafka.common.Cluster. They are drawn from open-source projects; the project, source file, and license each example comes from are noted in its header.
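As a quick orientation, here is a minimal, self-contained sketch (not taken from any of the projects below; the cluster id, topic name, hosts, and ports are placeholder values) of how a Cluster is typically constructed and queried in tests:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;

public class ClusterSketch {
  public static void main(String[] args) {
    // Two broker nodes on different racks.
    Node node0 = new Node(0, "localhost", 9092, "rack0");
    Node node1 = new Node(1, "localhost", 9093, "rack1");
    Node[] replicas = {node0, node1};
    // One partition of "my-topic" led by node0, with both replicas in sync.
    PartitionInfo partition = new PartitionInfo("my-topic", 0, node0, replicas, replicas);
    Cluster cluster = new Cluster("test-cluster",
                                  Arrays.asList(node0, node1),
                                  Collections.singleton(partition),
                                  Collections.emptySet(),   // unauthorized topics
                                  Collections.emptySet());  // internal topics
    // Read the metadata back out of the Cluster.
    List<PartitionInfo> partitions = cluster.partitionsForTopic("my-topic");
    Integer partitionCount = cluster.partitionCountForTopic("my-topic");
    Node leader = cluster.leaderFor(new TopicPartition("my-topic", 0));
    System.out.println(partitions.size() + " partition(s), count=" + partitionCount + ", leader=" + leader);
  }
}
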
Example #1
Source File: AbstractReplicaMovementStrategy.java    From cruise-control with BSD 2-Clause "Simplified" License
@Override
public Map<Integer, SortedSet<ExecutionTask>> applyStrategy(Set<ExecutionTask> replicaMovementTasks, Cluster cluster) {
  Map<Integer, SortedSet<ExecutionTask>> tasksByBrokerId = new HashMap<>();

  for (ExecutionTask task : replicaMovementTasks) {
    ExecutionProposal proposal = task.proposal();

    // Add the task to the source broker's execution plan
    SortedSet<ExecutionTask> sourceBrokerTaskSet = tasksByBrokerId.computeIfAbsent(proposal.oldLeader().brokerId(),
                                                                                   k -> new TreeSet<>(taskComparator(cluster)));
    if (!sourceBrokerTaskSet.add(task)) {
      throw new IllegalStateException("Replica movement strategy " + this.getClass().getSimpleName() + " failed to determine order of tasks.");
    }

    // Add the task to the destination brokers' execution plans
    for (ReplicaPlacementInfo destinationBroker : proposal.replicasToAdd()) {
      SortedSet<ExecutionTask> destinationBrokerTaskSet = tasksByBrokerId.computeIfAbsent(destinationBroker.brokerId(),
                                                                                          k -> new TreeSet<>(taskComparator(cluster)));
      if (!destinationBrokerTaskSet.add(task)) {
        throw new IllegalStateException("Replica movement strategy " + this.getClass().getSimpleName() + " failed to determine order of tasks.");
      }
    }
  }
  return tasksByBrokerId;
}
 
Example #2
Source File: ExecutionTaskPlannerTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testClear() {
  List<ExecutionProposal> proposals = new ArrayList<>();
  proposals.add(_leaderMovement1);
  proposals.add(_partitionMovement1);
  ExecutionTaskPlanner planner =
      new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties()));

  Set<PartitionInfo> partitions = new HashSet<>();

  partitions.add(generatePartitionInfo(_leaderMovement1, false));
  partitions.add(generatePartitionInfo(_partitionMovement1, false));

  Cluster expectedCluster = new Cluster(null,
                                        _expectedNodes,
                                        partitions,
                                        Collections.<String>emptySet(),
                                        Collections.<String>emptySet());

  planner.addExecutionProposals(proposals, expectedCluster, null);
  assertEquals(2, planner.remainingLeadershipMovements().size());
  assertEquals(2, planner.remainingInterBrokerReplicaMovements().size());
  planner.clear();
  assertEquals(0, planner.remainingLeadershipMovements().size());
  assertEquals(0, planner.remainingInterBrokerReplicaMovements().size());
}
 
Example #3
Source File: TopicReplicationFactorAnomalyFinder.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Scan through topics to check whether any topic has partition(s) with a bad replication factor. For each topic, the
 * target replication factor to check against is the maximum of the value of {@link #SELF_HEALING_TARGET_TOPIC_REPLICATION_FACTOR_CONFIG}
 * and the topic's minISR plus the value of {@link #TOPIC_REPLICATION_FACTOR_MARGIN_CONFIG}.
 *
 * @param topicsToCheck Set of topics to check.
 * @param cluster The Kafka cluster.
 * @return Map of detected topic replication factor anomaly entries by target replication factor.
 */
private Map<Short, Set<TopicReplicationFactorAnomalyEntry>> populateBadTopicsByReplicationFactor(Set<String> topicsToCheck, Cluster cluster) {
  Map<Short, Set<TopicReplicationFactorAnomalyEntry>> topicsByReplicationFactor = new HashMap<>();
  for (String topic : topicsToCheck) {
    if (_cachedTopicMinISR.containsKey(topic)) {
      short topicMinISR = _cachedTopicMinISR.get(topic).minISR();
      short targetReplicationFactor = (short) Math.max(_targetReplicationFactor, topicMinISR + _topicReplicationFactorMargin);
      int violatedPartitionCount = 0;
      for (PartitionInfo partitionInfo : cluster.partitionsForTopic(topic)) {
        if (partitionInfo.replicas().length != targetReplicationFactor) {
          violatedPartitionCount++;
        }
      }
      if (violatedPartitionCount > 0) {
        topicsByReplicationFactor.putIfAbsent(targetReplicationFactor, new HashSet<>());
        topicsByReplicationFactor.get(targetReplicationFactor).add(
            new TopicReplicationFactorAnomalyEntry(topic, (double) violatedPartitionCount /  cluster.partitionCountForTopic(topic)));
      }
    }
  }
  return topicsByReplicationFactor;
}
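For illustration with hypothetical values: if the configured self-healing target replication factor is 3, a topic's minISR is 2, and the replication factor margin is 1, the target replication factor checked is max(3, 2 + 1) = 3, so any partition of that topic whose replica count differs from 3 is recorded as an anomaly entry.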
 
Example #4
Source File: LoadMonitor.java    From cruise-control with BSD 2-Clause "Simplified" License
private double getMonitoredPartitionsPercentage() {
  MetadataClient.ClusterAndGeneration clusterAndGeneration = _metadataClient.refreshMetadata();

  Cluster kafkaCluster = clusterAndGeneration.cluster();
  MetricSampleAggregationResult<String, PartitionEntity> metricSampleAggregationResult;
  try {
    metricSampleAggregationResult = _partitionMetricSampleAggregator.aggregate(kafkaCluster,
                                                                               _time.milliseconds(),
                                                                               new OperationProgress());
  } catch (NotEnoughValidWindowsException e) {
    LOG.debug("Not enough valid windows to get monitored partitions. {}", e.getMessage());
    return 0.0;
  }
  Map<PartitionEntity, ValuesAndExtrapolations> partitionLoads = metricSampleAggregationResult.valuesAndExtrapolations();
  AtomicInteger numPartitionsWithExtrapolations = new AtomicInteger(0);
  partitionLoads.values().forEach(valuesAndExtrapolations -> {
    if (!valuesAndExtrapolations.extrapolations().isEmpty()) {
      numPartitionsWithExtrapolations.incrementAndGet();
    }
  });
  _numPartitionsWithExtrapolations = numPartitionsWithExtrapolations.get();
  _totalNumPartitions = MonitorUtils.totalNumPartitions(kafkaCluster);
  return _totalNumPartitions > 0 ? metricSampleAggregationResult.completeness().validEntityRatio() : 0.0;
}
 
Example #5
Source File: CruiseControlMetricsProcessorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testMissingPartitionSizeMetric() throws TimeoutException, BrokerCapacityResolutionException {
  CruiseControlMetricsProcessor processor = new CruiseControlMetricsProcessor(mockBrokerCapacityConfigResolver(), false);
  Set<CruiseControlMetric> metrics = getCruiseControlMetrics();
  for (CruiseControlMetric metric : metrics) {
    boolean shouldAdd = true;
    if (metric.rawMetricType() == RawMetricType.PARTITION_SIZE) {
      PartitionMetric pm = (PartitionMetric) metric;
      if (pm.topic().equals(TOPIC1) && pm.partition() == P0) {
        shouldAdd = false;
      }
    }
    if (shouldAdd) {
      processor.addMetric(metric);
    }
  }
  Cluster cluster = getCluster();
  MetricSampler.Samples samples = processor.process(cluster, TEST_PARTITIONS, MetricSampler.SamplingMode.ALL);
  assertEquals("Should have ignored partition " + T1P0, 3, samples.partitionMetricSamples().size());
  assertEquals("Should have reported both brokers", 2, samples.brokerMetricSamples().size());
}
 
Example #6
Source File: ClusterBrokerState.java    From cruise-control with BSD 2-Clause "Simplified" License
public ClusterBrokerState(Cluster kafkaCluster, Map<String, Object> adminClientConfigs, KafkaCruiseControlConfig config)
    throws ExecutionException, InterruptedException {
  _kafkaCluster = kafkaCluster;
  _adminClientConfigs = adminClientConfigs;
  _config = config;
  _leaderCountByBrokerId = new TreeMap<>();
  _outOfSyncCountByBrokerId = new TreeMap<>();
  _replicaCountByBrokerId = new TreeMap<>();
  _offlineReplicaCountByBrokerId = new TreeMap<>();
  _isControllerByBrokerId = new TreeMap<>();
  // Gather the broker state.
  populateKafkaBrokerState(_leaderCountByBrokerId,
                           _outOfSyncCountByBrokerId,
                           _replicaCountByBrokerId,
                           _offlineReplicaCountByBrokerId,
                           _isControllerByBrokerId);

  _onlineLogDirsByBrokerId = new TreeMap<>();
  _offlineLogDirsByBrokerId = new TreeMap<>();
  // Broker LogDirs Summary
  populateKafkaBrokerLogDirState(_onlineLogDirsByBrokerId, _offlineLogDirsByBrokerId, _replicaCountByBrokerId.keySet());
}
 
Example #7
Source File: DeterministicCluster.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Generate the cluster metadata from given cluster model.
 * @param clusterModel The cluster model.
 * @return The cluster metadata.
 */
public static Cluster generateClusterFromClusterModel(ClusterModel clusterModel) {
  Map<Integer, Node> nodes = new HashMap<>();
  clusterModel.brokers().forEach(b -> nodes.put(b.id(), new Node(b.id(), b.host().name(), PORT, b.rack().id())));
  List<PartitionInfo> partitions = new ArrayList<>();
  for (List<Partition> pList : clusterModel.getPartitionsByTopic().values()) {
    for (Partition p : pList) {
      Node[] replicas = new Node[p.replicas().size()];
      for (int i = 0; i < p.replicas().size(); i++) {
        replicas[i] = nodes.get(p.replicas().get(i).broker().id());
      }
      Node[] inSyncReplicas = new Node[p.onlineFollowers().size() + 1];
      inSyncReplicas[0] = nodes.get(p.leader().broker().id());
      for (int i = 0; i < p.onlineFollowers().size(); i++) {
        inSyncReplicas[i + 1] = nodes.get(p.onlineFollowers().get(i).broker().id());
      }
      partitions.add(new PartitionInfo(p.topicPartition().topic(), p.topicPartition().partition(),
                                       nodes.get(p.leader().broker().id()), replicas, inSyncReplicas));
    }
  }
  return new Cluster(CLUSTER_ID, nodes.values(), partitions, Collections.emptySet(), Collections.emptySet());
}
 
Example #8
Source File: MetricFetcher.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * @param metricSampler The sampler used to retrieve metrics.
 * @param cluster The Kafka cluster.
 * @param sampleStore Sample store to persist the fetched samples, or skip storing samples if {@code null}.
 * @param assignedPartitions Partitions to fetch samples from.
 * @param startTimeMs The start time of the sampling period.
 * @param endTimeMs The end time of the sampling period.
 * @param metricDef The metric definitions.
 * @param fetchTimer The timer to keep track of metric fetch time.
 * @param fetchFailureRate The meter to keep track of failure rate while fetching metrics.
 * @param samplingMode The mode of sampling to indicate the sample type of interest.
 */
public MetricFetcher(MetricSampler metricSampler,
                     Cluster cluster,
                     SampleStore sampleStore,
                     Set<TopicPartition> assignedPartitions,
                     long startTimeMs,
                     long endTimeMs,
                     MetricDef metricDef,
                     Timer fetchTimer,
                     Meter fetchFailureRate,
                     MetricSampler.SamplingMode samplingMode) {
  _metricSampler = metricSampler;
  _cluster = cluster;
  _sampleStore = sampleStore;
  _assignedPartitions = assignedPartitions;
  _startTimeMs = startTimeMs;
  _endTimeMs = endTimeMs;
  _metricDef = metricDef;
  _fetchTimer = fetchTimer;
  _fetchFailureRate = fetchFailureRate;
  _samplingMode = samplingMode;
  _timeout = System.currentTimeMillis() + (endTimeMs - startTimeMs) / 2;
}
 
Example #9
Source File: PreferredLeaderElectionGoalTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testOptimizeWithDemotedBrokersAndSkipUrpDemotion() throws KafkaCruiseControlException {
  ClusterModelAndInfo clusterModelAndInfo = createClusterModel(false, false);
  ClusterModel clusterModel = clusterModelAndInfo._clusterModel;
  Cluster cluster = clusterModelAndInfo._clusterInfo;
  clusterModel.setBrokerState(1, Broker.State.DEMOTED);

  Map<TopicPartition, List<ReplicaPlacementInfo>> originalReplicaDistribution = clusterModel.getReplicaDistribution();
  PreferredLeaderElectionGoal goal = new PreferredLeaderElectionGoal(true, false, cluster);
  goal.optimize(clusterModel, Collections.emptySet(), new OptimizationOptions(Collections.emptySet()));

  // Operation on under replicated partitions should be skipped.
  for (String t : Arrays.asList(TOPIC1, TOPIC2)) {
    for (int p = 0; p < 3; p++) {
      TopicPartition tp = new TopicPartition(t, p);
      assertEquals("Tp " + tp, originalReplicaDistribution.get(tp), clusterModel.getReplicaDistribution().get(tp));
    }
  }
}
 
Example #10
Source File: GetStateRunnable.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Get the state with selected substates for Kafka Cruise Control.
 */
@Override
public CruiseControlState getResult() {
  // In case no substate is specified, return all substates.
  Set<CruiseControlState.SubState> substates = !_substates.isEmpty() ? _substates
                                                                     : new HashSet<>(Arrays.asList(CruiseControlState.SubState.values()));

  Cluster cluster = null;
  if (shouldRefreshClusterAndGeneration(substates)) {
    cluster = _kafkaCruiseControl.refreshClusterAndGeneration().cluster();
  }

  return new CruiseControlState(substates.contains(EXECUTOR) ? _kafkaCruiseControl.executorState() : null,
                                substates.contains(MONITOR) ? _kafkaCruiseControl.monitorState(cluster) : null,
                                substates.contains(ANALYZER) ? _kafkaCruiseControl.analyzerState(cluster) : null,
                                substates.contains(ANOMALY_DETECTOR) ? _kafkaCruiseControl.anomalyDetectorState() : null,
                                _kafkaCruiseControl.config());
}
 
Example #11
Source File: Executor.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Check if a task is done.
 */
private boolean isTaskDone(Cluster cluster,
                           Map<ExecutionTask, ReplicaLogDirInfo> logdirInfoByTask,
                           TopicPartition tp,
                           ExecutionTask task) {
  switch (task.type()) {
    case INTER_BROKER_REPLICA_ACTION:
      return isInterBrokerReplicaActionDone(cluster, tp, task);
    case INTRA_BROKER_REPLICA_ACTION:
      return isIntraBrokerReplicaActionDone(logdirInfoByTask, task);
    case LEADER_ACTION:
      return isLeadershipMovementDone(cluster, tp, task);
    default:
      return true;
  }
}
 
Example #12
Source File: HashPartitioner.java    From KafkaExample with Apache License 2.0
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
	List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
	int numPartitions = partitions.size();
	if (keyBytes != null) {
		int hashCode = 0;
		if (key instanceof Integer || key instanceof Long) {
			// Use the numeric value directly; a plain (int) cast on a Long key would throw ClassCastException.
			hashCode = ((Number) key).intValue();
		} else {
			hashCode = key.hashCode();
		}
		hashCode = hashCode & 0x7fffffff;
		return hashCode % numPartitions;
	} else {
		return 0;
	}
}
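A partitioner like the HashPartitioner above is not invoked directly; it is registered on a producer through the partitioner.class setting. A minimal sketch of that wiring (imports from java.util, org.apache.kafka.clients.producer, and org.apache.kafka.common.serialization omitted to match the snippet style above; the bootstrap server and topic name are placeholders):

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
// partitioner.class routes every record through the custom Partitioner implementation.
props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, HashPartitioner.class.getName());
try (Producer<Integer, String> producer = new KafkaProducer<>(props)) {
  producer.send(new ProducerRecord<>("example-topic", 42, "record value"));
}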
 
Example #13
Source File: CustomPartitioner.java    From ad with Apache License 2.0
/**
 * Decide which partition the message is written to.
 * @param topic topic
 * @param key key
 * @param keyBytes serialized key bytes
 * @param value value
 * @param valueBytes serialized value bytes
 * @param cluster kafka cluster
 * @return the partition the message is assigned to
 */
@Override
public int partition(String topic, Object key, byte[] keyBytes,
                     Object value, byte[] valueBytes, Cluster cluster) {
    // All partitions of the topic
    List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topic);
    int partitionCount = partitionInfos.size();
    // A String key is required; a key of "name" goes to the last partition, all other keys are hashed modulo the remaining partitions
    if (keyBytes == null || !key.getClass().equals(String.class)) {
        throw new InvalidRecordException("kafka message must have a String key");
    }
    if (partitionCount == 1 || StringUtils.endsWithIgnoreCase("name", key.toString())) {
        return partitionCount - 1;
    }
    return Math.abs(Utils.murmur2(keyBytes)) % (partitionCount - 1);
}
 
Example #14
Source File: AbstractReplicaMovementStrategy.java    From cruise-control with BSD 2-Clause "Simplified" License
@Override
public ReplicaMovementStrategy chain(ReplicaMovementStrategy strategy) {
  AbstractReplicaMovementStrategy current = this;
  return new AbstractReplicaMovementStrategy() {
    @Override
    public Comparator<ExecutionTask> taskComparator(Cluster cluster) {
      Comparator<ExecutionTask> comparator1 = current.taskComparator(cluster);
      Comparator<ExecutionTask> comparator2 = strategy.taskComparator(cluster);

      return (task1, task2) -> {
        int compareResult1 = comparator1.compare(task1, task2);
        return compareResult1 == 0 ? comparator2.compare(task1, task2) : compareResult1;
      };
    }

    @Override
    public String name() {
      return current.name() + "," + strategy.name();
    }
  };
}
 
Example #15
Source File: CruiseControlMetricsProcessorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test(expected = IllegalArgumentException.class)
public void testMissingBrokerCapacity() throws TimeoutException, BrokerCapacityResolutionException {
  Set<CruiseControlMetric> metrics = getCruiseControlMetrics();
  // All estimated.
  BrokerCapacityConfigResolver brokerCapacityConfigResolver = EasyMock.mock(BrokerCapacityConfigResolver.class);
  EasyMock.expect(brokerCapacityConfigResolver.capacityForBroker(EasyMock.anyString(), EasyMock.anyString(), EasyMock.anyInt(),
                                                                 EasyMock.anyLong(), EasyMock.anyBoolean()))
          .andReturn(new BrokerCapacityInfo(Collections.emptyMap(), Collections.emptyMap(), MOCK_NUM_CPU_CORES)).anyTimes();
  EasyMock.replay(brokerCapacityConfigResolver);

  CruiseControlMetricsProcessor processor = new CruiseControlMetricsProcessor(brokerCapacityConfigResolver, false);
  for (CruiseControlMetric cruiseControlMetric : metrics) {
    processor.addMetric(cruiseControlMetric);
  }

  Cluster cluster = getCluster();
  processor.process(cluster, TEST_PARTITIONS, MetricSampler.SamplingMode.ALL);
}
 
Example #16
Source File: LoadMonitor.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * @return True if the monitored load meets the given completeness requirements, false otherwise.
 */
public boolean meetCompletenessRequirements(Cluster cluster, ModelCompletenessRequirements requirements) {
  int numValidWindows =
      _partitionMetricSampleAggregator.validWindows(cluster, requirements.minMonitoredPartitionsPercentage()).size();
  int requiredNumValidWindows = requirements.minRequiredNumWindows();
  return numValidWindows >= requiredNumValidWindows;
}
 
Example #17
Source File: DefaultPartitioner.java    From azeroth with Apache License 2.0
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes,
                     Cluster cluster) {
    List<PartitionInfo> partitions = cluster.availablePartitionsForTopic(topic);
    int numPartitions = partitions.size();

    try {
        long partitionHash = ((DefaultMessage) value).getPartitionHash();
        // Partition by the message's own hash when one is provided
        if (partitionHash > 0) {
            long index = partitionHash % numPartitions;
            return (int) index;
        }
    } catch (ClassCastException e) {
        // value is not a DefaultMessage; fall back to key-based partitioning below
    }

    if (keyBytes == null) {
        int nextValue = counter.getAndIncrement();
        List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic);
        if (availablePartitions.size() > 0) {
            int part = DefaultPartitioner.toPositive(nextValue) % availablePartitions.size();
            return availablePartitions.get(part).partition();
        } else {
            // no partitions are available, give a non-available partition
            return DefaultPartitioner.toPositive(nextValue) % numPartitions;
        }
    } else {
        // hash the keyBytes to choose a partition
        return DefaultPartitioner.toPositive(Utils.murmur2(keyBytes)) % numPartitions;
    }
}
 
Example #18
Source File: KafkaPartitionMetricSampleAggregator.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Get a sorted set of valid windows in the aggregator. A valid window is a window with
 * {@link MonitorConfig#MIN_VALID_PARTITION_RATIO_CONFIG enough valid partitions}
 * being monitored. A valid partition must be valid in all the windows in the returned set.
 *
 * @param cluster Kafka cluster.
 * @param minMonitoredPartitionsPercentage the minimum required monitored partitions percentage.
 * @return A sorted set of valid windows in the aggregator.
 */
public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) {
  AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(minMonitoredPartitionsPercentage,
                                                                                 0.0,
                                                                                 1,
                                                                                 _maxAllowedExtrapolationsPerPartition,
                                                                                 allPartitions(cluster),
                                                                                 AggregationOptions.Granularity.ENTITY,
                                                                                 true);
  MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options);
  return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs);
}
 
Example #19
Source File: HolderUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Check if it is reasonable for a broker raw metric to be missing. As of now, it appears that only the following metrics
 * might be missing:
 * <ul>
 *   <li>BROKER_FOLLOWER_FETCH_REQUEST_RATE (with additional constraints)</li>
 *   <li>BROKER_LOG_FLUSH_RATE</li>
 *   <li>BROKER_LOG_FLUSH_TIME_MS_MEAN</li>
 *   <li>BROKER_LOG_FLUSH_TIME_MS_MAX</li>
 *   <li>BROKER_LOG_FLUSH_TIME_MS_50TH</li>
 *   <li>BROKER_LOG_FLUSH_TIME_MS_999TH</li>
 *   <li>BROKER_PRODUCE_REQUEST_RATE</li>
 *   <li>BROKER_CONSUMER_FETCH_REQUEST_RATE</li>
 * </ul>
 * When these raw metrics are missing, broker load is expected to use {@link #MISSING_BROKER_METRIC_VALUE} as the value.
 *
 * @param cluster The Kafka cluster.
 * @param brokerId The id of the broker whose raw metric is missing.
 * @param rawMetricType The raw metric type that is missing.
 * @return True if the metric is allowed to be missing, false otherwise.
 */
static boolean allowMissingBrokerMetric(Cluster cluster, int brokerId, RawMetricType rawMetricType) {
  switch (rawMetricType) {
    case BROKER_FOLLOWER_FETCH_REQUEST_RATE:
      for (PartitionInfo partitionInfo : cluster.partitionsForNode(brokerId)) {
        // If there is at least one leader partition on the broker that meets the following conditions:
        // 1. the replication factor is greater than 1, and
        // 2. there is more than one alive replica,
        // then the broker must report BrokerFollowerFetchRequestRate.
        if (partitionInfo.replicas().length > 1
            && partitionInfo.leader() != null
            && partitionInfo.leader().id() == brokerId) {
          return false;
        }
      }
      return true;
    case BROKER_LOG_FLUSH_RATE:
    case BROKER_LOG_FLUSH_TIME_MS_MEAN:
    case BROKER_LOG_FLUSH_TIME_MS_MAX:
    case BROKER_LOG_FLUSH_TIME_MS_50TH:
    case BROKER_LOG_FLUSH_TIME_MS_999TH:
    case BROKER_PRODUCE_REQUEST_RATE:
    case BROKER_CONSUMER_FETCH_REQUEST_RATE:
      return true;
    default:
      return false;
  }
}
 
Example #20
Source File: KafkaPartitionMetricSampleAggregator.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Get the valid partitions percentage across all the windows.
 *
 * @param cluster Kafka cluster.
 * @return The percentage of valid partitions across all the windows.
 */
public double monitoredPercentage(Cluster cluster) {
  AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(0.0,
                                                                                 0.0,
                                                                                 1,
                                                                                 _maxAllowedExtrapolationsPerPartition,
                                                                                 allPartitions(cluster),
                                                                                 AggregationOptions.Granularity.ENTITY,
                                                                                 true);
  MetricSampleCompleteness<String, PartitionEntity> completeness = completeness(-1, Long.MAX_VALUE, options);
  return completeness.validEntityRatio();
}
 
Example #21
Source File: KafkaPartitionMetricSampleAggregator.java    From cruise-control with BSD 2-Clause "Simplified" License
private AggregationOptions<String, PartitionEntity> toAggregationOptions(Cluster cluster,
                                                                         ModelCompletenessRequirements requirements) {
  Set<PartitionEntity> allPartitions = allPartitions(cluster);
  return new AggregationOptions<>(requirements.minMonitoredPartitionsPercentage(),
                                  0.0,
                                  requirements.minRequiredNumWindows(),
                                  _maxAllowedExtrapolationsPerPartition,
                                  allPartitions,
                                  AggregationOptions.Granularity.ENTITY,
                                  requirements.includeAllTopics());
}
 
Example #22
Source File: Executor.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * For an inter-broker replica movement action, completion depends on the task state:
 * IN_PROGRESS: done when the current replica list is the same as the new replica list and all replicas are in-sync.
 * ABORTING: done when the current replica list is the same as the old replica list. Due to a race condition,
 *           we also consider it done if the current replica list is the same as the new replica list and all replicas
 *           are in-sync.
 * DEAD: always considered done because we neither move forward nor roll back.
 *
 * No other task state should be seen here.
 */
private boolean isInterBrokerReplicaActionDone(Cluster cluster, TopicPartition tp, ExecutionTask task) {
  PartitionInfo partitionInfo = cluster.partition(tp);
  switch (task.state()) {
    case IN_PROGRESS:
      return task.proposal().isInterBrokerMovementCompleted(partitionInfo);
    case ABORTING:
      return task.proposal().isInterBrokerMovementAborted(partitionInfo);
    case DEAD:
      return true;
    default:
      throw new IllegalStateException("Should never be here. State " + task.state());
  }
}
 
Example #23
Source File: LoadMonitorTaskRunnerTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Override
public Samples getSamples(Cluster cluster,
                          Set<TopicPartition> assignedPartitions,
                          long startTime,
                          long endTime,
                          SamplingMode mode,
                          MetricDef metricDef,
                          long timeout) throws MetricSamplingException {

  if (_exceptionsLeft > 0) {
    _exceptionsLeft--;
    throw new MetricSamplingException("Error");
  }
  Set<PartitionMetricSample> partitionMetricSamples = new HashSet<>(assignedPartitions.size());
  for (TopicPartition tp : assignedPartitions) {
    PartitionMetricSample sample = new PartitionMetricSample(cluster.partition(tp).leader().id(), tp);
    long now = TIME.milliseconds();
    for (Resource resource : Resource.cachedValues()) {
      for (MetricInfo metricInfo : KafkaMetricDef.resourceToMetricInfo(resource)) {
        sample.record(metricInfo, now);
      }
    }
    sample.close(now);
    partitionMetricSamples.add(sample);
  }

  return new Samples(partitionMetricSamples, Collections.emptySet());
}
 
Example #24
Source File: KeyModPartitioner.java    From SkyEye with GNU General Public License v3.0
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
    int numPartitions = partitions.size();
    int partitionNum = 0;
    try {
        partitionNum = Utils.murmur2(keyBytes);
    } catch (Exception e) {
        partitionNum = key.hashCode();
    }

    return Math.abs(partitionNum % numPartitions);
}
 
Example #25
Source File: LoadMonitor.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Get cluster capacity, and skip populating cluster load. Enables quick retrieval of capacity without the load.
 * @return Cluster capacity without cluster load.
 */
public ClusterModel clusterCapacity() throws TimeoutException, BrokerCapacityResolutionException {
  MetadataClient.ClusterAndGeneration clusterAndGeneration = _metadataClient.refreshMetadata();
  Cluster cluster = clusterAndGeneration.cluster();

  // Create an empty cluster model first.
  ModelGeneration modelGeneration = new ModelGeneration(clusterAndGeneration.generation(), -1L);
  ClusterModel clusterModel = new ClusterModel(modelGeneration, 0.0);

  populateClusterCapacity(false, false, clusterModel, cluster);
  // Set the state of bad brokers in clusterModel based on the Kafka cluster state.
  setBadBrokerState(clusterModel, cluster);
  return clusterModel;
}
 
Example #26
Source File: CruiseControlMetricsProcessorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
private Cluster getCluster() {
  Node node0 = new Node(BROKER_ID_0, "localhost", 100, "rack0");
  Node node1 = new Node(BROKER_ID_1, "localhost", 100, "rack1");
  Node[] nodes = {node0, node1};
  Set<Node> allNodes = new HashSet<>();
  allNodes.add(node0);
  allNodes.add(node1);
  Set<PartitionInfo> parts = new HashSet<>();
  parts.add(new PartitionInfo(TOPIC1, P0, node0, nodes, nodes));
  parts.add(new PartitionInfo(TOPIC1, P1, node1, nodes, nodes));
  parts.add(new PartitionInfo(TOPIC2, P0, node0, nodes, nodes));
  parts.add(new PartitionInfo(TOPIC2, P1, node0, nodes, nodes));
  return new Cluster("testCluster", allNodes, parts, Collections.emptySet(), Collections.emptySet());
}
 
Example #27
Source File: MonitorUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * @param cluster Kafka cluster.
 * @return All the brokers in the cluster that host at least one replica.
 */
static Set<Integer> brokersWithReplicas(Cluster cluster) {
  Set<Integer> allBrokers = new HashSet<>();
  for (String topic : cluster.topics()) {
    for (PartitionInfo partition : cluster.partitionsForTopic(topic)) {
      Arrays.stream(partition.replicas()).map(Node::id).forEach(allBrokers::add);
    }
  }
  return allBrokers;
}
 
Example #28
Source File: MonitorUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * @return True if the metadata has changed, false otherwise.
 */
public static boolean metadataChanged(Cluster prev, Cluster curr) {
  // Broker has changed.
  Set<Node> prevNodeSet = new HashSet<>(prev.nodes());
  if (prevNodeSet.size() != curr.nodes().size()) {
    return true;
  }
  prevNodeSet.removeAll(curr.nodes());
  if (!prevNodeSet.isEmpty()) {
    return true;
  }
  // Topic has changed
  if (!prev.topics().equals(curr.topics())) {
    return true;
  }

  // partition has changed.
  for (String topic : prev.topics()) {
    if (!prev.partitionCountForTopic(topic).equals(curr.partitionCountForTopic(topic))) {
      return true;
    }
    for (PartitionInfo prevPartInfo : prev.partitionsForTopic(topic)) {
      PartitionInfo currPartInfo = curr.partition(new TopicPartition(prevPartInfo.topic(), prevPartInfo.partition()));
      if (leaderChanged(prevPartInfo, currPartInfo) || replicaListChanged(prevPartInfo, currPartInfo)) {
        return true;
      }
    }
  }
  return false;
}
 
Example #29
Source File: RoundRobinPartitioner.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
  List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
  int numPartitions = partitions.size();
  int p = counter % numPartitions;
  counter++;
  return partitions.get(p).partition();
}
 
Example #30
Source File: ExpressionPartitioner20.java    From datacollector with Apache License 2.0
@Override
public int partition(
  String topic,
  Object key,
  byte[] keyBytes,
  Object value,
  byte[] valueBytes,
  Cluster cluster
) {
  return Integer.parseInt((String)key);
}