Java Code Examples for org.apache.kafka.common.Cluster

The following examples show how to use org.apache.kafka.common.Cluster. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
@Override
public ReplicaMovementStrategy chain(ReplicaMovementStrategy strategy) {
  // Capture the receiver so the anonymous class below can delegate to it.
  AbstractReplicaMovementStrategy primary = this;
  return new AbstractReplicaMovementStrategy() {
    @Override
    public Comparator<ExecutionTask> taskComparator(Cluster cluster) {
      // Order by this strategy first; fall back to the chained strategy on ties.
      Comparator<ExecutionTask> first = primary.taskComparator(cluster);
      Comparator<ExecutionTask> tieBreaker = strategy.taskComparator(cluster);

      return (left, right) -> {
        int byFirst = first.compare(left, right);
        if (byFirst != 0) {
          return byFirst;
        }
        return tieBreaker.compare(left, right);
      };
    }

    @Override
    public String name() {
      // Chained strategies report a comma-separated composite name.
      return primary.name() + "," + strategy.name();
    }
  };
}
 
Example 2
Source Project: ad   Source File: CustomPartitioner.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Decides which partition a record is written to.
 * <p>
 * A {@code String} key is mandatory: the key "name" is routed to the last
 * partition; all other keys are murmur2-hashed onto the remaining partitions.
 *
 * @param topic topic
 * @param key key
 * @param keyBytes key serialize byte
 * @param value value
 * @param valueBytes value serialize byte
 * @param cluster kafka cluster
 * @return the chosen partition index
 * @throws InvalidRecordException if the record has no String key
 */
@Override
public int partition(String topic, Object key, byte[] keyBytes,
                     Object value, byte[] valueBytes, Cluster cluster) {
    // All partitions of the topic.
    List<PartitionInfo> partitionInfos = cluster.partitionsForTopic(topic);
    int partitionCount = partitionInfos.size();
    // A String key is required; "name" goes to the last partition, everything else is hashed.
    if (keyBytes == null || !key.getClass().equals(String.class)) {
        throw new InvalidRecordException("kafka message must have a String key");
    }
    // NOTE(review): endsWithIgnoreCase("name", key) checks whether the literal "name"
    // ends with the key (so keys "e", "me", ... also match). If the intent is
    // key.equals("name"), the arguments look swapped — confirm against callers.
    if (partitionCount == 1 || StringUtils.endsWithIgnoreCase("name", key.toString())) {
        return partitionCount - 1;
    }
    // Mask the sign bit instead of Math.abs: murmur2 may return Integer.MIN_VALUE,
    // and Math.abs(Integer.MIN_VALUE) is still negative, which would produce an
    // invalid (negative) partition index.
    return (Utils.murmur2(keyBytes) & 0x7fffffff) % (partitionCount - 1);
}
 
Example 3
Source Project: KafkaExample   Source File: HashPartitioner.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Hash-based partitioner: Integer/Long keys use their numeric value, other keys
 * use {@code hashCode()}; keyless records always go to partition 0.
 *
 * @return a partition index in {@code [0, numPartitions)}
 */
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
	List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
	int numPartitions = partitions.size();
	if (keyBytes != null) {
		int hashCode;
		if (key instanceof Integer) {
			hashCode = (Integer) key;
		} else if (key instanceof Long) {
			// Fix: casting an Object holding a Long straight to int
			// ((int) key) throws ClassCastException because the unboxing
			// cast goes through Integer. Use Long#intValue() instead.
			hashCode = ((Long) key).intValue();
		} else {
			hashCode = key.hashCode();
		}
		// Clear the sign bit so the modulo result is a valid partition index.
		hashCode = hashCode & 0x7fffffff;
		return hashCode % numPartitions;
	} else {
		// No key: always partition 0.
		return 0;
	}
}
 
Example 4
/**
 * Scan through topics to check whether the topic having partition(s) with bad replication factor. For each topic, the
 * target replication factor to check against is the maximum value of {@link #SELF_HEALING_TARGET_TOPIC_REPLICATION_FACTOR_CONFIG}
 * and topic's minISR plus value of {@link #TOPIC_REPLICATION_FACTOR_MARGIN_CONFIG}.
 *
 * @param topicsToCheck Set of topics to check.
 * @param cluster Current cluster state, used to look up each topic's partition replica lists.
 * @return Map of detected topic replication factor anomaly entries by target replication factor.
 */
private Map<Short, Set<TopicReplicationFactorAnomalyEntry>> populateBadTopicsByReplicationFactor(Set<String> topicsToCheck, Cluster cluster) {
  Map<Short, Set<TopicReplicationFactorAnomalyEntry>> topicsByReplicationFactor = new HashMap<>();
  for (String topic : topicsToCheck) {
    // Topics without a cached minISR are silently skipped — no target RF can be computed for them.
    if (_cachedTopicMinISR.containsKey(topic)) {
      short topicMinISR = _cachedTopicMinISR.get(topic).minISR();
      // Target RF = max(configured target RF, topic minISR + configured margin).
      short targetReplicationFactor = (short) Math.max(_targetReplicationFactor, topicMinISR + _topicReplicationFactorMargin);
      int violatedPartitionCount = 0;
      for (PartitionInfo partitionInfo : cluster.partitionsForTopic(topic)) {
        // Any partition whose replica count differs from the target (above or below) counts as violated.
        if (partitionInfo.replicas().length != targetReplicationFactor) {
          violatedPartitionCount++;
        }
      }
      if (violatedPartitionCount > 0) {
        topicsByReplicationFactor.putIfAbsent(targetReplicationFactor, new HashSet<>());
        // The anomaly entry records the fraction of violated partitions for the topic.
        topicsByReplicationFactor.get(targetReplicationFactor).add(
            new TopicReplicationFactorAnomalyEntry(topic, (double) violatedPartitionCount /  cluster.partitionCountForTopic(topic)));
      }
    }
  }
  return topicsByReplicationFactor;
}
 
Example 5
/**
 * Gathers per-broker state for the given Kafka cluster: leader / replica /
 * out-of-sync / offline-replica counts, a controller flag, and online/offline
 * log-dir summaries, each keyed by broker id in sorted (TreeMap) order.
 *
 * @param kafkaCluster Kafka cluster to gather broker state from.
 * @param adminClientConfigs Configs used by the admin-client based population calls.
 * @param config Cruise Control configuration.
 * @throws ExecutionException if an underlying admin-client future fails.
 * @throws InterruptedException if interrupted while waiting for broker state.
 */
public ClusterBrokerState(Cluster kafkaCluster, Map<String, Object> adminClientConfigs, KafkaCruiseControlConfig config)
    throws ExecutionException, InterruptedException {
  _kafkaCluster = kafkaCluster;
  _adminClientConfigs = adminClientConfigs;
  _config = config;
  // TreeMaps keep per-broker results sorted by broker id.
  _leaderCountByBrokerId = new TreeMap<>();
  _outOfSyncCountByBrokerId = new TreeMap<>();
  _replicaCountByBrokerId = new TreeMap<>();
  _offlineReplicaCountByBrokerId = new TreeMap<>();
  _isControllerByBrokerId = new TreeMap<>();
  // Gather the broker state.
  populateKafkaBrokerState(_leaderCountByBrokerId,
                           _outOfSyncCountByBrokerId,
                           _replicaCountByBrokerId,
                           _offlineReplicaCountByBrokerId,
                           _isControllerByBrokerId);

  _onlineLogDirsByBrokerId = new TreeMap<>();
  _offlineLogDirsByBrokerId = new TreeMap<>();
  // Broker LogDirs Summary — only for brokers discovered above.
  populateKafkaBrokerLogDirState(_onlineLogDirsByBrokerId, _offlineLogDirsByBrokerId, _replicaCountByBrokerId.keySet());
}
 
Example 6
/**
 * Get the state with selected substates for Kafka Cruise Control.
 */
@Override
public CruiseControlState getResult() {
  // An empty selection means "report every substate".
  Set<CruiseControlState.SubState> substates;
  if (_substates.isEmpty()) {
    substates = new HashSet<>(Arrays.asList(CruiseControlState.SubState.values()));
  } else {
    substates = _substates;
  }

  // Refresh cluster metadata only when one of the requested substates needs it.
  Cluster cluster = null;
  if (shouldRefreshClusterAndGeneration(substates)) {
    cluster = _kafkaCruiseControl.refreshClusterAndGeneration().cluster();
  }

  boolean wantExecutor = substates.contains(EXECUTOR);
  boolean wantMonitor = substates.contains(MONITOR);
  boolean wantAnalyzer = substates.contains(ANALYZER);
  boolean wantAnomalyDetector = substates.contains(ANOMALY_DETECTOR);
  return new CruiseControlState(wantExecutor ? _kafkaCruiseControl.executorState() : null,
                                wantMonitor ? _kafkaCruiseControl.monitorState(cluster) : null,
                                wantAnalyzer ? _kafkaCruiseControl.analyzerState(cluster) : null,
                                wantAnomalyDetector ? _kafkaCruiseControl.anomalyDetectorState() : null,
                                _kafkaCruiseControl.config());
}
 
Example 7
@Test(expected = IllegalArgumentException.class)
public void testMissingBrokerCapacity() throws TimeoutException, BrokerCapacityResolutionException {
  Set<CruiseControlMetric> metrics = getCruiseControlMetrics();
  // Resolver answers every broker with empty capacity maps, i.e. all estimated.
  BrokerCapacityConfigResolver resolver = EasyMock.mock(BrokerCapacityConfigResolver.class);
  EasyMock.expect(resolver.capacityForBroker(EasyMock.anyString(), EasyMock.anyString(), EasyMock.anyInt(),
                                             EasyMock.anyLong(), EasyMock.anyBoolean()))
          .andReturn(new BrokerCapacityInfo(Collections.emptyMap(), Collections.emptyMap(), MOCK_NUM_CPU_CORES)).anyTimes();
  EasyMock.replay(resolver);

  CruiseControlMetricsProcessor processor = new CruiseControlMetricsProcessor(resolver, false);
  metrics.forEach(processor::addMetric);

  // Expected to throw IllegalArgumentException because broker capacities are missing.
  processor.process(getCluster(), TEST_PARTITIONS, MetricSampler.SamplingMode.ALL);
}
 
Example 8
Source Project: cruise-control   Source File: Executor.java    License: BSD 2-Clause "Simplified" License 6 votes vote down vote up
/**
 * Check if a task is done.
 */
private boolean isTaskDone(Cluster cluster,
                           Map<ExecutionTask, ReplicaLogDirInfo> logdirInfoByTask,
                           TopicPartition  tp,
                           ExecutionTask task) {
  // Dispatch on the task type; unrecognized types are treated as already complete.
  switch (task.type()) {
    case LEADER_ACTION:
      return isLeadershipMovementDone(cluster, tp, task);
    case INTRA_BROKER_REPLICA_ACTION:
      // Intra-broker moves are tracked via log-dir info rather than cluster metadata.
      return isIntraBrokerReplicaActionDone(logdirInfoByTask, task);
    case INTER_BROKER_REPLICA_ACTION:
      return isInterBrokerReplicaActionDone(cluster, tp, task);
    default:
      return true;
  }
}
 
Example 9
Source Project: cruise-control   Source File: LoadMonitor.java    License: BSD 2-Clause "Simplified" License 6 votes vote down vote up
/**
 * Computes the ratio of partitions whose metric samples are valid, after refreshing
 * cluster metadata. As a side effect, updates the extrapolated-partition and
 * total-partition counters.
 *
 * @return The valid-entity ratio of the aggregation, or 0.0 when there are not
 *         enough valid windows or the cluster has no partitions.
 */
private double getMonitoredPartitionsPercentage() {
  MetadataClient.ClusterAndGeneration clusterAndGeneration = _metadataClient.refreshMetadata();

  Cluster kafkaCluster = clusterAndGeneration.cluster();
  MetricSampleAggregationResult<String, PartitionEntity> metricSampleAggregationResult;
  try {
    metricSampleAggregationResult = _partitionMetricSampleAggregator.aggregate(kafkaCluster,
                                                                               _time.milliseconds(),
                                                                               new OperationProgress());
  } catch (NotEnoughValidWindowsException e) {
    // Not an error: simply report 0% monitored until enough windows accumulate.
    LOG.debug("Not enough valid windows to get monitored partitions. {}", e.getMessage());
    return 0.0;
  }
  Map<PartitionEntity, ValuesAndExtrapolations> partitionLoads = metricSampleAggregationResult.valuesAndExtrapolations();
  // Count partitions whose values required extrapolation.
  AtomicInteger numPartitionsWithExtrapolations = new AtomicInteger(0);
  partitionLoads.values().forEach(valuesAndExtrapolations -> {
    if (!valuesAndExtrapolations.extrapolations().isEmpty()) {
      numPartitionsWithExtrapolations.incrementAndGet();
    }
  });
  _numPartitionsWithExtrapolations = numPartitionsWithExtrapolations.get();
  _totalNumPartitions = MonitorUtils.totalNumPartitions(kafkaCluster);
  // Guard against division-by-zero semantics: an empty cluster is 0% monitored.
  return _totalNumPartitions > 0 ? metricSampleAggregationResult.completeness().validEntityRatio() : 0.0;
}
 
Example 10
@Test
public void testClear() {
  ExecutionTaskPlanner planner =
      new ExecutionTaskPlanner(null, new KafkaCruiseControlConfig(KafkaCruiseControlUnitTestUtils.getKafkaCruiseControlProperties()));

  // One leadership movement and one partition movement.
  List<ExecutionProposal> proposals = new ArrayList<>(Arrays.asList(_leaderMovement1, _partitionMovement1));

  Set<PartitionInfo> partitions = new HashSet<>(Arrays.asList(
      generatePartitionInfo(_leaderMovement1, false),
      generatePartitionInfo(_partitionMovement1, false)));

  Cluster expectedCluster = new Cluster(null,
                                        _expectedNodes,
                                        partitions,
                                        Collections.<String>emptySet(),
                                        Collections.<String>emptySet());

  // Both proposals register leadership and inter-broker movements...
  planner.addExecutionProposals(proposals, expectedCluster, null);
  assertEquals(2, planner.remainingLeadershipMovements().size());
  assertEquals(2, planner.remainingInterBrokerReplicaMovements().size());

  // ...and clear() must drop all of them.
  planner.clear();
  assertEquals(0, planner.remainingLeadershipMovements().size());
  assertEquals(0, planner.remainingInterBrokerReplicaMovements().size());
}
 
Example 11
@Test
public void testMissingPartitionSizeMetric() throws TimeoutException, BrokerCapacityResolutionException {
  CruiseControlMetricsProcessor processor = new CruiseControlMetricsProcessor(mockBrokerCapacityConfigResolver(), false);
  // Feed every metric except the PARTITION_SIZE metric of TOPIC1 / P0.
  for (CruiseControlMetric metric : getCruiseControlMetrics()) {
    if (metric.rawMetricType() == RawMetricType.PARTITION_SIZE) {
      PartitionMetric partitionMetric = (PartitionMetric) metric;
      if (partitionMetric.topic().equals(TOPIC1) && partitionMetric.partition() == P0) {
        continue;
      }
    }
    processor.addMetric(metric);
  }
  MetricSampler.Samples samples = processor.process(getCluster(), TEST_PARTITIONS, MetricSampler.SamplingMode.ALL);
  assertEquals("Should have ignored partition " + T1P0, 3, samples.partitionMetricSamples().size());
  assertEquals("Should have reported both brokers", 2, samples.brokerMetricSamples().size());
}
 
Example 12
/**
 * Generate the cluster metadata from given cluster model.
 * @param clusterModel The cluster model.
 * @return The cluster metadata.
 */
public static Cluster generateClusterFromClusterModel(ClusterModel clusterModel) {
  // Translate brokers to metadata nodes, keyed by broker id.
  Map<Integer, Node> nodes = new HashMap<>();
  clusterModel.brokers().forEach(b -> nodes.put(b.id(), new Node(b.id(), b.host().name(), PORT, b.rack().id())));
  List<PartitionInfo> partitions = new ArrayList<>();
  for (List<Partition> pList : clusterModel.getPartitionsByTopic().values()) {
    for (Partition p : pList) {
      Node[] replicas = new Node[p.replicas().size()];
      for (int i = 0; i < p.replicas().size(); i++) {
        replicas[i] = nodes.get(p.replicas().get(i).broker().id());
      }
      // ISR = leader followed by all online followers.
      Node[] inSyncReplicas = new Node[p.onlineFollowers().size() + 1];
      inSyncReplicas[0] = nodes.get(p.leader().broker().id());
      for (int i = 0; i < p.onlineFollowers().size(); i++) {
        // Fix: this loop previously wrote into `replicas[i + 1]`, clobbering the
        // replica list and leaving inSyncReplicas[1..] null.
        inSyncReplicas[i + 1] = nodes.get(p.onlineFollowers().get(i).broker().id());
      }
      partitions.add(new PartitionInfo(p.topicPartition().topic(), p.topicPartition().partition(),
                                       nodes.get(p.leader().broker().id()), replicas, inSyncReplicas));
    }
  }
  return new Cluster(CLUSTER_ID, nodes.values(), partitions, Collections.emptySet(), Collections.emptySet());
}
 
Example 13
/**
 * Creates a fetcher that retrieves metric samples for the assigned partitions over
 * the window [startTimeMs, endTimeMs] and optionally persists them to a sample store.
 *
 * @param metricSampler The sampler used to retrieve metrics.
 * @param cluster The Kafka cluster.
 * @param sampleStore Sample store to persist the fetched samples, or skip storing samples if {@code null}.
 * @param assignedPartitions Partitions to fetch samples from.
 * @param startTimeMs The start time of the sampling period.
 * @param endTimeMs The end time of the sampling period.
 * @param metricDef The metric definitions.
 * @param fetchTimer The timer to keep track of metric fetch time.
 * @param fetchFailureRate The meter to keep track of failure rate while fetching metrics.
 * @param samplingMode The mode of sampling to indicate the sample type of interest.
 */
public MetricFetcher(MetricSampler metricSampler,
                     Cluster cluster,
                     SampleStore sampleStore,
                     Set<TopicPartition> assignedPartitions,
                     long startTimeMs,
                     long endTimeMs,
                     MetricDef metricDef,
                     Timer fetchTimer,
                     Meter fetchFailureRate,
                     MetricSampler.SamplingMode samplingMode) {
  _metricSampler = metricSampler;
  _cluster = cluster;
  _sampleStore = sampleStore;
  _assignedPartitions = assignedPartitions;
  _startTimeMs = startTimeMs;
  _endTimeMs = endTimeMs;
  _metricDef = metricDef;
  _fetchTimer = fetchTimer;
  _fetchFailureRate = fetchFailureRate;
  _samplingMode = samplingMode;
  // Deadline for the fetch: half the sampling period's length, measured from now.
  _timeout = System.currentTimeMillis() + (endTimeMs - startTimeMs) / 2;
}
 
Example 14
@Test
public void testOptimizeWithDemotedBrokersAndSkipUrpDemotion() throws KafkaCruiseControlException {
  ClusterModelAndInfo clusterModelAndInfo = createClusterModel(false, false);
  ClusterModel clusterModel = clusterModelAndInfo._clusterModel;
  Cluster cluster = clusterModelAndInfo._clusterInfo;
  clusterModel.setBrokerState(1, Broker.State.DEMOTED);

  // Snapshot the replica placement before running the goal.
  Map<TopicPartition, List<ReplicaPlacementInfo>> replicasBefore = clusterModel.getReplicaDistribution();
  PreferredLeaderElectionGoal goal = new PreferredLeaderElectionGoal(true, false, cluster);
  goal.optimize(clusterModel, Collections.emptySet(), new OptimizationOptions(Collections.emptySet()));

  // With URP demotion skipped, under replicated partitions must be left untouched.
  Map<TopicPartition, List<ReplicaPlacementInfo>> replicasAfter = clusterModel.getReplicaDistribution();
  for (String topic : Arrays.asList(TOPIC1, TOPIC2)) {
    for (int partitionId = 0; partitionId < 3; partitionId++) {
      TopicPartition tp = new TopicPartition(topic, partitionId);
      assertEquals("Tp " + tp, replicasBefore.get(tp), replicasAfter.get(tp));
    }
  }
}
 
Example 15
/**
 * Builds, for each involved broker, the ordered set of replica-movement tasks it
 * participates in (as the source broker and as a destination broker), ordered by
 * this strategy's {@code taskComparator}.
 *
 * @param replicaMovementTasks Tasks to distribute into per-broker execution plans.
 * @param cluster Current cluster state, used to build the task comparator.
 * @return Sorted task sets keyed by broker id.
 * @throws IllegalStateException if the comparator cannot order two distinct tasks
 *         (the sorted set would silently drop one of them).
 */
@Override
public Map<Integer, SortedSet<ExecutionTask>> applyStrategy(Set<ExecutionTask> replicaMovementTasks, Cluster cluster) {
  Map<Integer, SortedSet<ExecutionTask>> tasksByBrokerId = new HashMap<>();

  for (ExecutionTask task : replicaMovementTasks) {
    ExecutionProposal proposal = task.proposal();

    // Add the task to the source broker's execution plan.
    addTaskForBroker(tasksByBrokerId, proposal.oldLeader().brokerId(), task, cluster);

    // Add the task to each destination broker's execution plan.
    for (ReplicaPlacementInfo destinationBroker : proposal.replicasToAdd()) {
      addTaskForBroker(tasksByBrokerId, destinationBroker.brokerId(), task, cluster);
    }
  }
  return tasksByBrokerId;
}

/** Adds {@code task} to the plan of {@code brokerId}, failing if the comparator cannot order it. */
private void addTaskForBroker(Map<Integer, SortedSet<ExecutionTask>> tasksByBrokerId,
                              int brokerId,
                              ExecutionTask task,
                              Cluster cluster) {
  SortedSet<ExecutionTask> taskSet = tasksByBrokerId.computeIfAbsent(brokerId, k -> new TreeSet<>(taskComparator(cluster)));
  if (!taskSet.add(task)) {
    throw new IllegalStateException("Replica movement strategy " + this.getClass().getSimpleName() + " failed to determine order of tasks.");
  }
}
 
Example 16
Source Project: kafka-helmsman   Source File: TopicServiceImplTest.java    License: MIT License 5 votes vote down vote up
@Test
public void testConfiguredTopic() {
  // One-broker cluster hosting single-partition topic "test".
  Cluster cluster = createCluster(1);
  TopicPartitionInfo partitionInfo =
      new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList());
  TopicDescription description = new TopicDescription("test", false, Collections.singletonList(partitionInfo));

  // A dynamic default broker config must not appear in the resulting topic config map.
  ConfigEntry configEntry = mock(ConfigEntry.class);
  when(configEntry.source()).thenReturn(ConfigEntry.ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG);
  KafkaFuture<Config> futureConfig = KafkaFuture.completedFuture(new Config(Collections.singletonList(configEntry)));

  ConfiguredTopic expected = new ConfiguredTopic("test", 1, (short) 1, Collections.emptyMap());
  Assert.assertEquals(expected, TopicServiceImpl.configuredTopic(description, futureConfig));
}
 
Example 17
Source Project: ameliant-tools   Source File: RoundRobinPartitioner.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Round-robin partitioner: routes successive records across the topic's
 * partitions in message-arrival order.
 *
 * @return a partition index in {@code [0, numPartitions)}
 */
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
    int numPartitions = partitions.size();

    // Mask the sign bit: once the counter wraps past Integer.MAX_VALUE,
    // incrementAndGet() returns negative values and a plain modulo would
    // produce an invalid (negative) partition index.
    return (messageCount.incrementAndGet() & 0x7fffffff) % numPartitions;
}
 
Example 18
Source Project: kafka-helmsman   Source File: TopicServiceImplTest.java    License: MIT License 5 votes vote down vote up
@Test
public void testListExisting() {
  // One-broker cluster; each topic gets the same single-partition layout.
  Cluster cluster = createCluster(1);
  TopicPartitionInfo tp = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList());
  ConfigEntry configEntry = new ConfigEntry("k", "v");
  KafkaFuture<Config> kfc = KafkaFuture.completedFuture(new Config(Collections.singletonList(configEntry)));
  // "_c" has a leading underscore — expected to be excluded from the final result.
  Set<String> topicNames = new HashSet<>(Arrays.asList("a", "b", "_c"));
  Map<String, TopicDescription> tds = new HashMap<String, TopicDescription>() {
    {
      put("a", new TopicDescription("a", false, Collections.singletonList(tp)));
      put("b", new TopicDescription("b", false, Collections.singletonList(tp)));
      // NOTE(review): map key "c" vs description name "_c" look inconsistent — confirm intent.
      put("c", new TopicDescription("_c", false, Collections.singletonList(tp)));
    }
  };
  Map<ConfigResource, KafkaFuture<Config>> configs = new HashMap<ConfigResource, KafkaFuture<Config>>() {
    {
      put(new ConfigResource(TOPIC, "a"), kfc);
      put(new ConfigResource(TOPIC, "b"), kfc);
      put(new ConfigResource(TOPIC, "_c"), kfc);
    }
  };

  TopicService service = new TopicServiceImpl(adminClient, true);
  // Mock the three admin-client results: topic names, descriptions and configs.
  ListTopicsResult listTopicsResult = mock(ListTopicsResult.class);
  DescribeTopicsResult describeTopicsResult = mock(DescribeTopicsResult.class);
  DescribeConfigsResult describeConfigsResult = mock(DescribeConfigsResult.class);

  when(describeTopicsResult.all()).thenReturn(KafkaFuture.completedFuture(tds));
  when(listTopicsResult.names()).thenReturn(KafkaFuture.completedFuture(topicNames));
  when(describeConfigsResult.values()).thenReturn(configs);
  when(adminClient.listTopics(any(ListTopicsOptions.class))).thenReturn(listTopicsResult);
  when(adminClient.describeTopics(topicNames)).thenReturn(describeTopicsResult);
  when(adminClient.describeConfigs(any(Collection.class))).thenReturn(describeConfigsResult);

  // Only the two non-underscore topics should be listed.
  Map<String, ConfiguredTopic> actual = service.listExisting(true);
  Assert.assertEquals(2, actual.size());
  Assert.assertEquals(new HashSet<>(Arrays.asList("a", "b")), actual.keySet());
}
 
Example 19
Source Project: presto   Source File: NumberPartitioner.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Routes records with a numeric key to {@code key mod partitionCount};
 * non-numeric (or absent) keys always go to partition 0.
 *
 * @return a partition index in {@code [0, partitionCount)}
 */
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster)
{
    if (key instanceof Number) {
        // Math.floorMod keeps the result in [0, partitionCount) even for negative
        // keys; a plain % would return a negative, invalid partition index.
        return toIntExact(Math.floorMod(((Number) key).longValue(), cluster.partitionCountForTopic(topic)));
    }
    return 0;
}
 
Example 20
Source Project: ZTuoExchange_framework   Source File: kafkaPartitioner.java    License: MIT License 5 votes vote down vote up
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    // Exchange-related topics are assigned randomly; everything else goes to partition 0.
    return topic.startsWith("exchange") ? random() : 0;
}
 
Example 21
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    // No key: delegate unchanged.
    if (key == null) {
        return super.partition(topic, null, keyBytes, value, valueBytes, cluster);
    }
    // Re-key on the customer id so every purchase of a customer lands on the same partition.
    Object customerId = ((PurchaseKey) key).getCustomerId();
    byte[] customerIdBytes = ((String) customerId).getBytes();
    return super.partition(topic, customerId, customerIdBytes, value, valueBytes, cluster);
}
 
Example 22
Source Project: sylph   Source File: SimplePartitioner.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Partitions by the hash of the key's string form; keyless records go to partition 0.
 * (abs is applied AFTER the modulo, so the result is always a valid index.)
 */
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster)
{
    if (key == null) {
        return 0;
    }
    int hash = key.toString().hashCode();
    return Math.abs(hash % cluster.partitionCountForTopic(topic));
}
 
Example 23
Source Project: ja-micro   Source File: SixtPartitionerTest.java    License: Apache License 2.0 5 votes vote down vote up
@Before
public void setup() {
    cluster = PowerMockito.mock(Cluster.class);
    // The partitioner only looks at the partition count, so five placeholder
    // entries (all dummy fields) are sufficient.
    int partitionCount = 5;
    List<PartitionInfo> partitions = new ArrayList<>(partitionCount);
    while (partitions.size() < partitionCount) {
        partitions.add(new PartitionInfo(null, 1, null, null, null));
    }
    when(cluster.partitionsForTopic(anyString())).thenReturn(partitions);
}
 
Example 24
Source Project: common-kafka   Source File: FairPartitionerTest.java    License: Apache License 2.0 5 votes vote down vote up
@Before
public void setup() throws InterruptedException {
    partitioner = new FairPartitioner();
    // Use the test method name as the topic so each test gets its own topic.
    topic = testName.getMethodName();
    key = new Object();
    keyBytes = new byte[0];
    value = new Object();
    valueBytes = new byte[0];

    node = new Node(1, "example.com", 6667);

    // Eight partitions: even-numbered ones have a leader (available),
    // odd-numbered ones have no leader (unavailable).
    allPartitions =
            IntStream.range(0, 8).mapToObj(i -> {
                //null leader means not available
                Node leader = null;
                if(i % 2 == 0){
                    //a non-null leader means it is available
                    leader = node;
                }
                return new PartitionInfo(topic, i, leader, null, null);
            }).collect(Collectors.toList());
    notAvailablePartitions = allPartitions.stream().filter(p -> p.leader() == null).collect(Collectors.toList());

    cluster = new Cluster("clusterId", Collections.singleton(node), allPartitions,
            Collections.emptySet(), Collections.emptySet());

    // Wait until next clock window tick.
    // The partitioner rotates on ROTATE_MILLIS boundaries; starting the test just
    // after a boundary keeps the whole test inside one rotation window.
    long millis = System.currentTimeMillis() / FairPartitioner.ROTATE_MILLIS;
    while (System.currentTimeMillis() / FairPartitioner.ROTATE_MILLIS == millis) {
        Thread.sleep(1);
    }
}
 
Example 25
Source Project: common-kafka   Source File: FairPartitionerTest.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void partitionNotAvailable() {
    // A cluster containing only the leaderless (unavailable) partitions.
    cluster = new Cluster("clusterId", Collections.singleton(node), notAvailablePartitions,
            Collections.emptySet(), Collections.emptySet());

    int chosen = partitioner.partition(topic, key, keyBytes, value, valueBytes, cluster);

    // Still a valid index, and odd — the odd-numbered partitions are the leaderless ones.
    assertThat(chosen, is(greaterThanOrEqualTo(0)));
    assertThat(chosen, is(lessThan(allPartitions.size())));
    assertThat(chosen % 2, is(1));
}
 
Example 26
/**
 * Captures a snapshot of the Kafka cluster together with its topic- and
 * cluster-level configurations for serving cluster-state queries.
 *
 * @param kafkaCluster Kafka cluster.
 * @param topicConfigProvider Provider of per-topic and cluster-level configs.
 * @param adminClientConfigs Configs for creating an admin client.
 * @param config Cruise Control configuration (passed to the superclass).
 */
public KafkaClusterState(Cluster kafkaCluster,
                         TopicConfigProvider topicConfigProvider,
                         Map<String, Object> adminClientConfigs,
                         KafkaCruiseControlConfig config) {
  super(config);
  _kafkaCluster = kafkaCluster;
  // Snapshot configs eagerly so later queries do not hit the provider again.
  _allTopicConfigs = topicConfigProvider.allTopicConfigs();
  _clusterConfigs = topicConfigProvider.clusterConfigs();
  _adminClientConfigs = adminClientConfigs;
}
 
Example 27
/**
 * Classifies the cluster's partitions into state buckets (under-replicated,
 * offline, under-minISR, with-offline-replicas, other), each kept sorted by
 * topic then partition.
 *
 * @param verbose Whether to include verbose per-partition details.
 * @param topicPattern Pattern limiting which topics are examined.
 * @param kafkaCluster Kafka cluster to gather partition state from.
 * @param allTopicConfigs Per-topic configuration properties.
 * @param clusterConfigs Cluster-level configuration properties.
 */
public ClusterPartitionState(boolean verbose, Pattern topicPattern, Cluster kafkaCluster,
                             Map<String, Properties> allTopicConfigs, Properties clusterConfigs) {
  _kafkaCluster = kafkaCluster;
  _allTopicConfigs = allTopicConfigs;
  _clusterConfigs = clusterConfigs;
  // All buckets share the same deterministic ordering: topic name, then partition id.
  Comparator<PartitionInfo> comparator = Comparator.comparing(PartitionInfo::topic).thenComparingInt(PartitionInfo::partition);
  _underReplicatedPartitions = new TreeSet<>(comparator);
  _offlinePartitions = new TreeSet<>(comparator);
  _otherPartitions = new TreeSet<>(comparator);
  _partitionsWithOfflineReplicas = new TreeSet<>(comparator);
  _underMinIsrPartitions = new TreeSet<>(comparator);
  // Gather the partition state.
  populateKafkaPartitionState(_underReplicatedPartitions, _offlinePartitions, _otherPartitions,
      _partitionsWithOfflineReplicas, _underMinIsrPartitions, verbose, topicPattern);
}
 
Example 28
/**
 * Populate cluster rack information for topics to change replication factor. In the process this method also conducts a sanity
 * check to ensure that there are enough brokers and racks in the cluster to allocate new replicas to racks which do not host
 * replica of the same partition.
 *
 * @param topicsByReplicationFactor Topics to change replication factor by target replication factor.
 * @param cluster Current cluster state.
 * @param skipTopicRackAwarenessCheck Whether to skip the rack awareness sanity check or not.
 * @param brokersByRack Mapping from rack to broker (output parameter, populated by this method).
 * @param rackByBroker Mapping from broker to rack (output parameter, populated by this method).
 */
public static void populateRackInfoForReplicationFactorChange(Map<Short, Set<String>> topicsByReplicationFactor,
                                                              Cluster cluster,
                                                              boolean skipTopicRackAwarenessCheck,
                                                              Map<String, List<Integer>> brokersByRack,
                                                              Map<Integer, String> rackByBroker) {
  // Index alive brokers by rack, and racks by broker id.
  for (Node node : cluster.nodes()) {
    String rack = getRackHandleNull(node);
    // computeIfAbsent replaces the putIfAbsent + get pair: one lookup instead of two.
    brokersByRack.computeIfAbsent(rack, r -> new ArrayList<>()).add(node.id());
    rackByBroker.put(node.id(), rack);
  }

  topicsByReplicationFactor.forEach((replicationFactor, topics) -> {
    if (replicationFactor > rackByBroker.size()) {
      // Cannot place more replicas than there are alive brokers.
      throw new RuntimeException(String.format("Unable to change replication factor (RF) of topics %s to %d since there are only %d "
                                               + "alive brokers in the cluster. Requested RF cannot be more than number of alive brokers.",
                                               topics, replicationFactor, rackByBroker.size()));
    } else if (replicationFactor > brokersByRack.size()) {
      if (skipTopicRackAwarenessCheck) {
        LOG.info("Target replication factor for topics {} is {}, which is larger than number of racks in cluster. Rack-awareness "
                 + "property will be violated to add new replicas.", topics, replicationFactor);
      } else {
        throw new RuntimeException(String.format("Unable to change replication factor of topics %s to %d since there are only %d "
                                                 + "racks in the cluster, to skip the rack-awareness check, set %s to true in the request.",
                                                 topics, replicationFactor, brokersByRack.size(), ParameterUtils.SKIP_RACK_AWARENESS_CHECK_PARAM));
      }
    }
  });
}
 
Example 29
/**
 * Populate topics to change replication factor based on the request and current cluster state.
 * @param topicPatternByReplicationFactor Requested topic patterns to change replication factor by target replication factor.
 * @param cluster Current cluster state.
 * @return Topics to change replication factor by target replication factor.
 */
public static Map<Short, Set<String>> topicsForReplicationFactorChange(Map<Short, Pattern> topicPatternByReplicationFactor,
                                                                       Cluster cluster) {
  Map<Short, Set<String>> topicsToChangeByReplicationFactor = new HashMap<>(topicPatternByReplicationFactor.size());
  for (Map.Entry<Short, Pattern> entry : topicPatternByReplicationFactor.entrySet()) {
    short replicationFactor = entry.getKey();
    Pattern topicPattern = entry.getValue();

    // Collect every topic in the cluster matching the requested pattern.
    Set<String> matchingTopics = new HashSet<>();
    for (String topic : cluster.topics()) {
      if (topicPattern.matcher(topic).matches()) {
        matchingTopics.add(topic);
      }
    }
    // Ensure there are topics matching the requested topic pattern.
    if (matchingTopics.isEmpty()) {
      throw new IllegalStateException(String.format("There is no topic in cluster matching pattern '%s'.", topicPattern));
    }

    // Keep only topics having at least one partition whose replica count differs from the target.
    Set<String> topicsToChange = new HashSet<>();
    for (String topic : matchingTopics) {
      for (PartitionInfo partitionInfo : cluster.partitionsForTopic(topic)) {
        if (partitionInfo.replicas().length != replicationFactor) {
          topicsToChange.add(topic);
          break;
        }
      }
    }
    if (!topicsToChange.isEmpty()) {
      topicsToChangeByReplicationFactor.put(replicationFactor, topicsToChange);
    }
  }

  if (topicsToChangeByReplicationFactor.isEmpty()) {
    throw new IllegalStateException(String.format("All topics matching given pattern already have target replication factor. Requested "
                                                  + "topic pattern by replication factor: %s.", topicPatternByReplicationFactor));
  }
  // Sanity check that no topic is set with more than one target replication factor.
  sanityCheckTargetReplicationFactorForTopic(topicsToChangeByReplicationFactor);
  return topicsToChangeByReplicationFactor;
}
 
Example 30
Source Project: datacollector   Source File: ExpressionPartitioner.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * The key expression has already been evaluated upstream; it must be the decimal
 * string form of the target partition index.
 */
@Override
public int partition(
  String topic,
  Object key,
  byte[] keyBytes,
  Object value,
  byte[] valueBytes,
  Cluster cluster
) {
  String partitionExpression = (String) key;
  return Integer.parseInt(partitionExpression);
}