org.apache.kafka.clients.Metadata Java Examples

The following examples show how to use org.apache.kafka.clients.Metadata. They are drawn from several open-source projects; the source file, project, and license are noted above each example.
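Metadata is the client-side cache of cluster topology (brokers, topics and partitions) that Kafka's internal network clients share. Before the project-specific examples, here is a minimal sketch of the usual lifecycle, assuming a Kafka clients version whose Metadata constructor takes (retryBackoffMs, metadataExpireMs, LogContext, ClusterResourceListeners) and whose bootstrap() takes just an address list, as in Examples #19 and #29 below; the broker address and timing values are placeholders:

import java.net.InetSocketAddress;
import java.util.Collections;
import java.util.List;

import org.apache.kafka.clients.Metadata;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.internals.ClusterResourceListeners;
import org.apache.kafka.common.utils.LogContext;

public class MetadataSketch {
    public static void main(String[] args) {
        // Construct the cache: retry backoff, max age, logging context, resource listeners.
        Metadata metadata = new Metadata(100L, 300_000L, new LogContext(), new ClusterResourceListeners());

        // Seed the cache with bootstrap broker addresses (host and port are placeholders).
        List<InetSocketAddress> addresses = Collections.singletonList(new InetSocketAddress("broker1", 9092));
        metadata.bootstrap(addresses);

        // fetch() returns the Cluster snapshot currently held by the cache; network clients
        // such as NetworkClient update it as metadata responses arrive.
        Cluster cluster = metadata.fetch();
        System.out.println(cluster.nodes());
    }
}
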
Example #1
Source File: KafkaNodeClient.java    From feeyo-redisproxy with BSD 3-Clause "New" or "Revised" License
public KafkaNodeClient(int id, String host, int port) {
	node = new Node(id, host, port);
	
	//
	LogContext logContext = new LogContext("ctx");

	ConfigDef defConf = new ConfigDef();
	defConf.define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, ConfigDef.Type.STRING,
			CommonClientConfigs.DEFAULT_SECURITY_PROTOCOL, ConfigDef.Importance.MEDIUM,
			CommonClientConfigs.SECURITY_PROTOCOL_DOC);

	defConf.define(SaslConfigs.SASL_MECHANISM, ConfigDef.Type.STRING, SaslConfigs.DEFAULT_SASL_MECHANISM,
			ConfigDef.Importance.MEDIUM, SaslConfigs.SASL_MECHANISM_DOC);

	metrics = new Metrics(Time.SYSTEM);

	AbstractConfig config = new AbstractConfig(defConf, new Properties());
	channelBuilder = ClientUtils.createChannelBuilder(config);
	selector = new Selector(1000L, metrics, Time.SYSTEM, "cc", channelBuilder, logContext);
	client = new NetworkClient(selector, new Metadata(0, Long.MAX_VALUE, false),
			CLIENT_ID, 10, 1000L, 1000L, 1, 1024, 1000, Time.SYSTEM, true, new ApiVersions(),
			null, logContext);
}
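
The constructor above only wires the client together. As a rough, hypothetical sketch of how such a NetworkClient is usually driven (none of this appears in the original source; List and ClientResponse imports are assumed), a method on KafkaNodeClient could check readiness and poll:

public void pollOnce(long timeoutMs) {
	long now = Time.SYSTEM.milliseconds();
	// ready() initiates a connection to the node if none is established yet and
	// reports whether a request could be sent right now.
	boolean canSend = client.ready(node, now);
	// poll() performs the actual network I/O: it completes connection setup, sends
	// queued requests and collects any responses.
	List<ClientResponse> responses = client.poll(timeoutMs, Time.SYSTEM.milliseconds());
}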
 
Example #2
Source File: NetworkClientProvider.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Creates a new network client with the given properties.
 *
 * @return A new network client with the given properties.
 */
NetworkClient createNetworkClient(long connectionMaxIdleMS,
                                  Metrics metrics,
                                  Time time,
                                  String metricGrpPrefix,
                                  ChannelBuilder channelBuilder,
                                  Metadata metadata,
                                  String clientId,
                                  int maxInFlightRequestsPerConnection,
                                  long reconnectBackoffMs,
                                  long reconnectBackoffMax,
                                  int socketSendBuffer,
                                  int socketReceiveBuffer,
                                  int defaultRequestTimeoutMs,
                                  boolean discoverBrokerVersions,
                                  ApiVersions apiVersions);
 
Example #3
Source File: KafkaNetworkClientProvider.java    From cruise-control with BSD 2-Clause "Simplified" License
@Override
public NetworkClient createNetworkClient(long connectionMaxIdleMS,
                                         Metrics metrics,
                                         Time time,
                                         String metricGrpPrefix,
                                         ChannelBuilder channelBuilder,
                                         Metadata metadata,
                                         String clientId,
                                         int maxInFlightRequestsPerConnection,
                                         long reconnectBackoffMs,
                                         long reconnectBackoffMax,
                                         int socketSendBuffer,
                                         int socketReceiveBuffer,
                                         int defaultRequestTimeoutMs,
                                         boolean discoverBrokerVersions,
                                         ApiVersions apiVersions) {
  return new NetworkClient(new Selector(connectionMaxIdleMS, metrics, time, metricGrpPrefix, channelBuilder, new LogContext()),
                           metadata, clientId, maxInFlightRequestsPerConnection, reconnectBackoffMs,
                           reconnectBackoffMax, socketSendBuffer, socketReceiveBuffer, defaultRequestTimeoutMs,
                           ClientDnsLookup.DEFAULT, time, discoverBrokerVersions, apiVersions, new LogContext());
}
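
Cruise Control does not instantiate this class directly; as Example #18 below shows, the provider is loaded reflectively from configuration. A one-line sketch of that lookup, assuming a KafkaCruiseControlConfig named config is in scope:

NetworkClientProvider provider =
    config.getConfiguredInstance(MonitorConfig.NETWORK_CLIENT_PROVIDER_CLASS_CONFIG,
                                 NetworkClientProvider.class);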
 
Example #4
Source File: LoadMonitor.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Construct a load monitor.
 *
 * @param config The load monitor configuration.
 * @param time   The time object.
 * @param executor The proposal executor.
 * @param dropwizardMetricRegistry The sensor registry for Cruise Control.
 * @param metricDef The metric definitions.
 */
public LoadMonitor(KafkaCruiseControlConfig config,
                   Time time,
                   Executor executor,
                   MetricRegistry dropwizardMetricRegistry,
                   MetricDef metricDef) {
  this(config,
       new MetadataClient(config,
                          new Metadata(METADATA_REFRESH_BACKOFF,
                                       config.getLong(MonitorConfig.METADATA_MAX_AGE_CONFIG),
                                       new LogContext(),
                                       new ClusterResourceListeners()),
                          METADATA_TTL,
                          time),
       KafkaCruiseControlUtils.createAdminClient(KafkaCruiseControlUtils.parseAdminClientConfigs(config)),
       time,
       executor,
       dropwizardMetricRegistry,
       metricDef);
}
 
Example #5
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Three topics with 2 partitions each.
 * T0P1 has all the windows with AVG_AVAILABLE as extrapolations.
 * T1P1 misses window 6000 (index=5), 7000 (index=6)
 * All other partitions have full data.
 */
private TestContext setupScenario4() {
  TopicPartition t0p1 = new TopicPartition(TOPIC0, 1);
  TopicPartition t1p0 = new TopicPartition("TOPIC1", 0);
  TopicPartition t1p1 = new TopicPartition("TOPIC1", 1);
  TopicPartition t2p0 = new TopicPartition("TOPIC2", 0);
  TopicPartition t2p1 = new TopicPartition("TOPIC2", 1);
  List<TopicPartition> allPartitions = Arrays.asList(TP, t0p1, t1p0, t1p1, t2p0, t2p1);
  Properties props = getLoadMonitorProperties();
  props.setProperty(MAX_ALLOWED_EXTRAPOLATIONS_PER_PARTITION_CONFIG, "0");
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(props);
  Metadata metadata = getMetadata(allPartitions);
  KafkaPartitionMetricSampleAggregator aggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  for (TopicPartition tp : Arrays.asList(TP, t1p0, t2p0, t2p1)) {
    populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, aggregator, tp);
  }
  // Let t0p1 have too many extrapolations.
  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW - 1, aggregator, t0p1);
  // let t1p1 miss another earlier window
  populateSampleAggregator(5, MIN_SAMPLES_PER_WINDOW, aggregator, t1p1);
  CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 6, MIN_SAMPLES_PER_WINDOW,
                                                      aggregator, new PartitionEntity(t1p1), 7, WINDOW_MS, KafkaMetricDef
                                                          .commonMetricDef());
  return new TestContext(metadata, aggregator);
}
 
Example #6
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Two topics with 2 partitions each.
 * T1P1 misses window 6000 (index=5), 7000 (index=6) and 20000 (index=19)
 * Other partitions have full data.
 */
private TestContext setupScenario2() {
  TopicPartition t0p1 = new TopicPartition(TOPIC0, 1);
  TopicPartition t1p0 = new TopicPartition("TOPIC1", 0);
  TopicPartition t1p1 = new TopicPartition("TOPIC1", 1);
  List<TopicPartition> allPartitions = Arrays.asList(TP, t0p1, t1p0, t1p1);
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(allPartitions);
  KafkaPartitionMetricSampleAggregator aggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  for (TopicPartition tp : Arrays.asList(TP, t0p1, t1p0)) {
    populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, aggregator, tp);
  }
  // Let t1p1 miss two consecutive windows and the most recent window.
  populateSampleAggregator(5, MIN_SAMPLES_PER_WINDOW, aggregator, t1p1);
  CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 8, MIN_SAMPLES_PER_WINDOW,
                                                      aggregator,
                                                      new PartitionEntity(t1p1),
                                                      7, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());
  return new TestContext(metadata, aggregator);
}
 
Example #7
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testNotEnoughWindows() {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaPartitionMetricSampleAggregator
      metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);

  try {
    // Only 4 windows have smaller timestamp than the timestamp we passed in.
    ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(NUM_WINDOWS, 0.0, false);
    metricSampleAggregator.aggregate(metadata.fetch(),
                                     -1L,
                                     (NUM_WINDOWS - 1) * WINDOW_MS - 1,
                                     requirements,
                                     new OperationProgress());
    fail("Should throw NotEnoughValidWindowsException");
  } catch (NotEnoughValidWindowsException nse) {
    // let it go
  }
}
 
Example #8
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testTooManyFlaws() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaPartitionMetricSampleAggregator
      metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  // Only give two samples to the aggregator.
  CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 2, MIN_SAMPLES_PER_WINDOW,
                                                      metricSampleAggregator, PE, 3, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());


  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS, new OperationProgress());
  // Partition "topic-0" is expected to be a valid partition in the result, with valid sample values collected for windows [1, NUM_WINDOWS - 3].
  assertEquals(NUM_WINDOWS - 3, result.valuesAndExtrapolations().get(PE).windows().size());
}
 
Example #9
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testFallbackToAvgAdjacent() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  TopicPartition anotherTopicPartition = new TopicPartition("AnotherTopic", 1);
  PartitionEntity anotherPartitionEntity = new PartitionEntity(anotherTopicPartition);
  Metadata metadata = getMetadata(Arrays.asList(TP, anotherTopicPartition));
  KafkaPartitionMetricSampleAggregator
      metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  // Only give one sample to the aggregator for previous period.
  populateSampleAggregator(NUM_WINDOWS, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
  // Let window (NUM_WINDOWS + 1) have enough samples.
  CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator,
                                                      PE, NUM_WINDOWS, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());
  // Let a window exist but contain no samples for partition 0.
  CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator,
                                                      anotherPartitionEntity, NUM_WINDOWS + 1, WINDOW_MS, KafkaMetricDef
                                                          .commonMetricDef());
  // Let the rest of the windows have enough samples.
  CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW,
                                                      metricSampleAggregator, PE,
                                                      NUM_WINDOWS + 2, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());

  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS * 2, new OperationProgress());
  int numWindows = result.valuesAndExtrapolations().get(PE).metricValues().length();
  assertEquals(NUM_WINDOWS, numWindows);
  int numExtrapolations = 0;
  for (Map.Entry<Integer, Extrapolation> entry : result.valuesAndExtrapolations().get(PE).extrapolations().entrySet()) {
    assertEquals(Extrapolation.AVG_ADJACENT, entry.getValue());
    numExtrapolations++;
  }
  assertEquals(1, numExtrapolations);
}
 
Example #10
Source File: KafkaIT.java    From uavstack with Apache License 2.0
/**
 * @param metadata the client metadata from which the kafka poll host identifier is derived
 */
private static void getPollHost(Metadata metadata) {
    if ("".equals(pollHost)) {
        List<Node> nodesList = metadata.fetch().nodes();
        List<String> nList = new ArrayList<String>();
        for (int i = 0; i < nodesList.size(); i++) {
            nList.add(nodesList.get(i).host() + ":" + nodesList.get(i).port());
        }
        Collections.sort(nList);
        pollHost = "mq:kafka://" + StringHelper.join(nList, ",");
    }
}
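
For instance, with two brokers kafka-1:9092 and kafka-2:9092 (placeholder host names) in the fetched cluster metadata, the method above would leave pollHost set to mq:kafka://kafka-1:9092,kafka-2:9092.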
 
Example #11
Source File: LoadMonitorTaskRunnerTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testSamplingError() {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = new Metadata(METADATA_REFRESH_BACKOFF,
                                   METADATA_EXPIRY_MS,
                                   new LogContext(),
                                   new ClusterResourceListeners());
  MetadataClient metadataClient = new MetadataClient(config, metadata, -1L, TIME);
  MockPartitionMetricSampleAggregator mockMetricSampleAggregator =
      new MockPartitionMetricSampleAggregator(config, metadata);
  KafkaBrokerMetricSampleAggregator mockBrokerMetricSampleAggregator =
      EasyMock.mock(KafkaBrokerMetricSampleAggregator.class);
  MetricRegistry dropwizardMetricRegistry = new MetricRegistry();
  MetricSampler sampler = new MockSampler(0);
  MetricFetcherManager fetcherManager =
      new MetricFetcherManager(config, mockMetricSampleAggregator, mockBrokerMetricSampleAggregator, metadataClient,
                               METRIC_DEF, TIME, dropwizardMetricRegistry, null, sampler);
  LoadMonitorTaskRunner loadMonitorTaskRunner =
      new LoadMonitorTaskRunner(config, fetcherManager, mockMetricSampleAggregator, mockBrokerMetricSampleAggregator,
                                metadataClient, null, TIME);
  while (metadata.fetch().topics().size() < 100) {
    metadataClient.refreshMetadata();
  }
  loadMonitorTaskRunner.start(true);

  int numSamples = 0;
  long startMs = System.currentTimeMillis();
  BlockingQueue<PartitionMetricSample> sampleQueue = mockMetricSampleAggregator.metricSampleQueue();
  while (numSamples < (NUM_PARTITIONS * NUM_TOPICS) * 10 && System.currentTimeMillis() < startMs + 10000) {
    PartitionMetricSample sample = sampleQueue.poll();
    if (sample != null) {
      numSamples++;
    }
  }
  int expectedNumSamples = NUM_TOPICS * NUM_PARTITIONS;
  assertEquals("Only see " + numSamples + " samples. Expecting " + expectedNumSamples + " samples",
      expectedNumSamples, numSamples);
  fetcherManager.shutdown();
}
 
Example #12
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Three topics with 2 partitions each.
 * T0P1 misses window 18000 (index=17), 19000 (index=18)
 * T1P1 misses window 6000 (index=5), 7000 (index=6)
 * Other partitions have all data.
 */
private TestContext setupScenario3() {
  TopicPartition t0p1 = new TopicPartition(TOPIC0, 1);
  TopicPartition t1p0 = new TopicPartition("TOPIC1", 0);
  TopicPartition t1p1 = new TopicPartition("TOPIC1", 1);
  TopicPartition t2p0 = new TopicPartition("TOPIC2", 0);
  TopicPartition t2p1 = new TopicPartition("TOPIC2", 1);
  List<TopicPartition> allPartitions = Arrays.asList(TP, t0p1, t1p0, t1p1, t2p0, t2p1);
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(allPartitions);
  KafkaPartitionMetricSampleAggregator aggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  for (TopicPartition tp : Arrays.asList(TP, t1p0, t2p0, t2p1)) {
    populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, aggregator, tp);
  }
  // Let t0p1 miss the second and the third latest window.
  populateSampleAggregator(NUM_WINDOWS - 3, MIN_SAMPLES_PER_WINDOW, aggregator, t0p1);
  CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW, aggregator,
                                                      new PartitionEntity(t0p1),
                                                      NUM_WINDOWS - 1, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());
  // let t1p1 miss another earlier window
  populateSampleAggregator(5, MIN_SAMPLES_PER_WINDOW, aggregator, t1p1);
  CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 6, MIN_SAMPLES_PER_WINDOW,
                                                      aggregator, new PartitionEntity(t1p1),
                                                      7, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());
  return new TestContext(metadata, aggregator);
}
 
Example #13
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Two topics with 2 partitions each. No data missing.
 */
private TestContext setupScenario1() {
  TopicPartition t0p1 = new TopicPartition(TOPIC0, 1);
  TopicPartition t1p0 = new TopicPartition("TOPIC1", 0);
  TopicPartition t1p1 = new TopicPartition("TOPIC1", 1);
  List<TopicPartition> allPartitions = Arrays.asList(TP, t0p1, t1p0, t1p1);
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(allPartitions);
  KafkaPartitionMetricSampleAggregator aggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  for (TopicPartition tp : allPartitions) {
    populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, aggregator, tp);
  }
  return new TestContext(metadata, aggregator);
}
 
Example #14
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testExcludeInvalidMetricSample() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaPartitionMetricSampleAggregator
      metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);
  MetricDef metricDef = KafkaMetricDef.commonMetricDef();

  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
  // Set the leader to be node 1, which is different from the leader in the metadata.
  PartitionMetricSample sampleWithDifferentLeader = new PartitionMetricSample(1, TP);
  sampleWithDifferentLeader.record(metricDef.metricInfo(DISK_USAGE.name()), 10000);
  sampleWithDifferentLeader.record(metricDef.metricInfo(CPU_USAGE.name()), 10000);
  sampleWithDifferentLeader.record(metricDef.metricInfo(LEADER_BYTES_IN.name()), 10000);
  sampleWithDifferentLeader.record(metricDef.metricInfo(LEADER_BYTES_OUT.name()), 10000);
  sampleWithDifferentLeader.close(0);

  // Only populate the CPU metric
  PartitionMetricSample incompletePartitionMetricSample = new PartitionMetricSample(0, TP);
  incompletePartitionMetricSample.record(metricDef.metricInfo(CPU_USAGE.name()), 10000);
  incompletePartitionMetricSample.close(0);

  metricSampleAggregator.addSample(sampleWithDifferentLeader);
  metricSampleAggregator.addSample(incompletePartitionMetricSample);

  // Check the window value and make sure the metric samples above are excluded.
  Map<PartitionEntity, ValuesAndExtrapolations> valuesAndExtrapolations =
      metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS, new OperationProgress())
                            .valuesAndExtrapolations();
  ValuesAndExtrapolations partitionValuesAndExtrapolations = valuesAndExtrapolations.get(PE);
  for (Resource resource : Resource.cachedValues()) {
    Collection<Short> metricIds = KafkaMetricDef.resourceToMetricIds(resource);
    double expectedValue = (resource == Resource.DISK ? MIN_SAMPLES_PER_WINDOW - 1 : (MIN_SAMPLES_PER_WINDOW - 1) / 2.0)
                           / (resource == Resource.CPU ? UNIT_INTERVAL_TO_PERCENTAGE : 1.0) * metricIds.size();
    assertEquals("The utilization for " + resource + " should be " + expectedValue,
                 expectedValue, partitionValuesAndExtrapolations.metricValues().valuesForGroup(resource.name(),
                                                                        KafkaMetricDef.commonMetricDef(),
                                                                        true).get(NUM_WINDOWS - 1), 0.01);
  }
}
 
Example #15
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testFallbackToAvgAvailable() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaPartitionMetricSampleAggregator
      metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  // Only give two samples to the aggregator.
  CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 1, MIN_SAMPLES_PER_WINDOW,
                                                      metricSampleAggregator, PE, 2, WINDOW_MS, KafkaMetricDef.commonMetricDef());
  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS, new OperationProgress());
  // Partition "topic-0" is expected to be a valid partition in result with valid sample values for window [3, NUM_WINDOWS].
  assertEquals(NUM_WINDOWS - 2, result.valuesAndExtrapolations().get(PE).windows().size());

  populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW - 2, metricSampleAggregator);

  result = metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS, new OperationProgress());
  int numWindows = result.valuesAndExtrapolations().get(PE).metricValues().length();
  assertEquals(NUM_WINDOWS, numWindows);
  int numExtrapolations = 0;
  for (Map.Entry<Integer, Extrapolation> entry : result.valuesAndExtrapolations().get(PE).extrapolations().entrySet()) {
    assertEquals(Extrapolation.AVG_AVAILABLE, entry.getValue());
    numExtrapolations++;
  }
  assertEquals(2, numExtrapolations);
}
 
Example #16
Source File: LoadMonitorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
private TestContext(LoadMonitor loadMonitor,
                    KafkaPartitionMetricSampleAggregator aggregator,
                    KafkaCruiseControlConfig config,
                    Metadata metadata) {
  _loadMonitor = loadMonitor;
  _aggregator = aggregator;
  _config = config;
  _metadata = metadata;
}
 
Example #17
Source File: KafkaPartitionMetricSampleAggregator.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Construct the metric sample aggregator.
 *
 * @param config   The load monitor configurations.
 * @param metadata The metadata of the cluster.
 */
public KafkaPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata) {
  super(config.getInt(MonitorConfig.NUM_PARTITION_METRICS_WINDOWS_CONFIG),
        config.getLong(MonitorConfig.PARTITION_METRICS_WINDOW_MS_CONFIG),
        config.getInt(MonitorConfig.MIN_SAMPLES_PER_PARTITION_METRICS_WINDOW_CONFIG).byteValue(),
        config.getInt(MonitorConfig.PARTITION_METRIC_SAMPLE_AGGREGATOR_COMPLETENESS_CACHE_SIZE_CONFIG),
        KafkaMetricDef.commonMetricDef());
  _metadata = metadata;
  _maxAllowedExtrapolationsPerPartition =
      config.getInt(MonitorConfig.MAX_ALLOWED_EXTRAPOLATIONS_PER_PARTITION_CONFIG);
  _sampleType = SampleType.PARTITION;

}
 
Example #18
Source File: MetadataClient.java    From cruise-control with BSD 2-Clause "Simplified" License
public MetadataClient(KafkaCruiseControlConfig config,
                      Metadata metadata,
                      long metadataTTL,
                      Time time) {
  _metadataGeneration = new AtomicInteger(0);
  _metadata = metadata;
  _refreshMetadataTimeout = config.getLong(MonitorConfig.METADATA_MAX_AGE_CONFIG);
  _time = time;
  List<InetSocketAddress> addresses =
      ClientUtils.parseAndValidateAddresses(config.getList(MonitorConfig.BOOTSTRAP_SERVERS_CONFIG),
                                            ClientDnsLookup.DEFAULT);
  Cluster bootstrapCluster = Cluster.bootstrap(addresses);
  MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(bootstrapCluster.nodes(),
                                                                                      bootstrapCluster.clusterResource().clusterId(),
                                                                                      MetadataResponse.NO_CONTROLLER_ID,
                                                                                      Collections.emptyList());

  _metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, time.milliseconds());
  ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(config, time);
  NetworkClientProvider provider = config.getConfiguredInstance(MonitorConfig.NETWORK_CLIENT_PROVIDER_CLASS_CONFIG,
                                                                NetworkClientProvider.class);

  _networkClient = provider.createNetworkClient(config.getLong(MonitorConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG),
                                                new Metrics(),
                                                time,
                                                "load-monitor",
                                                channelBuilder,
                                                _metadata,
                                                config.getString(MonitorConfig.CLIENT_ID_CONFIG),
                                                DEFAULT_MAX_IN_FLIGHT_REQUEST,
                                                config.getLong(MonitorConfig.RECONNECT_BACKOFF_MS_CONFIG),
                                                config.getLong(MonitorConfig.RECONNECT_BACKOFF_MS_CONFIG),
                                                config.getInt(MonitorConfig.SEND_BUFFER_CONFIG),
                                                config.getInt(MonitorConfig.RECEIVE_BUFFER_CONFIG),
                                                config.getInt(MonitorConfig.REQUEST_TIMEOUT_MS_CONFIG),
                                                true,
                                                new ApiVersions());
  _metadataTTL = metadataTTL;
}
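
A minimal usage sketch for this client, assembled only from calls that appear elsewhere on this page (refreshMetadata() in Examples #11 and #26, the metadata() accessor in Example #27, fetch() throughout); here config is assumed to be a KafkaCruiseControlConfig, metadata a Metadata instance built as in the other examples, and the -1L metadataTTL mirrors the test examples:

MetadataClient metadataClient = new MetadataClient(config, metadata, -1L, Time.SYSTEM);

// Trigger a metadata request to the brokers, then read the cached Cluster snapshot.
metadataClient.refreshMetadata();
Cluster cluster = metadataClient.metadata().fetch();
System.out.println(cluster.topics());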
 
Example #19
Source File: KarelDbLeaderElector.java    From kareldb with Apache License 2.0
public KarelDbLeaderElector(KarelDbConfig config, KarelDbEngine engine) throws KarelDbElectionException {
    try {
        this.engine = engine;
        this.clientId = "kdb-" + KDB_CLIENT_ID_SEQUENCE.getAndIncrement();

        this.myIdentity = findIdentity(
            config.getList(KarelDbConfig.LISTENERS_CONFIG),
            config.getBoolean(KarelDbConfig.LEADER_ELIGIBILITY_CONFIG));

        Map<String, String> metricsTags = new LinkedHashMap<>();
        metricsTags.put("client-id", clientId);
        MetricConfig metricConfig = new MetricConfig().tags(metricsTags);
        List<MetricsReporter> reporters = Collections.singletonList(new JmxReporter(JMX_PREFIX));
        Time time = Time.SYSTEM;

        ClientConfig clientConfig = new ClientConfig(config.originalsWithPrefix("kafkacache."), false);

        this.metrics = new Metrics(metricConfig, reporters, time);
        this.retryBackoffMs = clientConfig.getLong(CommonClientConfigs.RETRY_BACKOFF_MS_CONFIG);
        String groupId = config.getString(KarelDbConfig.CLUSTER_GROUP_ID_CONFIG);
        LogContext logContext = new LogContext("[KarelDB clientId=" + clientId + ", groupId="
            + groupId + "] ");
        this.metadata = new Metadata(
            retryBackoffMs,
            clientConfig.getLong(CommonClientConfigs.METADATA_MAX_AGE_CONFIG),
            logContext,
            new ClusterResourceListeners()
        );
        List<String> bootstrapServers
            = config.getList(KarelDbConfig.KAFKACACHE_BOOTSTRAP_SERVERS_CONFIG);
        List<InetSocketAddress> addresses = ClientUtils.parseAndValidateAddresses(bootstrapServers,
            clientConfig.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG));
        this.metadata.bootstrap(addresses);
        String metricGrpPrefix = "kareldb";

        ChannelBuilder channelBuilder = ClientUtils.createChannelBuilder(clientConfig, time);
        long maxIdleMs = clientConfig.getLong(CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG);

        NetworkClient netClient = new NetworkClient(
            new Selector(maxIdleMs, metrics, time, metricGrpPrefix, channelBuilder, logContext),
            this.metadata,
            clientId,
            100, // a fixed large enough value will suffice
            clientConfig.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG),
            clientConfig.getLong(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG),
            clientConfig.getInt(CommonClientConfigs.SEND_BUFFER_CONFIG),
            clientConfig.getInt(CommonClientConfigs.RECEIVE_BUFFER_CONFIG),
            clientConfig.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
            ClientDnsLookup.forConfig(clientConfig.getString(CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG)),
            time,
            true,
            new ApiVersions(),
            logContext);

        this.client = new ConsumerNetworkClient(
            logContext,
            netClient,
            metadata,
            time,
            retryBackoffMs,
            clientConfig.getInt(CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG),
            Integer.MAX_VALUE
        );
        this.coordinator = new KarelDbCoordinator(
            logContext,
            this.client,
            groupId,
            300000, // Default MAX_POLL_INTERVAL_MS_CONFIG
            10000, // Default SESSION_TIMEOUT_MS_CONFIG
            3000, // Default HEARTBEAT_INTERVAL_MS_CONFIG
            metrics,
            metricGrpPrefix,
            time,
            retryBackoffMs,
            myIdentity,
            this
        );

        AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds());

        initTimeout = config.getInt(KarelDbConfig.KAFKACACHE_INIT_TIMEOUT_CONFIG);

        LOG.debug("Group member created");
    } catch (Throwable t) {
        // call close methods if internal objects are already constructed
        // this is to prevent resource leak. see KAFKA-2121
        stop(true);
        // now propagate the exception
        throw new KarelDbElectionException("Failed to construct kafka consumer", t);
    }
}
 
Example #20
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testAggregateWithPartitionExtrapolations() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaPartitionMetricSampleAggregator
      metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  TopicPartition tp1 = new TopicPartition(TOPIC0, 1);
  Cluster cluster = getCluster(Arrays.asList(TP, tp1));
  PartitionEntity pe1 = new PartitionEntity(tp1);

  List<MetadataResponse.PartitionMetadata> partitionMetadata =
      Collections.singletonList(new MetadataResponse.PartitionMetadata(Errors.NONE, 1, NODE_0,
                                                                       Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH),
                                                                       Arrays.asList(nodes()), Arrays.asList(nodes()),
                                                                       Collections.emptyList()));
  List<MetadataResponse.TopicMetadata> topicMetadata = Collections.singletonList(
      new MetadataResponse.TopicMetadata(Errors.NONE, TOPIC0, false, partitionMetadata));

  MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(cluster.nodes(),
                                                                                      cluster.clusterResource().clusterId(),
                                                                                      MetadataResponse.NO_CONTROLLER_ID,
                                                                                      topicMetadata);
  metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, 1);
  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
  // Populate partition 1 but leave a hole at the NUM_WINDOWS'th window.
  CruiseControlUnitTestUtils.populateSampleAggregator(NUM_WINDOWS - 2, MIN_SAMPLES_PER_WINDOW,
                                                      metricSampleAggregator,
                                                      pe1,
                                                      0, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());
  CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW,
                                                      metricSampleAggregator,
                                                      pe1,
                                                      NUM_WINDOWS - 1, WINDOW_MS,
                                                      KafkaMetricDef.commonMetricDef());
  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(cluster, Long.MAX_VALUE, new OperationProgress());
  assertEquals(2, result.valuesAndExtrapolations().size());
  assertTrue(result.valuesAndExtrapolations().get(PE).extrapolations().isEmpty());
  assertEquals(1, result.valuesAndExtrapolations().get(pe1).extrapolations().size());
  assertTrue(result.valuesAndExtrapolations().get(pe1).extrapolations().containsKey(1));
  assertEquals((NUM_WINDOWS - 1) * WINDOW_MS, result.valuesAndExtrapolations().get(pe1).window(1));
  assertEquals(Extrapolation.AVG_ADJACENT, result.valuesAndExtrapolations().get(pe1).extrapolations().get(1));
}
 
Example #21
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testAggregateWithUpdatedCluster() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaPartitionMetricSampleAggregator
      metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);

  TopicPartition tp1 = new TopicPartition(TOPIC0 + "1", 0);
  Cluster cluster = getCluster(Arrays.asList(TP, tp1));

  List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>(2);
  topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE,
                                                       TOPIC0,
                                                       false,
                                                       Collections.singletonList(new MetadataResponse.PartitionMetadata(
                                                           Errors.NONE, PARTITION, NODE_0,
                                                           Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH),
                                                           Arrays.asList(nodes()), Arrays.asList(nodes()),
                                                           Collections.emptyList()))));
  topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE,
                                                       TOPIC0 + "1",
                                                       false,
                                                       Collections.singletonList(new MetadataResponse.PartitionMetadata(
                                                           Errors.NONE, 0, NODE_0,
                                                           Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH),
                                                           Arrays.asList(nodes()), Arrays.asList(nodes()),
                                                           Collections.emptyList()))));

  MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(cluster.nodes(),
                                                                                      cluster.clusterResource().clusterId(),
                                                                                      MetadataResponse.NO_CONTROLLER_ID,
                                                                                      topicMetadata);
  metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, 1);


  Map<PartitionEntity, ValuesAndExtrapolations> aggregateResult =
      metricSampleAggregator.aggregate(cluster, Long.MAX_VALUE, new OperationProgress()).valuesAndExtrapolations();
  // Partition "topic-0" should be valid in all NUM_WINDOW windows and Partition "topic1-0" should not since
  // there is no sample for it.
  assertEquals(1, aggregateResult.size());
  assertEquals(NUM_WINDOWS, aggregateResult.get(PE).windows().size());

  ModelCompletenessRequirements requirements =
      new ModelCompletenessRequirements(1, 0.0, true);
  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(cluster, -1, Long.MAX_VALUE, requirements, new OperationProgress());
  aggregateResult = result.valuesAndExtrapolations();
  assertNotNull("tp1 should be included because includeAllTopics is set to true",
                aggregateResult.get(new PartitionEntity(tp1)));
  Map<Integer, Extrapolation> extrapolations = aggregateResult.get(new PartitionEntity(tp1)).extrapolations();
  assertEquals(NUM_WINDOWS, extrapolations.size());

  for (int i = 0; i < NUM_WINDOWS; i++) {
    assertEquals(Extrapolation.NO_VALID_EXTRAPOLATION, extrapolations.get(i));
  }
}
 
Example #22
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testAggregate() throws NotEnoughValidWindowsException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = getMetadata(Collections.singleton(TP));
  KafkaPartitionMetricSampleAggregator
      metricSampleAggregator = new KafkaPartitionMetricSampleAggregator(config, metadata);

  populateSampleAggregator(NUM_WINDOWS + 1, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);

  MetricSampleAggregationResult<String, PartitionEntity> result =
      metricSampleAggregator.aggregate(metadata.fetch(), Long.MAX_VALUE, new OperationProgress());
  Map<PartitionEntity, ValuesAndExtrapolations> valuesAndExtrapolations = result.valuesAndExtrapolations();

  assertEquals("The windows should only have one partition", 1, valuesAndExtrapolations.size());
  ValuesAndExtrapolations partitionValuesAndExtrapolations = valuesAndExtrapolations.get(PE);
  assertNotNull(partitionValuesAndExtrapolations);
  assertEquals(NUM_WINDOWS, partitionValuesAndExtrapolations.metricValues().length());
  for (int i = 0; i < NUM_WINDOWS; i++) {
    assertEquals((NUM_WINDOWS - i) * WINDOW_MS, result.valuesAndExtrapolations().get(PE).window(i));
    for (Resource resource : Resource.cachedValues()) {
      Collection<Short> metricIds = KafkaMetricDef.resourceToMetricIds(resource);
      double expectedValue = (resource == Resource.DISK ?
          (NUM_WINDOWS - 1 - i) * 10 + MIN_SAMPLES_PER_WINDOW - 1 :
          (NUM_WINDOWS - 1 - i) * 10 + (MIN_SAMPLES_PER_WINDOW - 1) / 2.0)
          / (resource == Resource.CPU ? UNIT_INTERVAL_TO_PERCENTAGE : 1.0) * metricIds.size();
      assertEquals("The utilization for " + resource + " should be " + expectedValue,
                   expectedValue, partitionValuesAndExtrapolations.metricValues().valuesForGroup(resource.name(),
                                                                          KafkaMetricDef.commonMetricDef(),
                                                                          true).get(i), 0.01);
    }
  }

  // Verify the metric completeness checker state
  MetadataClient.ClusterAndGeneration clusterAndGeneration =
      new MetadataClient.ClusterAndGeneration(metadata.fetch(), 1);
  assertEquals(NUM_WINDOWS, metricSampleAggregator.validWindows(clusterAndGeneration.cluster(), 1.0).size());
  Map<Long, Float> monitoredPercentages = metricSampleAggregator.validPartitionRatioByWindows(clusterAndGeneration.cluster());
  for (double percentage : monitoredPercentages.values()) {
    assertEquals(1.0, percentage, 0.0);
  }
  assertEquals(NUM_WINDOWS, metricSampleAggregator.availableWindows().size());
}
 
Example #23
Source File: LoadMonitorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
private Metadata metadata() {
  return _metadata;
}
 
Example #24
Source File: Executor.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * The executor class that execute the proposals generated by optimizer.
 * Package private for unit test.
 *
 * @param config The configurations for Cruise Control.
 */
Executor(KafkaCruiseControlConfig config,
         Time time,
         MetricRegistry dropwizardMetricRegistry,
         MetadataClient metadataClient,
         long demotionHistoryRetentionTimeMs,
         long removalHistoryRetentionTimeMs,
         ExecutorNotifier executorNotifier,
         UserTaskManager userTaskManager,
         AnomalyDetector anomalyDetector) {
  String zkUrl = config.getString(ExecutorConfig.ZOOKEEPER_CONNECT_CONFIG);
  _numExecutionStopped = new AtomicInteger(0);
  _numExecutionStoppedByUser = new AtomicInteger(0);
  _executionStoppedByUser = new AtomicBoolean(false);
  _ongoingExecutionIsBeingModified = new AtomicBoolean(false);
  _numExecutionStartedInKafkaAssignerMode = new AtomicInteger(0);
  _numExecutionStartedInNonKafkaAssignerMode = new AtomicInteger(0);
  _isKafkaAssignerMode = false;
  _config = config;
  // Register gauge sensors.
  registerGaugeSensors(dropwizardMetricRegistry);

  _time = time;
  boolean zkSecurityEnabled = config.getBoolean(ExecutorConfig.ZOOKEEPER_SECURITY_ENABLED_CONFIG);
  _kafkaZkClient = KafkaCruiseControlUtils.createKafkaZkClient(zkUrl, ZK_EXECUTOR_METRIC_GROUP, ZK_EXECUTOR_METRIC_TYPE,
      zkSecurityEnabled);
  _adminClient = KafkaCruiseControlUtils.createAdminClient(KafkaCruiseControlUtils.parseAdminClientConfigs(config));
  _executionTaskManager = new ExecutionTaskManager(_adminClient, dropwizardMetricRegistry, time, config);
  _metadataClient = metadataClient != null ? metadataClient
                                           : new MetadataClient(config,
                                                                new Metadata(METADATA_REFRESH_BACKOFF,
                                                                             METADATA_EXPIRY_MS,
                                                                             new LogContext(),
                                                                             new ClusterResourceListeners()),
                                                                -1L,
                                                                time);
  _defaultExecutionProgressCheckIntervalMs = config.getLong(ExecutorConfig.EXECUTION_PROGRESS_CHECK_INTERVAL_MS_CONFIG);
  _leaderMovementTimeoutMs = config.getLong(ExecutorConfig.LEADER_MOVEMENT_TIMEOUT_MS_CONFIG);
  _requestedExecutionProgressCheckIntervalMs = null;
  _proposalExecutor =
      Executors.newSingleThreadExecutor(new KafkaCruiseControlThreadFactory("ProposalExecutor", false, LOG));
  _latestDemoteStartTimeMsByBrokerId = new ConcurrentHashMap<>();
  _latestRemoveStartTimeMsByBrokerId = new ConcurrentHashMap<>();
  _executorState = ExecutorState.noTaskInProgress(recentlyDemotedBrokers(), recentlyRemovedBrokers());
  _stopSignal = new AtomicInteger(NO_STOP_EXECUTION);
  _hasOngoingExecution = false;
  _uuid = null;
  _reasonSupplier = null;
  _executorNotifier = executorNotifier != null ? executorNotifier
                                               : config.getConfiguredInstance(ExecutorConfig.EXECUTOR_NOTIFIER_CLASS_CONFIG,
                                                                              ExecutorNotifier.class);
  _userTaskManager = userTaskManager;
  _anomalyDetector = anomalyDetector;
  _demotionHistoryRetentionTimeMs = demotionHistoryRetentionTimeMs;
  _removalHistoryRetentionTimeMs = removalHistoryRetentionTimeMs;
  _executionHistoryScannerExecutor = Executors.newSingleThreadScheduledExecutor(
      new KafkaCruiseControlThreadFactory("ExecutionHistoryScanner", true, null));
  _executionHistoryScannerExecutor.scheduleAtFixedRate(new ExecutionHistoryScanner(),
                                                       EXECUTION_HISTORY_SCANNER_INITIAL_DELAY_SECONDS,
                                                       EXECUTION_HISTORY_SCANNER_PERIOD_SECONDS,
                                                       TimeUnit.SECONDS);
}
 
Example #25
Source File: KafkaPartitionMetricSampleAggregatorTest.java    From cruise-control with BSD 2-Clause "Simplified" License
TestContext(Metadata metadata, KafkaPartitionMetricSampleAggregator aggregator) {
  _metadata = metadata;
  _aggregator = aggregator;
}
 
Example #26
Source File: LoadMonitorTaskRunnerTest.java    From cruise-control with BSD 2-Clause "Simplified" License
@Test
public void testSimpleFetch() throws InterruptedException {
  KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
  Metadata metadata = new Metadata(METADATA_REFRESH_BACKOFF,
                                   METADATA_EXPIRY_MS,
                                   new LogContext(),
                                   new ClusterResourceListeners());
  MetadataClient metadataClient = new MetadataClient(config, metadata, -1L, TIME);
  MockPartitionMetricSampleAggregator mockPartitionMetricSampleAggregator =
      new MockPartitionMetricSampleAggregator(config, metadata);
  KafkaBrokerMetricSampleAggregator mockBrokerMetricSampleAggregator =
      EasyMock.mock(KafkaBrokerMetricSampleAggregator.class);
  MetricRegistry dropwizardMetricRegistry = new MetricRegistry();
  MetricSampler sampler = new MockSampler(0);
  MetricFetcherManager fetcherManager =
      new MetricFetcherManager(config, mockPartitionMetricSampleAggregator, mockBrokerMetricSampleAggregator,
                               metadataClient, METRIC_DEF, TIME, dropwizardMetricRegistry, null, sampler);
  LoadMonitorTaskRunner loadMonitorTaskRunner =
      new LoadMonitorTaskRunner(config, fetcherManager, mockPartitionMetricSampleAggregator,
                                mockBrokerMetricSampleAggregator, metadataClient, null, TIME);
  while (metadata.fetch().topics().size() < NUM_TOPICS) {
    Thread.sleep(10);
    metadataClient.refreshMetadata();
  }
  loadMonitorTaskRunner.start(true);

  Set<TopicPartition> partitionsToSample = new HashSet<>(NUM_TOPICS * NUM_PARTITIONS);
  for (int i = 0; i < NUM_TOPICS; i++) {
    for (int j = 0; j < NUM_PARTITIONS; j++) {
      partitionsToSample.add(new TopicPartition("topic-" + i, j));
    }
  }

  long startMs = System.currentTimeMillis();
  BlockingQueue<PartitionMetricSample> sampleQueue = mockPartitionMetricSampleAggregator.metricSampleQueue();
  while (!partitionsToSample.isEmpty() && System.currentTimeMillis() < startMs + 10000) {
    PartitionMetricSample sample = sampleQueue.poll();
    if (sample != null) {
      assertTrue("The topic partition should have been sampled and sampled only once.",
          partitionsToSample.contains(sample.entity().tp()));
      partitionsToSample.remove(sample.entity().tp());
    }
  }
  assertTrue("Did not see sample for partitions " + Arrays.toString(partitionsToSample.toArray()),
      partitionsToSample.isEmpty());
  fetcherManager.shutdown();
  assertTrue(sampleQueue.isEmpty());
}
 
Example #27
Source File: MetadataClient.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * @return The metadata maintained by this metadata client.
 */
public Metadata metadata() {
  return _metadata;
}
 
Example #28
Source File: KafkaIT.java    From uavstack with Apache License 2.0
public static void startPoll(String appid, String methodName, Object[] args) {
    KafkaIT kit = new KafkaIT(appid);
    getPollHost((Metadata) args[2]);
    kit.doPollStart(methodName, args);
    tl.set(kit);
}
 
Example #29
Source File: KarelDbCoordinatorTest.java    From kareldb with Apache License 2.0
@Before
public void setup() {
    this.time = new MockTime();
    this.metadata = new Metadata(0, Long.MAX_VALUE, new LogContext(), new ClusterResourceListeners());
    this.client = new MockClient(time, new MockClient.MockMetadataUpdater() {
        @Override
        public List<Node> fetchNodes() {
            return cluster.nodes();
        }

        @Override
        public boolean isUpdateNeeded() {
            return false;
        }

        @Override
        public void update(Time time, MockClient.MetadataUpdate update) {
            throw new UnsupportedOperationException();
        }
    });

    LogContext logContext = new LogContext();
    this.consumerClient = new ConsumerNetworkClient(logContext, client, metadata, time, 100, 1000, Integer.MAX_VALUE);
    this.metrics = new Metrics(time);
    this.rebalanceListener = new MockRebalanceListener();

    this.coordinator = new KarelDbCoordinator(
        logContext,
        consumerClient,
        groupId,
        rebalanceTimeoutMs,
        sessionTimeoutMs,
        heartbeatIntervalMs,
        metrics,
        "kdb-" + groupId,
        time,
        retryBackoffMs,
        LEADER_INFO,
        rebalanceListener
    );
}
 
Example #30
Source File: LoadMonitorTaskRunnerTest.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Construct the metric sample aggregator.
 * @param config   The load monitor configurations.
 * @param metadata The metadata of the cluster.
 */
MockPartitionMetricSampleAggregator(KafkaCruiseControlConfig config, Metadata metadata) {
  super(config, metadata);
  _partitionMetricSamples = new ArrayBlockingQueue<>(10000);
}