org.apache.kafka.common.config.TopicConfig Java Examples

The following examples show how to use org.apache.kafka.common.config.TopicConfig. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example #1
Source File: AbstractKafkaRepository.java    From SkaETL with Apache License 2.0 8 votes vote down vote up
// Wires a compacted Kafka topic ("<name>-db") to a GlobalKTable so the
// repository can serve key/value lookups from the topic's latest state.
public AbstractKafkaRepository(String name, Serde<V> valueSerde, Function<V,String> keyFunction, KafkaAdminService kafkaAdminService, KafkaConfiguration kafkaConfiguration) {
    // The topic name doubles as the repository identity.
    this.repositoryName = name + "-db";
    this.keyFunction = keyFunction;
    this.producer = KafkaUtils.kafkaProducer(kafkaConfiguration.getBootstrapServers(), StringSerializer.class, JsonNodeSerialializer.class);
    // Compaction keeps only the latest value per key — the topic acts as a DB.
    kafkaAdminService.createTopic(kafkaAdminService.buildTopicInfo(repositoryName,TopicConfig.CLEANUP_POLICY_COMPACT));

    // Random suffix gives every instance its own application id (and thus its
    // own consumer group / state directory).
    Properties props = KafkaUtils.createKStreamProperties(repositoryName + "-stream"+ UUID.randomUUID().toString(), kafkaConfiguration.getBootstrapServers());
    StreamsBuilder builder = new StreamsBuilder();

    final GlobalKTable<String, V> globalKTable = builder.globalTable(repositoryName, materialize(valueSerde));

    final KafkaStreams streams = new KafkaStreams(builder.build(), props);
    streams.start();
    // NOTE(review): flush() is invoked before anything is produced — presumably
    // a no-op; confirm intent.
    producer.flush();
    // NOTE(review): store() is queried immediately after start(); if the
    // streams app is not yet RUNNING this can throw — TODO confirm.
    keyValueStore = streams.store(getStoreName(), QueryableStoreTypes.keyValueStore());

    // Only the streams instance is closed on shutdown; the producer is not.
    Runtime.getRuntime().addShutdownHook(new Thread(streams::close));

}
 
Example #2
Source File: KafkaAdminClient.java    From data-highway with Apache License 2.0 6 votes vote down vote up
/**
 * Creates the Kafka topic backing {@code road}, deriving the cleanup policy
 * from the road type (NORMAL -> delete, COMPACT -> compact).
 *
 * @param road road whose topic should be created
 * @throws KafkaException if the road type is unhandled
 */
public void createTopic(KafkaRoad road) throws KafkaException {
  // defaultTopicConfig entries act as fallbacks (Properties defaults are
  // consulted by getProperty, not by entrySet iteration).
  Properties topicConfig = new Properties(defaultTopicConfig);
  topicConfig.setProperty(LEADER_THROTTLED_REPLICAS, WILDCARD);
  topicConfig.setProperty(FOLLOWER_THROTTLED_REPLICAS, WILDCARD);
  // A null road type is treated as NORMAL.
  RoadType roadType = ofNullable(road.getType()).orElse(RoadType.NORMAL);
  switch (roadType) {
  case NORMAL:
    topicConfig.setProperty(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE);
    break;
  case COMPACT:
    topicConfig.setProperty(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
    break;
  default:
    // Use the null-safe roadType rather than re-dereferencing road.getType().
    throw new KafkaException("Unhandled road type \"" + roadType.name() + "\"");
  }
  AdminUtils
      .createTopic(zkUtils, road.getTopicName(), partitions, replicationFactor, topicConfig, DEFAULT_RACK_AWARE_MODE);
  log.info("Created {} topic {}", roadType, road.getTopicName());
}
 
Example #3
Source File: KafkaSecUtils.java    From bdt with Apache License 2.0 6 votes vote down vote up
/**
 * Creates a single-partition topic (with {@code min.insync.replicas=1}) and
 * optionally grows it to {@code numPartitions} partitions afterwards.
 *
 * @param topic         topic name to create
 * @param numPartitions target partition count, or {@code null} to keep 1
 * @throws Exception if topic or partition creation fails or times out
 */
public void createTopic(String topic, String numPartitions) throws Exception {
    // Plain HashMap instead of double-brace initialization: the anonymous
    // subclass idiom pins the enclosing instance and is a known anti-pattern.
    Map<String, String> topicProperties = new HashMap<>();
    topicProperties.put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1");

    logger.debug("Creating topic: " + topic);
    NewTopic newTopic = new NewTopic(topic, 1, (short) 1).configs(topicProperties);
    adminClient.createTopics(asList(newTopic)).all().get(KAFKA_DEFAULT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
    logger.debug("Topic created.");

    if (numPartitions != null) {
        // Grow the freshly created topic to the requested partition count.
        logger.debug("Creating: " + numPartitions + " partitions in topic: " + topic);
        Map<String, NewPartitions> partitions = new HashMap<>();
        partitions.put(topic, NewPartitions.increaseTo(Integer.parseInt(numPartitions)));

        adminClient.createPartitions(partitions).all().get(KAFKA_DEFAULT_TIMEOUT_MS, TimeUnit.MILLISECONDS);
        logger.debug("Partitions created.");
    }
}
 
Example #4
Source File: KafkaSystemAdmin.java    From samza with Apache License 2.0 6 votes vote down vote up
/**
 * Fetch stream properties for all intermediate streams.
 *
 * @param config kafka system config
 * @return a {@link Map} from {@code streamId} to stream {@link Properties}
 */
@VisibleForTesting
static Map<String, Properties> getIntermediateStreamProperties(Config config) {
  ApplicationConfig appConfig = new ApplicationConfig(config);
  // Intermediate-stream overrides only apply in batch mode.
  if (appConfig.getAppMode() != ApplicationConfig.ApplicationMode.BATCH) {
    return Collections.emptyMap();
  }

  StreamConfig streamConfig = new StreamConfig(config);
  return streamConfig.getStreamIds()
      .stream()
      .filter(streamConfig::getIsIntermediateStream)
      .collect(Collectors.toMap(Function.identity(), streamId -> {
        Properties streamProps = new Properties();
        streamProps.putAll(streamConfig.getStreamProperties(streamId));
        // Batch jobs get a bounded default retention unless one is configured.
        streamProps.putIfAbsent(TopicConfig.RETENTION_MS_CONFIG,
            String.valueOf(KafkaConfig.DEFAULT_RETENTION_MS_FOR_BATCH()));
        return streamProps;
      }));
}
 
Example #5
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0 6 votes vote down vote up
// Topic A declares min.insync.replicas=2 but has only one replica; per the
// assertion below, every broker should still be rollable in this setup.
@Test
public void testMinIsrMoreThanReplicas(VertxTestContext context) {
    KSB ksb = new KSB()
            .addNewTopic("A", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2")
                .addNewPartition(0)
                    .replicaOn(0)
                    .leader(0)
                    .isr(0)
                .endPartition()
            .endTopic()
            .addBroker(3);

    KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac());

    // One checkpoint flag per broker; the test completes once each broker's
    // canRoll result has been verified.
    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        kafkaAvailability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
            assertTrue(canRoll,
                    "broker " + brokerId + " should be rollable, being minisr = 2, but only 1 replicas");

            a.flag();
        })));
    }
}
 
Example #6
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0 6 votes vote down vote up
// Topic A has min.insync.replicas=3 with 3 replicas, but the ISR already
// holds only brokers 0 and 1; the assertion expects all brokers rollable.
@Test
public void testMinIsrEqualsReplicasWithOfflineReplicas(VertxTestContext context) {
    KSB ksb = new KSB()
            .addNewTopic("A", false)
            .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3")
            .addNewPartition(0)
            .replicaOn(0, 1, 2)
            .leader(0)
            .isr(0, 1)
            .endPartition()
            .endTopic()

            .addBroker(3);

    KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac());

    // One checkpoint flag per broker under test.
    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        kafkaAvailability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
            assertTrue(canRoll,
                    "broker " + brokerId + " should be rollable, being minisr = 3, but only 3 replicas");

            a.flag();
        })));
    }
}
 
Example #7
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0 6 votes vote down vote up
// Topic A has min.insync.replicas=3 equal to its replica count with a full
// ISR; the assertion expects every broker to be rollable.
@Test
public void testMinIsrEqualsReplicas(VertxTestContext context) {
    KSB ksb = new KSB()
            .addNewTopic("A", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    .leader(0)
                    .isr(0, 1, 2)
                .endPartition()
            .endTopic()

            .addBroker(3);

    KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac());

    // One checkpoint flag per broker under test.
    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        kafkaAvailability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
            assertTrue(canRoll,
                    "broker " + brokerId + " should be rollable, being minisr = 3, but only 3 replicas");

            a.flag();
        })));
    }
}
 
Example #8
Source File: TopicReplicationFactorAnomalyFinder.java    From cruise-control with BSD 2-Clause "Simplified" License 6 votes vote down vote up
/**
 * Retrieve topic minISR config information if it is not cached locally.
 * @param topicsToCheck Set of topics to check.
 */
private void maybeRetrieveAndCacheTopicMinISR(Set<String> topicsToCheck) {
  // Only query configs for topics that are not cached yet.
  Set<ConfigResource> topicResourcesToCheck = new HashSet<>(topicsToCheck.size());
  topicsToCheck.stream().filter(t -> !_cachedTopicMinISR.containsKey(t))
                        .forEach(t -> topicResourcesToCheck.add(new ConfigResource(ConfigResource.Type.TOPIC, t)));
  if (topicResourcesToCheck.isEmpty()) {
    return;
  }
  for (Map.Entry<ConfigResource, KafkaFuture<Config>> entry : _adminClient.describeConfigs(topicResourcesToCheck).values().entrySet()) {
    try {
      short topicMinISR = Short.parseShort(entry.getValue().get(DESCRIBE_TOPIC_CONFIG_TIMEOUT_MS, TimeUnit.MILLISECONDS)
                                                .get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).value());
      _cachedTopicMinISR.put(entry.getKey().name(), new TopicMinISREntry(topicMinISR, System.currentTimeMillis()));
    } catch (InterruptedException e) {
      // Restore the interrupt status so callers can still observe the interruption.
      Thread.currentThread().interrupt();
      LOG.warn("Skip attempt to fix replication factor of topic {} due to unable to retrieve its minISR config.",
               entry.getKey().name());
    } catch (TimeoutException | ExecutionException e) {
      LOG.warn("Skip attempt to fix replication factor of topic {} due to unable to retrieve its minISR config.",
               entry.getKey().name());
    }
  }
}
 
Example #9
Source File: KsqlRestApplicationTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0 6 votes vote down vote up
// Existing command topic must have its retention raised to Long.MAX_VALUE
// (effectively infinite) via addTopicConfig.
@Test
public void shouldEnsureCommandTopicHasInfiniteRetention() {
  final Map<String, Object> retentionConfig = ImmutableMap.of(
      TopicConfig.RETENTION_MS_CONFIG, Long.MAX_VALUE
  );
  // Expect: topic exists, then retention override applied.
  EasyMock.expect(topicClient.isTopicExists(COMMAND_TOPIC)).andReturn(true);
  EasyMock.expect(topicClient.addTopicConfig(COMMAND_TOPIC, retentionConfig)).andReturn(true);

  EasyMock.replay(topicClient);

  KsqlRestApplication.ensureCommandTopic(restConfig,
                                         topicClient,
                                         COMMAND_TOPIC);

  EasyMock.verify(topicClient);
}
 
Example #10
Source File: KafkaTopicClientImplTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0 6 votes vote down vote up
// alterConfigs fails once with a retriable DisconnectException and then
// succeeds; addTopicConfig must retry rather than propagate the failure.
@Test
public void shouldRetryAddingTopicConfig() {
  final Map<String, ?> overrides = ImmutableMap.of(
      TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT
  );

  // Current topic config does not yet contain the override.
  expect(adminClient.describeConfigs(anyObject()))
      .andReturn(topicConfigResponse(
          "peter",
          overriddenConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "12345"),
          defaultConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy")
      ));

  // First alterConfigs attempt fails, the second succeeds.
  expect(adminClient.alterConfigs(anyObject()))
      .andReturn(alterTopicConfigResponse(new DisconnectException()))
      .andReturn(alterTopicConfigResponse());
  replay(adminClient);

  KafkaTopicClient kafkaTopicClient = new KafkaTopicClientImpl(adminClient);
  kafkaTopicClient.addTopicConfig("peter", overrides);

  verify(adminClient);
}
 
Example #11
Source File: KafkaTopicClientImplTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0 6 votes vote down vote up
// When the requested override already matches the topic's current config,
// addTopicConfig must not call alterConfigs at all (no expectation recorded).
@Test
public void shouldNotAlterConfigIfConfigNotChanged() {
  final Map<String, ?> overrides = ImmutableMap.of(
      TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT
  );

  // Topic already has cleanup.policy=compact set as an override.
  expect(adminClient.describeConfigs(topicConfigsRequest("peter")))
      .andReturn(topicConfigResponse(
          "peter",
          overriddenConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG,
                                TopicConfig.CLEANUP_POLICY_COMPACT)
      ));

  replay(adminClient);

  KafkaTopicClient kafkaTopicClient = new KafkaTopicClientImpl(adminClient);
  kafkaTopicClient.addTopicConfig("peter", overrides);

  verify(adminClient);
}
 
Example #12
Source File: KafkaTopicClientImplTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0 6 votes vote down vote up
// describeConfigs fails once with a retriable DisconnectException; the client
// must retry and still return the topic config on the second attempt.
@Test
public void shouldHandleRetriableGetTopicConfigError() {
  expect(adminClient.describeConfigs(anyObject()))
      .andReturn(topicConfigResponse(new DisconnectException()))
      .andReturn(topicConfigResponse(
          "fred",
          overriddenConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "12345"),
          defaultConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "producer")
      ));
  replay(adminClient);

  KafkaTopicClient kafkaTopicClient = new KafkaTopicClientImpl(adminClient);
  final Map<String, String> config = kafkaTopicClient.getTopicConfig("fred");

  // Both overridden and default entries should appear in the returned map.
  assertThat(config.get(TopicConfig.RETENTION_MS_CONFIG), is("12345"));
  assertThat(config.get(TopicConfig.COMPRESSION_TYPE_CONFIG), is("producer"));
}
 
Example #13
Source File: KafkaTopicClientImplTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0 6 votes vote down vote up
// getTopicConfig must surface both overridden and broker-default entries
// from the describeConfigs response as a flat String map.
@Test
public void shouldGetTopicConfig() {
  expect(adminClient.describeConfigs(topicConfigsRequest("fred")))
      .andReturn(topicConfigResponse(
          "fred",
          overriddenConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "12345"),
          defaultConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy")
      ));
  replay(adminClient);

  KafkaTopicClient kafkaTopicClient = new KafkaTopicClientImpl(adminClient);
  final Map<String, String> config = kafkaTopicClient.getTopicConfig("fred");

  assertThat(config.get(TopicConfig.RETENTION_MS_CONFIG), is("12345"));
  assertThat(config.get(TopicConfig.COMPRESSION_TYPE_CONFIG), is("snappy"));
}
 
Example #14
Source File: KafkaTopicClientImplIntegrationTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0 6 votes vote down vote up
// Integration test: creating a topic with an explicit config must result in
// the expected partition/replica layout and the config being queryable.
@Test
public void shouldCreateTopicWithConfig() {
  // Given:
  final String topicName = UUID.randomUUID().toString();
  final Map<String, String> config = ImmutableMap.of(
      TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy");

  // When:
  client.createTopic(topicName, 2, (short) 1, config);

  // Then: creation is asynchronous, so poll until the topic is visible.
  assertThatEventually(() -> topicExists(topicName), is(true));
  final TopicDescription topicDescription = getTopicDescription(topicName);
  assertThat(topicDescription.partitions(), hasSize(2));
  assertThat(topicDescription.partitions().get(0).replicas(), hasSize(1));
  final Map<String, String> configs = client.getTopicConfig(topicName);
  assertThat(configs.get(TopicConfig.COMPRESSION_TYPE_CONFIG), is("snappy"));
}
 
Example #15
Source File: SimpleProducer.java    From kafka-platform-prometheus with Apache License 2.0 6 votes vote down vote up
/**
 * Creates {@code topicName} if it does not already exist.
 *
 * @param adminClient       Kafka admin client used for the lookup/creation
 * @param topicName         topic to create
 * @param numberOfPartitions partition count for the new topic
 * @param replicationFactor replication factor for the new topic
 * @throws InterruptedException if the admin call is interrupted
 * @throws ExecutionException   if listing topics or (non-benign) creation fails
 */
private void createTopic(AdminClient adminClient, String topicName, Integer numberOfPartitions, Short replicationFactor) throws InterruptedException, ExecutionException {
    if (!adminClient.listTopics().names().get().contains(topicName)) {
        logger.info("Creating topic {}", topicName);

        // With fewer than 3 replicas, a broker-default min.insync.replicas
        // could exceed the replication factor, so pin it to 1.
        final Map<String, String> configs = replicationFactor < 3 ? Map.of(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1") : Map.of();

        final NewTopic newTopic = new NewTopic(topicName, numberOfPartitions, replicationFactor);
        newTopic.configs(configs);
        try {
            CreateTopicsResult topicsCreationResult = adminClient.createTopics(Collections.singleton(newTopic));
            topicsCreationResult.all().get();
        } catch (ExecutionException e) {
            // Ignore only the benign "topic already exists" race; any other
            // failure (auth, quota, invalid config, ...) must surface.
            if (!(e.getCause() instanceof org.apache.kafka.common.errors.TopicExistsException)) {
                throw e;
            }
        }
    }
}
 
Example #16
Source File: KafkaAdminClient.java    From data-highway with Apache License 2.0 6 votes vote down vote up
/**
 * Describes a topic's road type (from its cleanup policy) and its
 * partition/replica layout.
 */
public KafkaTopicDetails topicDetails(String topic) {
  Map<Object, List<Object>> partitionAssignment = getPartitionInfo(topic);
  int numPartitions = partitionAssignment.size();
  // Replica count is taken from partition 0's assignment.
  int numReplicas = partitionAssignment.get(0).size();

  // Missing cleanup.policy falls back to "delete", i.e. a NORMAL road.
  Properties topicConfig = AdminUtils.fetchEntityConfig(zkUtils, ConfigType.Topic(), topic);
  String cleanupPolicyConfig = topicConfig
      .getProperty(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE);
  RoadType type = TopicConfig.CLEANUP_POLICY_COMPACT.equals(cleanupPolicyConfig)
      ? RoadType.COMPACT
      : RoadType.NORMAL;

  log.debug("numPartitions: {}, numReplicas: {}", numPartitions, numReplicas);
  return new KafkaTopicDetails(type, numPartitions, numReplicas);
}
 
Example #17
Source File: KsqlStructuredDataOutputNode.java    From ksql-fork-with-deep-learning-function with Apache License 2.0 6 votes vote down vote up
/**
 * Creates the sink topic for a query, using the configured sink partition and
 * replica counts; compacted sinks get cleanup.policy=compact.
 */
private void createSinkTopic(
    final String kafkaTopicName,
    final KsqlConfig ksqlConfig,
    final KafkaTopicClient kafkaTopicClient,
    final boolean isCompacted
) {
  final int numberOfPartitions =
      (Integer) ksqlConfig.get(KsqlConfig.SINK_NUMBER_OF_PARTITIONS_PROPERTY);
  final short numberOfReplications =
      (Short) ksqlConfig.get(KsqlConfig.SINK_NUMBER_OF_REPLICAS_PROPERTY);

  // Only compacted sinks carry an explicit topic config.
  Map<String, ?> config = Collections.emptyMap();
  if (isCompacted) {
    config = ImmutableMap.of(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
  }

  kafkaTopicClient.createTopic(kafkaTopicName, numberOfPartitions, numberOfReplications, config);
}
 
Example #18
Source File: KafkaTopicClientImpl.java    From ksql-fork-with-deep-learning-function with Apache License 2.0 6 votes vote down vote up
@Override
public TopicCleanupPolicy getTopicCleanupPolicy(final String topicName) {
  // Missing cleanup.policy maps to "" and falls through to the error below.
  final String policy = getTopicConfig(topicName)
      .getOrDefault(TopicConfig.CLEANUP_POLICY_CONFIG, "");

  // NOTE(review): Kafka reports combined policies as "compact,delete"; the
  // "compact+delete" spelling presumably matches whatever normalization
  // getTopicConfig applies — confirm against that implementation.
  if ("compact".equals(policy)) {
    return TopicCleanupPolicy.COMPACT;
  }
  if ("delete".equals(policy)) {
    return TopicCleanupPolicy.DELETE;
  }
  if ("compact+delete".equals(policy)) {
    return TopicCleanupPolicy.COMPACT_DELETE;
  }
  throw new KsqlException("Could not get the topic configs for : " + topicName);
}
 
Example #19
Source File: KsqlStructuredDataOutputNodeTest.java    From ksql-fork-with-deep-learning-function with Apache License 2.0 6 votes vote down vote up
// Verifies a non-windowed table sink is created as a compacted topic.
@Test
public void shouldCreateSinkWithCorrectCleanupPolicyNonWindowedTable() {
  KafkaTopicClient topicClientForNonWindowTable = EasyMock.mock(KafkaTopicClient.class);
  KsqlStructuredDataOutputNode outputNode = getKsqlStructuredDataOutputNode(false);
  StreamsBuilder streamsBuilder = new StreamsBuilder();
  Map<String, String> topicConfig = ImmutableMap.of(
      TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT);
  // Record the expected createTopic call on the mock.
  topicClientForNonWindowTable.createTopic("output", 4, (short) 3, topicConfig);
  EasyMock.replay(topicClientForNonWindowTable);
  SchemaKStream schemaKStream = outputNode.buildStream(
      streamsBuilder,
      ksqlConfig,
      topicClientForNonWindowTable,
      new FunctionRegistry(),
      new HashMap<>(),
      new MockSchemaRegistryClient());
  assertThat(schemaKStream, instanceOf(SchemaKTable.class));
  // Bug fix: EasyMock.verify() with no arguments verifies nothing — pass the
  // mock so the recorded createTopic expectation is actually checked.
  EasyMock.verify(topicClientForNonWindowTable);

}
 
Example #20
Source File: GrokPatternLoader.java    From SkaETL with Apache License 2.0 6 votes vote down vote up
/**
 * Loads the bundled Grok pattern files and publishes every pattern definition
 * to the compacted "grok-referential-db" topic at startup.
 *
 * @throws GrokException if a pattern resource fails to compile
 * @throws IOException   if a pattern resource cannot be read
 */
@PostConstruct
public void init() throws GrokException, IOException {

    GrokCompiler grok = GrokCompiler.newInstance();
    loadFromResource(grok, "/patterns/patterns");
    loadFromResource(grok, "/patterns/firewall");
    loadFromResource(grok, "/patterns/haproxy");
    loadFromResource(grok, "/patterns/java");
    loadFromResource(grok, "/patterns/linux-syslog");
    loadFromResource(grok, "/patterns/nagios");
    loadFromResource(grok, "/patterns/postfix");
    loadFromResource(grok, "/patterns/ruby");

    Map<String, String> patterns = grok.getPatternDefinitions();
    final String topic = "grok-referential-db";
    // Compacted topic keeps only the latest definition per pattern key.
    kafkaAdminService.createTopic(kafkaAdminService.buildTopicInfo(topic, TopicConfig.CLEANUP_POLICY_COMPACT));
    Producer<String, GrokData> grokProducer = kafkaUtils.kafkaGrokProducer();
    for (Map.Entry<String, String> pattern : patterns.entrySet()) {
        log.info(" GrokPatternLoader Produce with key {} value {}",pattern.getKey(),pattern.getValue());
        ProducerRecord<String, GrokData> record = new ProducerRecord<>(topic, pattern.getKey(), GrokData.builder().key(pattern.getKey()).value(pattern.getValue()).build());
        grokProducer.send(record);
    }
    // Bug fix: send() only buffers records asynchronously; flush to ensure all
    // pattern records are actually delivered before startup continues.
    grokProducer.flush();
}
 
Example #21
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0 5 votes vote down vote up
// Topic B's partition is already below min.insync.replicas=2 (ISR of 1);
// rolling any broker hosting replicas must be refused, while the empty
// broker (id 4) stays rollable.
@Test
public void testBelowMinIsr(VertxTestContext context) {
    KSB ksb = new KSB()
        .addNewTopic("A", false)
            .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2")
            .addNewPartition(0)
                .replicaOn(0, 1, 3)
                .leader(0)
                .isr(0, 1)
            .endPartition()
        .endTopic()
        .addNewTopic("B", false)
            .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2")
            .addNewPartition(0)
                .replicaOn(0, 1, 3)
                .leader(1)
                .isr(1)
            .endPartition()
        .endTopic()

        .addBroker(4);

    KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac());

    // One checkpoint flag per broker under test.
    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        kafkaAvailability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
            if (brokerId == 4) {
                assertTrue(canRoll,
                        "broker " + brokerId + " should be rollable, having no partitions");
            } else {
                assertFalse(canRoll,
                        "broker " + brokerId + " should not be rollable, being minisr = 2 and it's only replicated on two brokers");
            }
            a.flag();
        })));
    }
}
 
Example #22
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0 5 votes vote down vote up
// Neither topic has a leader set (commented out in the fixture). Broker 0 is
// the only ISR member of B/0, so rolling it would drop B/0 below min ISR;
// all other brokers remain rollable.
@Test
public void testNoLeader(VertxTestContext context) {
    KSB ksb = new KSB()
            .addNewTopic("A", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    //.leader(0)
                    .isr(1, 2)
                .endPartition()
            .endTopic()
            .addNewTopic("B", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    //.leader(1)
                    .isr(0)
                .endPartition()
            .endTopic()

            .addBroker(3);

    KafkaAvailability kafkaSorted = new KafkaAvailability(ksb.ac());

    // One checkpoint flag per broker under test.
    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        kafkaSorted.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
            if (brokerId == 0) {
                assertFalse(canRoll,
                        "broker " + brokerId + " should not be rollable, because B/0 would be below min isr");
            } else {
                assertTrue(canRoll,
                        "broker " + brokerId + " should be rollable, being minisr = 1 and having two brokers in its isr");
            }
            a.flag();
        })));
    }
}
 
Example #23
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0 5 votes vote down vote up
// Both topics sit exactly at min.insync.replicas=2 with 2 replicas; per the
// assertions, every broker is still considered rollable (rolling impacts
// availability but minisr equals the replica count).
@Test
public void testAtMinIsr(VertxTestContext context) {
    KSB ksb = new KSB()
        .addNewTopic("A", false)
            .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2")
            .addNewPartition(0)
                .replicaOn(0, 1)
                .leader(0)
                .isr(0, 1)
            .endPartition()
        .endTopic()
        .addNewTopic("B", false)
            .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2")
            .addNewPartition(0)
                .replicaOn(0, 1)
                .leader(1)
                .isr(0, 1)
            .endPartition()
        .endTopic()

        .addBroker(2);

    KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac());

    // One checkpoint flag per broker under test.
    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        kafkaAvailability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
            if (brokerId == 2) {
                assertTrue(canRoll,
                        "broker " + brokerId + " should be rollable, having no partitions");
            } else {
                assertTrue(canRoll,
                        "broker " + brokerId + " should be rollable, because although rolling it will impact availability minisr=|replicas|");
            }
            a.flag();
        })));
    }
}
 
Example #24
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0 5 votes vote down vote up
// Both topics have min.insync.replicas=1 with a full ISR of 3; every broker
// can be rolled without dropping any partition below min ISR.
@Test
public void testAboveMinIsr(VertxTestContext context) {
    KSB ksb = new KSB()
            .addNewTopic("A", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    .leader(0)
                    .isr(0, 1, 2)
                .endPartition()
            .endTopic()
            .addNewTopic("B", false)
                .addToConfig(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "1")
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    .leader(1)
                    .isr(0, 1, 2)
                .endPartition()
            .endTopic()

            .addBroker(3);

    KafkaAvailability kafkaAvailability = new KafkaAvailability(ksb.ac());

    // One checkpoint flag per broker under test.
    Checkpoint a = context.checkpoint(ksb.brokers.size());
    for (Integer brokerId : ksb.brokers.keySet()) {
        kafkaAvailability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
            assertTrue(canRoll,
                    "broker " + brokerId + " should be rollable, being minisr = 1 and having two brokers in its isr");
            a.flag();
        })));
    }
}
 
Example #25
Source File: MegabusModule.java    From emodb with Apache License 2.0 5 votes vote down vote up
/**
 * Binds the retry-ref topic, first ensuring it exists with delete cleanup,
 * the standard ref-topic retention, and the shared compression setting.
 */
@Provides
@Singleton
@RetryRefTopic
Topic provideRetryRefTopic(MegabusConfiguration megabusConfiguration, KafkaCluster kafkaCluster) {
    Topic retryRefTopic = megabusConfiguration.getRetryRefTopic();
    kafkaCluster.createTopicIfNotExists(
            retryRefTopic,
            ImmutableMap.of(
                    TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE,
                    TopicConfig.RETENTION_MS_CONFIG, REF_TOPIC_RETENTION,
                    TopicConfig.COMPRESSION_TYPE_CONFIG, COMPRESSION_CONFIGURATION));
    return retryRefTopic;
}
 
Example #26
Source File: MegabusModule.java    From emodb with Apache License 2.0 5 votes vote down vote up
/**
 * Binds the missing-ref topic, first ensuring it exists with delete cleanup,
 * the standard ref-topic retention, and the shared compression setting.
 */
@Provides
@Singleton
@MissingRefTopic
Topic provideMissingRefTopic(MegabusConfiguration megabusConfiguration, KafkaCluster kafkaCluster) {
    Topic missingRefTopic = megabusConfiguration.getMissingRefTopic();
    kafkaCluster.createTopicIfNotExists(
            missingRefTopic,
            ImmutableMap.of(
                    TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE,
                    TopicConfig.RETENTION_MS_CONFIG, REF_TOPIC_RETENTION,
                    TopicConfig.COMPRESSION_TYPE_CONFIG, COMPRESSION_CONFIGURATION));
    return missingRefTopic;
}
 
Example #27
Source File: MegabusModule.java    From emodb with Apache License 2.0 5 votes vote down vote up
/**
 * Binds the main megabus topic, first ensuring it exists as a compacted topic
 * with the megabus delete-retention and the shared compression setting.
 */
@Provides
@Singleton
@MegabusTopic
Topic provideMegabusTopic(MegabusConfiguration megabusConfiguration, KafkaCluster kafkaCluster) {
    Topic megabusTopic = megabusConfiguration.getMegabusTopic();
    kafkaCluster.createTopicIfNotExists(
            megabusTopic,
            ImmutableMap.of(
                    TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT,
                    TopicConfig.DELETE_RETENTION_MS_CONFIG, MEGABUS_DELETE_RETENTION,
                    TopicConfig.COMPRESSION_TYPE_CONFIG, COMPRESSION_CONFIGURATION));
    return megabusTopic;
}
 
Example #28
Source File: MegabusModule.java    From emodb with Apache License 2.0 5 votes vote down vote up
/**
 * Binds the megabus-ref topic, first ensuring it exists with delete cleanup,
 * the standard ref-topic retention, and the shared compression setting.
 */
@Provides
@Singleton
@MegabusRefTopic
Topic provideMegabusRefTopic(MegabusConfiguration megabusConfiguration, KafkaCluster kafkaCluster) {
    Topic megabusRefTopic = megabusConfiguration.getMegabusRefTopic();
    kafkaCluster.createTopicIfNotExists(
            megabusRefTopic,
            ImmutableMap.of(
                    TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE,
                    TopicConfig.RETENTION_MS_CONFIG, REF_TOPIC_RETENTION,
                    TopicConfig.COMPRESSION_TYPE_CONFIG, COMPRESSION_CONFIGURATION));
    return megabusRefTopic;
}
 
Example #29
Source File: AdminClientTest.java    From vertx-kafka-client with Apache License 2.0 5 votes vote down vote up
// Alters retention.ms on an existing topic, then reads the config back and
// asserts the updated value is visible via describeConfigs.
@Test
public void testAlterConfigs(TestContext ctx) {

  KafkaAdminClient adminClient = KafkaAdminClient.create(this.vertx, config);

  ConfigResource resource = new ConfigResource(org.apache.kafka.common.config.ConfigResource.Type.TOPIC, "first-topic");
  // create a entry for updating the retention.ms value on the topic
  ConfigEntry retentionEntry = new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "51000");
  Map<ConfigResource, Config> updateConfig = new HashMap<>();
  updateConfig.put(resource, new Config(Collections.singletonList(retentionEntry)));

  adminClient.alterConfigs(updateConfig, ctx.asyncAssertSuccess(v -> {

    // Read back the topic config and locate the retention.ms entry.
    adminClient.describeConfigs(Collections.singletonList(resource), ctx.asyncAssertSuccess(describeConfig -> {

      ConfigEntry describeRetentionEntry =
        describeConfig.get(resource)
          .getEntries()
          .stream()
          .filter(entry -> entry.getName().equals(TopicConfig.RETENTION_MS_CONFIG))
          .collect(Collectors.toList())
          .get(0);

      ctx.assertEquals("51000", describeRetentionEntry.getValue());
      adminClient.close();
    }));
  }));
}
 
Example #30
Source File: KafkaAdminClientExamples.java    From vertx-kafka-client with Apache License 2.0 5 votes vote down vote up
/**
 * Example about altering resources configuration like topic or broker
 * @param adminClient Kafka admin client instance
 */
public void exampleAlterConfigs(KafkaAdminClient adminClient) {
  // Target the topic whose configuration should change.
  ConfigResource resource = new ConfigResource(org.apache.kafka.common.config.ConfigResource.Type.TOPIC, "my-topic");

  // create a entry for updating the retention.ms value on the topic
  ConfigEntry retentionEntry = new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "51000");
  Config topicConfig = new Config(Collections.singletonList(retentionEntry));

  Map<ConfigResource, Config> updateConfig = new HashMap<>();
  updateConfig.put(resource, topicConfig);

  adminClient.alterConfigs(updateConfig)
    .onSuccess(v -> {
      // configuration altered successfully
    })
    .onFailure(cause -> {
      // something went wrong when altering configs
    });
}