io.debezium.kafka.KafkaCluster Java Examples

The following examples show how to use io.debezium.kafka.KafkaCluster, an embedded Kafka/ZooKeeper cluster intended for tests. Each example is drawn from an open-source project; the source file, originating project, and license are noted above the code.
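
Before the examples, a minimal sketch of the typical lifecycle. The ports and directory name here are illustrative, not taken from any particular example below:

// Start an embedded single-broker cluster, use it, then shut it down.
File dataDir = Testing.Files.createTestingDirectory("kafka-data");
KafkaCluster cluster = new KafkaCluster()
        .usingDirectory(dataDir)
        .withPorts(2181, 9092)           // ZooKeeper port, then Kafka port
        .addBrokers(1)
        .deleteDataPriorToStartup(true)
        .startup();                      // throws IOException on failure
String bootstrap = cluster.brokerList(); // e.g. "localhost:9092"
cluster.shutdown();                      // stops the broker and ZooKeeper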
Example #1
Source File: KafkaTestResource.java (from quarkus, Apache License 2.0)
@Override
public Map<String, String> start() {
    try {
        Properties props = new Properties();
        props.setProperty("zookeeper.connection.timeout.ms", "45000");
        File directory = Testing.Files.createTestingDirectory("kafka-data", true);
        kafka = new KafkaCluster().withPorts(2182, 19092)
                .addBrokers(1)
                .usingDirectory(directory)
                .deleteDataUponShutdown(true)
                .withKafkaConfiguration(props)
                .deleteDataPriorToStartup(true)
                .startup();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    return Collections.emptyMap();
}
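
The matching stop() of the lifecycle manager is not shown; a sketch, assuming kafka is the KafkaCluster field assigned above:

@Override
public void stop() {
    if (kafka != null) {
        kafka.shutdown(); // data is removed because deleteDataUponShutdown(true) was set
    }
}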
 
Example #2
Source File: KafkaFacade.java (from strimzi-kafka-bridge, Apache License 2.0)
private static KafkaCluster kafkaCluster() {
    if (kafkaCluster != null) {
        throw new IllegalStateException();
    }
    dataDir = Testing.Files.createTestingDirectory(DATA_DIR);

    Properties props = new Properties();
    props.put("auto.create.topics.enable", "false");

    kafkaCluster =
        new KafkaCluster()
            .usingDirectory(dataDir)
            .withPorts(ZOOKEEPER_PORT, KAFKA_PORT)
            .withKafkaConfiguration(props);
    return kafkaCluster;
}
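
The facade presumably calls this accessor once and then starts the cluster elsewhere; a hypothetical start method under that assumption:

public static void start() throws IOException {
    // kafkaCluster() may only be called once; startup() actually boots the brokers
    kafkaCluster().deleteDataPriorToStartup(true).startup();
}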
 
Example #3
Source File: KafkaConnectorIT.java (from strimzi-kafka-operator, Apache License 2.0)
@BeforeEach
public void beforeEach() throws IOException, InterruptedException {
    // Start a 3-node Kafka cluster
    cluster = new KafkaCluster()
        .addBrokers(3)
        .deleteDataPriorToStartup(true)
        .deleteDataUponShutdown(true)
        .usingDirectory(Files.createTempDirectory("operator-integration-test").toFile());

    cluster.startup();

    String connectClusterName = getClass().getSimpleName();
    cluster.createTopics(connectClusterName + "-offsets", connectClusterName + "-config", connectClusterName + "-status");

    // Start a 3-node connect cluster
    connectCluster = new ConnectCluster()
            .usingBrokers(cluster)
            .addConnectNodes(3);
    connectCluster.startup();
}
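
The corresponding @AfterEach is not shown; a sketch, assuming ConnectCluster exposes a shutdown counterpart to startup():

@AfterEach
public void afterEach() {
    if (connectCluster != null) {
        connectCluster.shutdown(); // assumed ConnectCluster API, mirroring startup()
    }
    if (cluster != null) {
        cluster.shutdown(); // KafkaCluster.shutdown() stops the brokers and ZooKeeper
    }
}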
 
Example #4
Source File: SimpleAclOperatorIT.java (from strimzi-kafka-operator, Apache License 2.0)
@BeforeAll
public static void beforeAll() {
    vertx = Vertx.vertx();

    try {
        kafkaCluster =
                new KafkaCluster()
                        .usingDirectory(Testing.Files.createTestingDirectory("simple-acl-operator-integration-test"))
                        .deleteDataPriorToStartup(true)
                        .deleteDataUponShutdown(true)
                        .addBrokers(1)
                        .withKafkaConfiguration(kafkaClusterConfig())
                        .startup();
    } catch (IOException e) {
        assertThat(false, is(true)); // fail the test if the embedded cluster cannot start
    }

    simpleAclOperator = new SimpleAclOperator(vertx,
            new DefaultAdminClientProvider().createAdminClient(kafkaCluster.brokerList(), null, null, null));
}
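
The matching @AfterAll is not shown; a sketch, assuming nothing else holds the cluster or the Vert.x instance open:

@AfterAll
public static void afterAll() {
    if (kafkaCluster != null) {
        kafkaCluster.shutdown(); // data is removed because deleteDataUponShutdown(true) was set
    }
    if (vertx != null) {
        vertx.close();
    }
}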
 
Example #5
Source File: KafkaTestBase.java (from smallrye-reactive-messaging, Apache License 2.0)
@BeforeClass
public static void startKafkaBroker() throws IOException {
    Properties props = new Properties();
    props.setProperty("zookeeper.connection.timeout.ms", "10000");
    File directory = Testing.Files.createTestingDirectory(System.getProperty("java.io.tmpdir"), true);
    kafka = new KafkaCluster().withPorts(2182, 9092).addBrokers(1)
            .usingDirectory(directory)
            .deleteDataUponShutdown(false)
            .withKafkaConfiguration(props)
            .deleteDataPriorToStartup(true)
            .startup();
}
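
A matching @AfterClass is not shown; a sketch, assuming kafka is the field assigned above:

@AfterClass
public static void stopKafkaBroker() {
    if (kafka != null) {
        kafka.shutdown(); // the data directory is preserved, since deleteDataUponShutdown(false) was set
    }
}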
 
Example #6
Source File: KafkaSASLTestResource.java (from quarkus, Apache License 2.0)
@Override
public Map<String, String> start() {
    try {
        File directory = Testing.Files.createTestingDirectory("kafka-data-sasl", true);

        enableServerJaasConf();

        Properties props = new Properties();
        props.setProperty("zookeeper.connection.timeout.ms", "45000");
        props.setProperty("listener.security.protocol.map", "CLIENT:SASL_PLAINTEXT");
        props.setProperty("listeners", "CLIENT://:19094");
        props.setProperty("inter.broker.listener.name", "CLIENT");

        props.setProperty("sasl.enabled.mechanisms", "PLAIN");
        props.setProperty("sasl.mechanism.inter.broker.protocol", "PLAIN");

        kafka = new KafkaCluster()
                .withPorts(2184, 19094)
                .addBrokers(1)
                .usingDirectory(directory)
                .deleteDataUponShutdown(true)
                .withKafkaConfiguration(props)
                .deleteDataPriorToStartup(true)
                .startup();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }

    return Collections.emptyMap();
}
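
A client connecting to this broker needs matching SASL settings; a sketch using standard Kafka client properties (the username and password are placeholders that must match the JAAS configuration installed by enableServerJaasConf()):

Properties clientProps = new Properties();
clientProps.setProperty("bootstrap.servers", "localhost:19094");
clientProps.setProperty("security.protocol", "SASL_PLAINTEXT");
clientProps.setProperty("sasl.mechanism", "PLAIN");
// Placeholder credentials; the real ones live in the server-side JAAS config
clientProps.setProperty("sasl.jaas.config",
        "org.apache.kafka.common.security.plain.PlainLoginModule required "
                + "username=\"client\" password=\"client-secret\";");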
 
Example #7
Source File: KafkaClusterTestBase.java (from vertx-kafka-client, Apache License 2.0)
protected static KafkaCluster kafkaCluster() {
  if (kafkaCluster != null) {
    throw new IllegalStateException();
  }
  dataDir = Testing.Files.createTestingDirectory("cluster");
  kafkaCluster = new KafkaCluster().usingDirectory(dataDir).withPorts(2181, 9092);
  return kafkaCluster;
}
 
Example #8
Source File: TopicOperatorMockTest.java (from strimzi-kafka-operator, Apache License 2.0)
private static int zkPort(KafkaCluster cluster) {
    // TODO The method was added upstream in DBZ-540, so reflection will no longer
    // be needed once the dependency is upgraded
    try {
        Field zkServerField = KafkaCluster.class.getDeclaredField("zkServer");
        zkServerField.setAccessible(true);
        return ((ZookeeperServer) zkServerField.get(cluster)).getPort();
    } catch (ReflectiveOperationException e) {
        throw new RuntimeException(e);
    }
}
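
A hypothetical caller would use the returned port to build a ZooKeeper connection string:

String zkConnect = "localhost:" + zkPort(cluster); // hypothetical usage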
 
Example #9
Source File: TopicOperatorBaseIT.java (from strimzi-kafka-operator, Apache License 2.0)
protected static int zkPort(KafkaCluster cluster) {
    // TODO The method was added upstream in DBZ-540, so reflection will no longer
    // be needed once the dependency is upgraded
    try {
        Field zkServerField = KafkaCluster.class.getDeclaredField("zkServer");
        zkServerField.setAccessible(true);
        return ((ZookeeperServer) zkServerField.get(cluster)).getPort();
    } catch (ReflectiveOperationException e) {
        throw new RuntimeException(e);
    }
}
 
Example #10
Source File: KafkaTestResource.java (from quarkus, Apache License 2.0)
@Override
public Map<String, String> start() {
    try {
        File directory = Testing.Files.createTestingDirectory("kafka-data", true);
        File sslDir = sslDir(directory, true);

        Path ksPath = new File(sslDir, "ks-keystore.p12").toPath();
        try (InputStream ksStream = getClass().getResourceAsStream("/ks-keystore.p12")) {
            Files.copy(
                    ksStream,
                    ksPath,
                    StandardCopyOption.REPLACE_EXISTING);
        }

        Path tsPath = new File(sslDir, "ks-truststore.p12").toPath();
        try (InputStream tsStream = getClass().getResourceAsStream("/ks-truststore.p12")) {
            Files.copy(
                    tsStream,
                    tsPath,
                    StandardCopyOption.REPLACE_EXISTING);
        }
        String password = "Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L";
        String type = "PKCS12";

        Properties props = new Properties();
        props.setProperty("zookeeper.connection.timeout.ms", "45000");

        // http://kafka.apache.org/documentation.html#security_ssl
        props.setProperty("listener.security.protocol.map", "CLIENT:SSL");
        props.setProperty("listeners", "CLIENT://:19092");
        props.setProperty("inter.broker.listener.name", "CLIENT");
        props.setProperty(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, ksPath.toString());
        props.setProperty(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, type);
        props.setProperty(SslConfigs.SSL_KEY_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, tsPath.toString());
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, type);
        props.setProperty(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");

        kafka = new KafkaCluster()
                .withPorts(2182, 19092)
                .addBrokers(1)
                .usingDirectory(directory)
                .deleteDataUponShutdown(true)
                .withKafkaConfiguration(props)
                .deleteDataPriorToStartup(true)
                .startup();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    return Collections.emptyMap();
}
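
Connecting to this listener requires client-side SSL settings that mirror the broker's; a sketch using standard Kafka client properties and the truststore copied above:

Properties clientProps = new Properties();
clientProps.setProperty("bootstrap.servers", "localhost:19092");
clientProps.setProperty("security.protocol", "SSL");
clientProps.setProperty(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, tsPath.toString());
clientProps.setProperty(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, password);
clientProps.setProperty(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, type);
// Disable hostname verification to match the broker configuration above
clientProps.setProperty(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");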
 
Example #11
Source File: KafkaSSLTestResource.java (from quarkus, Apache License 2.0)
@Override
public Map<String, String> start() {
    try {
        File directory = Testing.Files.createTestingDirectory("kafka-data-ssl", true);
        File sslDir = sslDir(directory, true);

        Path ksPath = new File(sslDir, "kafka-keystore.p12").toPath();
        try (InputStream ksStream = getClass().getResourceAsStream("/kafka-keystore.p12")) {
            Files.copy(
                    ksStream,
                    ksPath,
                    StandardCopyOption.REPLACE_EXISTING);
        }

        Path tsPath = new File(sslDir, "kafka-truststore.p12").toPath();
        try (InputStream tsStream = getClass().getResourceAsStream("/kafka-truststore.p12")) {
            Files.copy(
                    tsStream,
                    tsPath,
                    StandardCopyOption.REPLACE_EXISTING);
        }
        String password = "Z_pkTh9xgZovK4t34cGB2o6afT4zZg0L";
        String type = "PKCS12";

        Properties props = new Properties();
        props.setProperty("zookeeper.connection.timeout.ms", "45000");
        // See http://kafka.apache.org/documentation.html#security_ssl for details
        props.setProperty("listener.security.protocol.map", "CLIENT:SSL");
        props.setProperty("listeners", "CLIENT://:19093");
        props.setProperty("inter.broker.listener.name", "CLIENT");
        props.setProperty(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, ksPath.toString());
        props.setProperty(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, type);
        props.setProperty(SslConfigs.SSL_KEY_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, tsPath.toString());
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, password);
        props.setProperty(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, type);
        props.setProperty(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG, "");

        kafka = new KafkaCluster()
                .withPorts(2183, 19093)
                .addBrokers(1)
                .usingDirectory(directory)
                .deleteDataUponShutdown(true)
                .withKafkaConfiguration(props)
                .deleteDataPriorToStartup(true)
                .startup();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    return Collections.emptyMap();
}
 
Example #12
Source File: Main.java (from redpipe, Apache License 2.0)
private static void onStart() {
    System.err.println("Started");

    // Kafka setup for the example
    File dataDir = Testing.Files.createTestingDirectory("cluster");
    dataDir.deleteOnExit();
    KafkaCluster kafkaCluster;
    try {
        kafkaCluster = new KafkaCluster()
                .usingDirectory(dataDir)
                .withPorts(2181, 9092)
                .addBrokers(1)
                .deleteDataPriorToStartup(true)
                .startup();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

    // Deploy the dashboard
    JsonObject consumerConfig = new JsonObject((Map) kafkaCluster.useTo()
            .getConsumerProperties("the_group", "the_client", OffsetResetStrategy.LATEST));

    AppGlobals globals = AppGlobals.get();

    // Create the consumer
    KafkaConsumer<String, JsonObject> consumer = KafkaConsumer.create(globals.getVertx(),
            (Map) consumerConfig.getMap(), String.class, JsonObject.class);

    BehaviorSubject<JsonObject> consumerReporter = BehaviorSubject.create();
    consumer.toObservable().subscribe(record -> consumerReporter.onNext(record.value()));

    // Subscribe to Kafka
    consumer.subscribe("the_topic");
    globals.setGlobal("consumer", consumerReporter);

    // Deploy the metrics collector: 3 times
    JsonObject producerConfig = new JsonObject((Map) kafkaCluster.useTo()
            .getProducerProperties("the_producer"));
    globals.getVertx().deployVerticle(
            MetricsVerticle.class.getName(),
            new DeploymentOptions().setConfig(producerConfig).setInstances(3)
    );
}
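
The same useTo() helper supplies the producer properties consumed by MetricsVerticle; a hypothetical direct producer built from that config, mirroring the consumer creation above:

KafkaProducer<String, JsonObject> producer = KafkaProducer.create(globals.getVertx(),
        (Map) producerConfig.getMap(), String.class, JsonObject.class);
// "the_topic" matches the topic the consumer subscribes to above
producer.write(KafkaProducerRecord.create("the_topic", new JsonObject().put("metric", 1)));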
 
Example #13
Source File: ConnectCluster.java (from strimzi-kafka-operator, Apache License 2.0)
ConnectCluster usingBrokers(KafkaCluster kafkaCluster) {
    this.brokerList = kafkaCluster.brokerList();
    return this;
}
 
Example #14
Source File: KafkaConnectApiTest.java (from strimzi-kafka-operator, Apache License 2.0)
@BeforeEach
public void beforeEach() throws IOException, InterruptedException {
    // Start a 3-node Kafka cluster
    cluster = new KafkaCluster();
    cluster.addBrokers(3);
    cluster.deleteDataPriorToStartup(true);
    cluster.deleteDataUponShutdown(true);
    cluster.usingDirectory(Files.createTempDirectory("operator-integration-test").toFile());
    cluster.startup();
    cluster.createTopics(getClass().getSimpleName() + "-offsets",
            getClass().getSimpleName() + "-config",
            getClass().getSimpleName() + "-status");

    // Start an N-node connect cluster
    Map<String, String> workerProps = new HashMap<>();
    workerProps.put("listeners", "http://localhost:" + PORT);
    File tempDirectory = Files.createTempDirectory(getClass().getSimpleName()).toFile();
    workerProps.put("plugin.path", tempDirectory.toString());
    workerProps.put("group.id", toString());
    workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter");
    workerProps.put("key.converter.schemas.enable", "false");
    workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter");
    workerProps.put("value.converter.schemas.enable", "false");
    workerProps.put("offset.storage.topic", getClass().getSimpleName() + "-offsets");
    workerProps.put("config.storage.topic", getClass().getSimpleName() + "-config");
    workerProps.put("status.storage.topic", getClass().getSimpleName() + "-status");
    workerProps.put("bootstrap.servers", cluster.brokerList());
    //DistributedConfig config = new DistributedConfig(workerProps);
    //RestServer rest = new RestServer(config);
    //rest.initializeServer();
    CountDownLatch l = new CountDownLatch(1);
    Thread thread = new Thread(() -> {
        ConnectDistributed connectDistributed = new ConnectDistributed();
        connect = connectDistributed.startConnect(workerProps);
        l.countDown();
        connect.awaitStop();
    });
    thread.setDaemon(false);
    thread.start();
    l.await();
}
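
The matching @AfterEach is not shown; a sketch using the Connect runtime's stop()/awaitStop() pair:

@AfterEach
public void afterEach() {
    if (connect != null) {
        connect.stop();      // org.apache.kafka.connect.runtime.Connect
        connect.awaitStop();
    }
    cluster.shutdown();
}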
 
Example #15
Source File: TopicOperatorBaseIT.java (from strimzi-kafka-operator, Apache License 2.0)
@BeforeEach
public void setup() throws Exception {
    LOGGER.info("Setting up test");
    cluster.before();
    int counts = 3;
    do {
        try {
            kafkaCluster = new KafkaCluster();
            kafkaCluster.addBrokers(numKafkaBrokers());
            kafkaCluster.deleteDataPriorToStartup(true);
            kafkaCluster.deleteDataUponShutdown(true);
            kafkaCluster.usingDirectory(Files.createTempDirectory("operator-integration-test").toFile());
            kafkaCluster.withKafkaConfiguration(kafkaClusterConfig());
            kafkaCluster.startup();
            break;
        } catch (kafka.zookeeper.ZooKeeperClientTimeoutException e) {
            if (counts == 0) {
                throw e;
            }
            counts--;
        }
    } while (true);

    Properties p = new Properties();
    p.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaCluster.brokerList());
    adminClient = AdminClient.create(p);

    kubeClient = kubeClient().getClient();
    Crds.registerCustomKinds();
    LOGGER.info("Using namespace {}", NAMESPACE);
    startTopicOperator();

    // We can't delete events, so record the events which exist at the start of the test
    // and then waitForEvents() can ignore those
    preExistingEvents = kubeClient.events().inNamespace(NAMESPACE).withLabels(labels.labels()).list().
            getItems().stream().
            map(evt -> evt.getMetadata().getUid()).
            collect(Collectors.toSet());

    LOGGER.info("Finished setting up test");
}
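
A matching teardown is not shown; a sketch that reverses the setup (stopTopicOperator() is assumed to mirror startTopicOperator()):

@AfterEach
public void teardown() throws Exception {
    stopTopicOperator(); // assumed counterpart to startTopicOperator()
    if (adminClient != null) {
        adminClient.close();
    }
    if (kafkaCluster != null) {
        kafkaCluster.shutdown();
    }
}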