Java Code Examples for io.vertx.junit5.VertxTestContext#checkpoint()

The following examples show how to use io.vertx.junit5.VertxTestContext#checkpoint(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source File: TopicOperatorMockTest.java    From strimzi-kafka-operator with Apache License 2.0 6 votes vote down vote up
/**
 * Synchronously fetches the metadata for {@code topicName} from Kafka and converts
 * it to a {@link Topic}.
 *
 * <p>Blocks the calling (test) thread for up to 60 seconds waiting for the test
 * context to complete; on timeout the context is failed.</p>
 *
 * @param context   the Vert.x test context to flag/fail
 * @param topicName the name of the topic to look up
 * @return the converted {@link Topic}, or {@code null} if the lookup did not
 *         complete successfully before returning
 * @throws InterruptedException if interrupted while awaiting completion
 */
Topic getFromKafka(VertxTestContext context, String topicName) throws InterruptedException {
    AtomicReference<Topic> ref = new AtomicReference<>();
    Checkpoint async = context.checkpoint();
    Future<TopicMetadata> kafkaMetadata = session.kafka.topicMetadata(new TopicName(topicName));
    // Method reference instead of a trivial lambda.
    kafkaMetadata.map(TopicSerialization::fromTopicMetadata).onComplete(fromKafka -> {
        if (fromKafka.succeeded()) {
            ref.set(fromKafka.result());
        } else {
            context.failNow(fromKafka.cause());
        }
        async.flag();
    });
    if (!context.awaitCompletion(60, TimeUnit.SECONDS)) {
        // Use a concrete error type rather than a raw Throwable for the timeout.
        context.failNow(new AssertionError("Test timeout"));
    }
    return ref.get();
}
 
Example 2
Source File: AbstractNonNamespacedResourceOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0 6 votes vote down vote up
@Test
public void testSuccessfulCreation(VertxTestContext context) {
    T resource = resource();
    Resource mockResource = mock(resourceType());
    // No existing resource, so createOrUpdate must take the creation path.
    when(mockResource.get()).thenReturn(null);
    when(mockResource.create(any())).thenReturn(resource);

    // Note: the previous NonNamespaceOperation mock was stubbed but never wired
    // into the client, so it has been removed as dead code.
    MixedOperation mockCms = mock(MixedOperation.class);
    when(mockCms.withName(matches(RESOURCE_NAME))).thenReturn(mockResource);

    C mockClient = mock(clientType());
    mocker(mockClient, mockCms);

    AbstractNonNamespacedResourceOperator<C, T, L, D, R> op = createResourceOperationsWithMockedReadiness(
            vertx, mockClient);

    Checkpoint async = context.checkpoint();
    op.createOrUpdate(resource).onComplete(context.succeeding(rr -> {
        // The operator must have checked for an existing resource and then created it.
        verify(mockResource).get();
        verify(mockResource).create(eq(resource));
        async.flag();
    }));
}
 
Example 3
Source File: PlatformFeaturesAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0 6 votes vote down vote up
@Test
public void testVersionDetectionOpenShift39(VertxTestContext context) throws InterruptedException {
    // Kubernetes /version response as served by OpenShift 3.9 (Kubernetes 1.9).
    String version = "{\n" +
            "  \"major\": \"1\",\n" +
            "  \"minor\": \"9\",\n" +
            "  \"gitVersion\": \"v1.9.1+a0ce1bc657\",\n" +
            "  \"gitCommit\": \"a0ce1bc\",\n" +
            "  \"gitTreeState\": \"clean\",\n" +
            "  \"buildDate\": \"2018-06-24T01:54:00Z\",\n" +
            "  \"goVersion\": \"go1.9\",\n" +
            "  \"compiler\": \"gc\",\n" +
            "  \"platform\": \"linux/amd64\"\n" +
            "}";

    // emptyList() is the type-safe replacement for the raw EMPTY_LIST constant.
    HttpServer mockHttp = startMockApi(context, version, Collections.emptyList());

    KubernetesClient client = new DefaultKubernetesClient("127.0.0.1:" + mockHttp.actualPort());

    Checkpoint a = context.checkpoint();

    PlatformFeaturesAvailability.create(vertx, client).onComplete(context.succeeding(pfa -> context.verify(() -> {
        assertThat("Versions are not equal", pfa.getKubernetesVersion(), is(KubernetesVersion.V1_9));
        stopMockApi(context, mockHttp);
        a.flag();
    })));
}
 
Example 4
Source File: RouteToEBServiceHandlerTest.java    From vertx-web with Apache License 2.0 6 votes vote down vote up
@Test
public void emptyOperationResultTest(Vertx vertx, VertxTestContext testContext) {
  Checkpoint requestCompleted = testContext.checkpoint();

  // Expose the test service on the event bus at "someAddress".
  TestService service = new TestServiceImpl(vertx);
  final ServiceBinder serviceBinder = new ServiceBinder(vertx).setAddress("someAddress");
  consumer = serviceBinder.register(TestService.class, service);

  // Route GET /test through validation, then bridge it to the event bus service.
  router.get("/test")
    .handler(ValidationHandler.builder(parser).build())
    .handler(RouteToEBServiceHandler.build(vertx.eventBus(), "someAddress", "testEmptyServiceResponse"));

  // A service action returning no payload must yield 200 OK with an empty body.
  testRequest(client, HttpMethod.GET, "/test")
    .expect(statusCode(200), statusMessage("OK"))
    .expect(emptyResponse())
    .send(testContext, requestCompleted);
}
 
Example 5
Source File: Examples.java    From vertx-junit5 with Apache License 2.0 6 votes vote down vote up
@Test
public void checkpointing(Vertx vertx, VertxTestContext testContext) {
  // Three independent checkpoints: the test only passes once all are flagged
  // the expected number of times.
  Checkpoint listening = testContext.checkpoint();
  Checkpoint served = testContext.checkpoint(10);
  Checkpoint received = testContext.checkpoint(10);

  // HTTP server answering every request with "Ok" and counting each one served.
  vertx.createHttpServer()
    .requestHandler(request -> {
      request.response().end("Ok");
      served.flag();
    })
    .listen(8888)
    .onComplete(testContext.succeeding(server -> listening.flag()));

  // Fire 10 client requests; each successful response body is verified and counted.
  HttpClient httpClient = vertx.createHttpClient();
  for (int attempt = 0; attempt < 10; attempt++) {
    httpClient.get(8888, "localhost", "/")
      .compose(HttpClientResponse::body)
      .onComplete(testContext.succeeding(body -> testContext.verify(() -> {
        assertThat(body.toString()).isEqualTo("Ok");
        received.flag();
      })));
  }
}
 
Example 6
Source File: ValidationHandlerPredicatesIntegrationTest.java    From vertx-web with Apache License 2.0 5 votes vote down vote up
@Test
public void testFileUploadExists(VertxTestContext testContext, @TempDir Path tempDir) {
  // Four requests below, each flags the checkpoint once when its expectations pass.
  Checkpoint checkpoint = testContext.checkpoint(4);

  // Validation requires a multipart file upload named "myfile" whose content
  // type matches text/plain (quoted so it's treated literally, not as a regex).
  ValidationHandler validationHandler = ValidationHandler
    .builder(parser)
    .predicate(RequestPredicate.multipartFileUploadExists(
      "myfile",
      Pattern.quote("text/plain")
    ))
    .build();

  router.post("/testFileUpload")
    .handler(BodyHandler.create(tempDir.toAbsolutePath().toString()))
    .handler(validationHandler)
    .handler(routingContext ->
      routingContext
        .response()
        .setStatusCode(200)
        .end()
    );

  // Plain POST (no multipart form) -> 200; presumably the upload predicate only
  // applies to multipart requests — confirm against RequestPredicate semantics.
  testRequest(client, HttpMethod.POST, "/testFileUpload")
    .expect(statusCode(200))
    .send(testContext, checkpoint);

  // Empty multipart form: required upload missing -> 400.
  testRequest(client, HttpMethod.POST, "/testFileUpload")
    .expect(statusCode(400))
    .sendMultipartForm(MultipartForm.create(), testContext, checkpoint);

  // "myfile" present but as a plain attribute, not a file upload -> 400.
  testRequest(client, HttpMethod.POST, "/testFileUpload")
    .expect(statusCode(400))
    .sendMultipartForm(MultipartForm.create().attribute("myfile", "bla"), testContext, checkpoint);

  // Proper text/plain file upload named "myfile" -> 200.
  testRequest(client, HttpMethod.POST, "/testFileUpload")
    .expect(statusCode(200))
    .sendMultipartForm(MultipartForm.create().textFileUpload("myfile", "myfile.txt", "src/test/resources/myfile.txt", "text/plain"), testContext, checkpoint);
}
 
Example 7
Source File: AbstractNonNamespacedResourceOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0 5 votes vote down vote up
@Test
public void testReconcileThrowsWhenDeletionTimesOut(VertxTestContext context) {
    T resource = resource();
    AtomicBoolean watchWasClosed = new AtomicBoolean(false);
    Resource mockResource = mock(resourceType());
    // The resource exists and delete() "succeeds", but no deletion event is ever
    // delivered to the watcher, so the operator's wait for deletion must time out.
    when(mockResource.get()).thenReturn(resource);
    when(mockResource.withGracePeriod(anyLong())).thenReturn(mockResource);
    when(mockResource.delete()).thenReturn(true);
    // The returned Watch records whether it was closed during cleanup.
    // (The previously captured Watcher argument was unused and has been removed.)
    when(mockResource.watch(any())).thenAnswer(invocation -> (Watch) () -> watchWasClosed.set(true));

    // Note: the previous NonNamespaceOperation mock was stubbed but never wired
    // into the client, so it has been removed as dead code.
    MixedOperation mockCms = mock(MixedOperation.class);
    when(mockCms.withName(matches(RESOURCE_NAME))).thenReturn(mockResource);

    C mockClient = mock(clientType());
    mocker(mockClient, mockCms);

    AbstractNonNamespacedResourceOperator<C, T, L, D, R> op = createResourceOperations(vertx, mockClient);

    Checkpoint async = context.checkpoint();
    // Reconciling to null (desired absent) triggers deletion; expect a timeout
    // failure, a delete() call, and the watch to have been closed.
    op.reconcile(resource.getMetadata().getName(), null).onComplete(context.failing(e -> context.verify(() -> {
        assertThat(e, instanceOf(TimeoutException.class));
        verify(mockResource).delete();
        assertThat("Watch was not closed", watchWasClosed.get(), is(true));
        async.flag();
    })));
}
 
Example 8
Source File: ValidationHandlerPredicatesIntegrationTest.java    From vertx-web with Apache License 2.0 5 votes vote down vote up
@Test
public void testRequiredBodyPredicate(VertxTestContext testContext, @TempDir Path tempDir) {
  // Three requests below each flag this checkpoint once. The count must match
  // the number of flags: vertx-junit5 checkpoints are strict, and flagging more
  // times than declared fails the test context. The original checkpoint(2) was
  // over-flagged by the third request.
  Checkpoint checkpoint = testContext.checkpoint(3);

  // Validation fails any request that carries no body.
  ValidationHandler validationHandler = ValidationHandler
    .builder(parser)
    .predicate(RequestPredicate.BODY_REQUIRED)
    .build();

  router.route("/testRequiredBody")
    .handler(BodyHandler.create(tempDir.toAbsolutePath().toString()))
    .handler(validationHandler)
    .handler(routingContext ->
      routingContext
        .response()
        .setStatusCode(200)
        .end()
    );

  // POST with a JSON body -> 200.
  testRequest(client, HttpMethod.POST, "/testRequiredBody")
    .expect(statusCode(200))
    .sendJson(new JsonObject(), testContext, checkpoint);

  // GET without a body -> 400 with the predicate-failure response.
  testRequest(client, HttpMethod.GET, "/testRequiredBody")
    .expect(statusCode(400), failurePredicateResponse())
    .send(testContext, checkpoint);

  // POST without a body -> 400 with the predicate-failure response.
  testRequest(client, HttpMethod.POST, "/testRequiredBody")
    .expect(statusCode(400), failurePredicateResponse())
    .send(testContext, checkpoint);
}
 
Example 9
Source File: CertificateRenewalTest.java    From strimzi-kafka-operator with Apache License 2.0 5 votes vote down vote up
@Test
public void testReconcileCasGeneratesCertsInitially(VertxTestContext context) {
    // Operator-managed CA: certificates valid for 100 days, renewed 10 days
    // before expiry.
    CertificateAuthority certificateAuthority = new CertificateAuthorityBuilder()
            .withValidityDays(100)
            .withRenewalDays(10)
            .withGenerateCertificateAuthority(true)
            .build();

    // Delete secrets to emulate secrets not pre-existing
    secrets.clear();

    Checkpoint async = context.checkpoint();
    reconcileCa(context, certificateAuthority, certificateAuthority)
        .onComplete(context.succeeding(c -> context.verify(() -> {
            // Four secrets reconciled — presumably cluster CA cert/key then
            // clients CA cert/key, in that order; confirm against reconcileCa.
            assertThat(c.getAllValues(), hasSize(4));

            // Even-indexed captures are cert secrets: CA cert plus trust store
            // and its password, with the CA cert present inside the store.
            assertThat(c.getAllValues().get(0).getData().keySet(), is(set(CA_CRT, CA_STORE, CA_STORE_PASSWORD)));
            assertThat(isCertInTrustStore(CA_CRT, c.getAllValues().get(0).getData()), is(true));

            // Odd-indexed captures are key secrets holding only the CA key.
            assertThat(c.getAllValues().get(1).getData().keySet(), is(singleton(CA_KEY)));

            assertThat(c.getAllValues().get(2).getData().keySet(), is(set(CA_CRT, CA_STORE, CA_STORE_PASSWORD)));
            assertThat(isCertInTrustStore(CA_CRT, c.getAllValues().get(2).getData()), is(true));

            assertThat(c.getAllValues().get(3).getData().keySet(), is(singleton(CA_KEY)));

            async.flag();
        })));
}
 
Example 10
Source File: KafkaAvailabilityTest.java    From strimzi-kafka-operator with Apache License 2.0 5 votes vote down vote up
@Test
public void testNoMinIsr(VertxTestContext context) {
    // Two fully in-sync topics spread over brokers 0..2, plus an idle broker 3.
    KSB clusterState = new KSB()
            .addNewTopic("A", false)
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    .leader(0)
                    .isr(0, 1, 2)
                .endPartition()
            .endTopic()
            .addNewTopic("B", false)
                .addNewPartition(0)
                    .replicaOn(0, 1, 2)
                    .leader(1)
                    .isr(1, 0, 2)
                .endPartition()
            .endTopic()

            .addBroker(3);

    KafkaAvailability availability = new KafkaAvailability(clusterState.ac());

    // One checkpoint flag per broker: every broker must be reported rollable.
    Checkpoint rollable = context.checkpoint(clusterState.brokers.size());
    for (Integer brokerId : clusterState.brokers.keySet()) {
        availability.canRoll(brokerId).onComplete(context.succeeding(canRoll -> context.verify(() -> {
            assertTrue(canRoll,
                    "broker " + brokerId + " should be rollable, being minisr = 1 and having two brokers in its isr");
            rollable.flag();
        })));
    }
}
 
Example 11
Source File: KafkaUserOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0 4 votes vote down vote up
@Test
public void testReconcileNewScramShaUser(VertxTestContext context)    {
    // Mock out every collaborator the user operator touches.
    CrdOperator mockCrdOps = mock(CrdOperator.class);
    SecretOperator mockSecretOps = mock(SecretOperator.class);
    SimpleAclOperator aclOps = mock(SimpleAclOperator.class);
    ScramShaCredentialsOperator scramOps = mock(ScramShaCredentialsOperator.class);
    KafkaUserQuotasOperator quotasOps = mock(KafkaUserQuotasOperator.class);

    KafkaUserOperator op = new KafkaUserOperator(vertx, mockCertManager, mockCrdOps, Labels.EMPTY, mockSecretOps, scramOps, quotasOps, aclOps, ResourceUtils.CA_CERT_NAME, ResourceUtils.CA_KEY_NAME, ResourceUtils.NAMESPACE);
    KafkaUser user = ResourceUtils.createKafkaUserScramSha();

    // Capture what gets reconciled so it can be asserted after the reconcile.
    ArgumentCaptor<String> secretNamespaceCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> secretNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Secret> secretCaptor = ArgumentCaptor.forClass(Secret.class);
    when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<String> aclNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Set<SimpleAclRule>> aclRulesCaptor = ArgumentCaptor.forClass(Set.class);
    when(aclOps.reconcile(aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<String> scramUserCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> scramPasswordCaptor = ArgumentCaptor.forClass(String.class);
    when(scramOps.reconcile(scramUserCaptor.capture(), scramPasswordCaptor.capture())).thenReturn(Future.succeededFuture());

    // No existing secret: this is a brand-new user, so a password must be generated.
    when(mockSecretOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(null);

    when(mockCrdOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(user);
    when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user));
    when(mockCrdOps.updateStatusAsync(any(KafkaUser.class))).thenReturn(Future.succeededFuture());
    when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture());

    Checkpoint async = context.checkpoint();
    op.reconcile(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME))
        .onComplete(context.succeeding(v -> context.verify(() -> {

            // Exactly one secret reconciled, under the expected name/namespace.
            List<String> capturedNames = secretNameCaptor.getAllValues();
            assertThat(capturedNames, hasSize(1));
            assertThat(capturedNames.get(0), is(ResourceUtils.NAME));

            List<String> capturedNamespaces = secretNamespaceCaptor.getAllValues();
            assertThat(capturedNamespaces, hasSize(1));
            assertThat(capturedNamespaces.get(0), is(ResourceUtils.NAMESPACE));

            List<Secret> capturedSecrets = secretCaptor.getAllValues();

            assertThat(capturedSecrets, hasSize(1));

            // The secret's metadata must carry the standard Strimzi/Kubernetes labels.
            Secret captured = capturedSecrets.get(0);
            assertThat(captured.getMetadata().getName(), is(user.getMetadata().getName()));
            assertThat(captured.getMetadata().getNamespace(), is(user.getMetadata().getNamespace()));
            assertThat(captured.getMetadata().getLabels(),
                    is(Labels.fromMap(user.getMetadata().getLabels())
                            .withKubernetesName(KafkaUserModel.KAFKA_USER_OPERATOR_NAME)
                            .withKubernetesInstance(ResourceUtils.NAME)
                            .withKubernetesPartOf(ResourceUtils.NAME)
                            .withKubernetesManagedBy(KafkaUserModel.KAFKA_USER_OPERATOR_NAME)
                            .withStrimziKind(KafkaUser.RESOURCE_KIND)
                            .toMap()));

            // The SCRAM password pushed to Kafka must match the base64-decoded
            // secret value and be a 12-char alphanumeric generated password.
            assertThat(scramPasswordCaptor.getValue(), is(new String(Base64.getDecoder().decode(captured.getData().get(KafkaUserModel.KEY_PASSWORD)))));
            assertThat(new String(Base64.getDecoder().decode(captured.getData().get(KafkaUserModel.KEY_PASSWORD))).matches("[a-zA-Z0-9]{12}"), is(true));

            // ACLs are reconciled for both the TLS and the SCRAM user names:
            // null for the TLS identity (no ACLs) and the real rules for SCRAM.
            List<String> capturedAclNames = aclNameCaptor.getAllValues();
            assertThat(capturedAclNames, hasSize(2));
            assertThat(capturedAclNames.get(0), is(KafkaUserModel.getTlsUserName(ResourceUtils.NAME)));
            assertThat(capturedAclNames.get(1), is(KafkaUserModel.getScramUserName(ResourceUtils.NAME)));

            List<Set<SimpleAclRule>> capturedAcls = aclRulesCaptor.getAllValues();

            assertThat(capturedAcls, hasSize(2));
            Set<SimpleAclRule> aclRules = capturedAcls.get(1);

            assertThat(aclRules, hasSize(ResourceUtils.createExpectedSimpleAclRules(user).size()));
            assertThat(aclRules, is(ResourceUtils.createExpectedSimpleAclRules(user)));
            assertThat(capturedAcls.get(0), is(nullValue()));

            async.flag();
        })));
}
 
Example 12
Source File: KafkaUserOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0 4 votes vote down vote up
@Test
public void testReconcileExistingTlsUser(VertxTestContext context)    {
    // Mock out every collaborator the user operator touches.
    CrdOperator mockCrdOps = mock(CrdOperator.class);
    SecretOperator mockSecretOps = mock(SecretOperator.class);
    SimpleAclOperator aclOps = mock(SimpleAclOperator.class);
    ScramShaCredentialsOperator scramOps = mock(ScramShaCredentialsOperator.class);
    KafkaUserQuotasOperator quotasOps = mock(KafkaUserQuotasOperator.class);

    KafkaUserOperator op = new KafkaUserOperator(vertx, mockCertManager, mockCrdOps, Labels.EMPTY, mockSecretOps, scramOps, quotasOps, aclOps, ResourceUtils.CA_CERT_NAME, ResourceUtils.CA_KEY_NAME, ResourceUtils.NAMESPACE);
    KafkaUser user = ResourceUtils.createKafkaUserTls();
    Secret clientsCa = ResourceUtils.createClientsCaCertSecret();
    Secret clientsCaKey = ResourceUtils.createClientsCaKeySecret();
    // Pre-existing user secret: the reconcile should keep its certificate data.
    Secret userCert = ResourceUtils.createUserSecretTls();

    // Capture what gets reconciled so it can be asserted after the reconcile.
    ArgumentCaptor<String> secretNamespaceCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> secretNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Secret> secretCaptor = ArgumentCaptor.forClass(Secret.class);
    when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), secretCaptor.capture())).thenReturn(Future.succeededFuture());

    when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<String> aclNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Set<SimpleAclRule>> aclRulesCaptor = ArgumentCaptor.forClass(Set.class);
    when(aclOps.reconcile(aclNameCaptor.capture(), aclRulesCaptor.capture())).thenReturn(Future.succeededFuture());

    // CA cert/key and the existing user secret are all resolvable.
    when(mockSecretOps.get(eq(clientsCa.getMetadata().getNamespace()), eq(clientsCa.getMetadata().getName()))).thenReturn(clientsCa);
    when(mockSecretOps.get(eq(clientsCa.getMetadata().getNamespace()), eq(clientsCaKey.getMetadata().getName()))).thenReturn(clientsCaKey);
    when(mockSecretOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(userCert);

    when(mockCrdOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(user);
    when(mockCrdOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(user));
    when(mockCrdOps.updateStatusAsync(any(KafkaUser.class))).thenReturn(Future.succeededFuture());
    when(quotasOps.reconcile(any(), any())).thenReturn(Future.succeededFuture());

    Checkpoint async = context.checkpoint();
    op.reconcile(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME))
        .onComplete(context.succeeding(v -> context.verify(() -> {

            // Exactly one secret reconciled, under the expected name/namespace.
            List<String> capturedNames = secretNameCaptor.getAllValues();
            assertThat(capturedNames, hasSize(1));
            assertThat(ResourceUtils.NAME, is(capturedNames.get(0)));

            List<String> capturedNamespaces = secretNamespaceCaptor.getAllValues();
            assertThat(capturedNamespaces, hasSize(1));
            assertThat(capturedNamespaces.get(0), is(ResourceUtils.NAMESPACE));

            List<Secret> capturedSecrets = secretCaptor.getAllValues();

            assertThat(capturedSecrets, hasSize(1));

            // The secret's metadata must carry the standard Strimzi/Kubernetes labels.
            Secret captured = capturedSecrets.get(0);
            assertThat(captured.getMetadata().getName(), is(user.getMetadata().getName()));
            assertThat(captured.getMetadata().getNamespace(), is(user.getMetadata().getNamespace()));
            assertThat(captured.getMetadata().getLabels(),
                    is(Labels.fromMap(user.getMetadata().getLabels())
                            .withKubernetesName(KafkaUserModel.KAFKA_USER_OPERATOR_NAME)
                            .withKubernetesInstance(ResourceUtils.NAME)
                            .withKubernetesPartOf(ResourceUtils.NAME)
                            .withKubernetesManagedBy(KafkaUserModel.KAFKA_USER_OPERATOR_NAME)
                            .withStrimziKind(KafkaUser.RESOURCE_KIND)
                            .toMap()));
            // The existing certificate material must be preserved, not regenerated.
            assertThat(captured.getData().get("ca.crt"), is(userCert.getData().get("ca.crt")));
            assertThat(captured.getData().get("user.crt"), is(userCert.getData().get("user.crt")));
            assertThat(captured.getData().get("user.key"), is(userCert.getData().get("user.key")));

            // ACLs are reconciled for both identities: real rules for the TLS
            // user name, null (no ACLs) for the SCRAM user name.
            List<String> capturedAclNames = aclNameCaptor.getAllValues();
            assertThat(capturedAclNames, hasSize(2));
            assertThat(capturedAclNames.get(0), is(KafkaUserModel.getTlsUserName(ResourceUtils.NAME)));
            assertThat(capturedAclNames.get(1), is(KafkaUserModel.getScramUserName(ResourceUtils.NAME)));

            List<Set<SimpleAclRule>> capturedAcls = aclRulesCaptor.getAllValues();

            assertThat(capturedAcls, hasSize(2));
            Set<SimpleAclRule> aclRules = capturedAcls.get(0);

            assertThat(aclRules, hasSize(ResourceUtils.createExpectedSimpleAclRules(user).size()));
            assertThat(aclRules, is(ResourceUtils.createExpectedSimpleAclRules(user)));
            assertThat(capturedAcls.get(1), is(nullValue()));

            async.flag();
        })));

}
 
Example 13
Source File: SimpleAclOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0 4 votes vote down vote up
@Test
public void testReconcileInternalUpdateCreatesNewAclsAndDeletesOldAcls(VertxTestContext context) {
    Admin mockAdminClient = mock(AdminClient.class);
    SimpleAclOperator aclOp = new SimpleAclOperator(vertx, mockAdminClient);

    // Existing state: user "CN=foo" can READ my-topic. Desired state: only
    // WRITE on my-topic2. Reconcile must therefore create one ACL and delete one.
    ResourcePattern resource1 = new ResourcePattern(ResourceType.TOPIC, "my-topic", PatternType.LITERAL);
    ResourcePattern resource2 = new ResourcePattern(ResourceType.TOPIC, "my-topic2", PatternType.LITERAL);

    KafkaPrincipal foo = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "CN=foo");
    AclBinding readAclBinding = new AclBinding(resource1, new AccessControlEntry(foo.toString(), "*", org.apache.kafka.common.acl.AclOperation.READ, AclPermissionType.ALLOW));
    AclBinding writeAclBinding = new AclBinding(resource2, new AccessControlEntry(foo.toString(), "*", org.apache.kafka.common.acl.AclOperation.WRITE, AclPermissionType.ALLOW));

    SimpleAclRuleResource resource = new SimpleAclRuleResource("my-topic2", SimpleAclRuleResourceType.TOPIC, AclResourcePatternType.LITERAL);
    SimpleAclRule rule1 = new SimpleAclRule(AclRuleType.ALLOW, resource, "*", AclOperation.WRITE);

    // Capture the bindings/filters passed to the admin client's create/delete calls.
    ArgumentCaptor<Collection<AclBinding>> aclBindingsCaptor = ArgumentCaptor.forClass(Collection.class);
    ArgumentCaptor<Collection<AclBindingFilter>> aclBindingFiltersCaptor = ArgumentCaptor.forClass(Collection.class);
    assertDoesNotThrow(() -> {
        mockDescribeAcls(mockAdminClient, null, Collections.singleton(readAclBinding));
        mockCreateAcls(mockAdminClient, aclBindingsCaptor);
        mockDeleteAcls(mockAdminClient, Collections.singleton(readAclBinding), aclBindingFiltersCaptor);
    });

    Checkpoint async = context.checkpoint();
    aclOp.reconcile("CN=foo", new LinkedHashSet(asList(rule1)))
            .onComplete(context.succeeding(rr -> context.verify(() -> {

                // Create Write rule for resource 2
                Collection<AclBinding> capturedAclBindings = aclBindingsCaptor.getValue();
                assertThat(capturedAclBindings, hasSize(1));
                assertThat(capturedAclBindings, hasItem(writeAclBinding));
                Set<ResourcePattern> capturedResourcePatterns =
                        capturedAclBindings.stream().map(AclBinding::pattern).collect(Collectors.toSet());
                assertThat(capturedResourcePatterns, hasSize(1));
                assertThat(capturedResourcePatterns, hasItem(resource2));

                // Delete read rule for resource 1
                Collection<AclBindingFilter> capturedAclBindingFilters = aclBindingFiltersCaptor.getValue();
                assertThat(capturedAclBindingFilters, hasSize(1));
                assertThat(capturedAclBindingFilters, hasItem(readAclBinding.toFilter()));

                Set<ResourcePatternFilter> capturedResourcePatternFilters =
                        capturedAclBindingFilters.stream().map(AclBindingFilter::patternFilter).collect(Collectors.toSet());
                assertThat(capturedResourcePatternFilters, hasSize(1));
                assertThat(capturedResourcePatternFilters, hasItem(resource1.toFilter()));

                async.flag();
            })));
}
 
Example 14
Source File: KafkaBridgeAssemblyOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0 4 votes vote down vote up
@Test
public void testCreateOrUpdateWithScaleDown(VertxTestContext context) {
    int scaleTo = 1;

    // Mock supplier provides all the Kubernetes resource operators the
    // assembly operator needs.
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
    CrdOperator mockBridgeOps = supplier.kafkaBridgeOperator;
    DeploymentOperator mockDcOps = supplier.deploymentOperations;
    PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
    ConfigMapOperator mockCmOps = supplier.configMapOperations;
    ServiceOperator mockServiceOps = supplier.serviceOperations;

    String clusterCmName = "foo";
    String clusterCmNamespace = "test";

    KafkaBridge clusterCm = ResourceUtils.createEmptyKafkaBridgeCluster(clusterCmNamespace, clusterCmName);
    clusterCm.getSpec().setReplicas(scaleTo); // Change replicas to create ScaleDown

    // Desired state with the reduced replica count, built from the same cluster.
    KafkaBridge scaledDownCluster = new KafkaBridgeBuilder(clusterCm)
            .editOrNewSpec()
                .withReplicas(scaleTo)
            .endSpec()
            .build();
    KafkaBridgeCluster bridge = KafkaBridgeCluster.fromCrd(clusterCm, VERSIONS);

    // Existing deployment/service are returned as-is; readiness and observation
    // checks succeed immediately.
    when(mockBridgeOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
    when(mockBridgeOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(clusterCm));
    when(mockBridgeOps.updateStatusAsync(any(KafkaBridge.class))).thenReturn(Future.succeededFuture());
    when(mockServiceOps.get(clusterCmNamespace, bridge.getName())).thenReturn(bridge.generateService());
    Deployment dep = bridge.generateDeployment(new HashMap<>(), true, null, null);
    when(mockDcOps.get(clusterCmNamespace, bridge.getName())).thenReturn(dep);
    when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());

    when(mockServiceOps.reconcile(eq(clusterCmNamespace), any(), any())).thenReturn(Future.succeededFuture());

    when(mockDcOps.reconcile(eq(clusterCmNamespace), any(), any())).thenReturn(Future.succeededFuture());

    // Both scale directions are stubbed; the assertions below check that both
    // are invoked during createOrUpdate.
    doAnswer(i -> Future.succeededFuture(scaleTo))
            .when(mockDcOps).scaleUp(clusterCmNamespace, bridge.getName(), scaleTo);

    doAnswer(i -> Future.succeededFuture(scaleTo))
            .when(mockDcOps).scaleDown(clusterCmNamespace, bridge.getName(), scaleTo);

    when(mockBridgeOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaBridge())));
    when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));
    when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new PodDisruptionBudget())));

    KafkaBridgeAssemblyOperator ops = new KafkaBridgeAssemblyOperator(vertx, new PlatformFeaturesAvailability(true, kubernetesVersion),
            new MockCertManager(), new PasswordGenerator(10, "a", "a"), supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS));

    Checkpoint async = context.checkpoint();
    ops.createOrUpdate(new Reconciliation("test-trigger", KafkaBridge.RESOURCE_KIND, clusterCmNamespace, clusterCmName), scaledDownCluster)
        .onComplete(context.succeeding(v -> context.verify(() -> {
            verify(mockDcOps).scaleUp(clusterCmNamespace, bridge.getName(), scaleTo);
            verify(mockDcOps).scaleDown(clusterCmNamespace, bridge.getName(), scaleTo);
            async.flag();
        })));
}
 
Example 15
Source File: KafkaUserOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0 4 votes vote down vote up
@Test
public void testReconcileDeleteTlsUser(VertxTestContext context)    {
    // Mock out every collaborator the user operator touches.
    CrdOperator mockCrdOps = mock(CrdOperator.class);
    SecretOperator mockSecretOps = mock(SecretOperator.class);
    SimpleAclOperator aclOps = mock(SimpleAclOperator.class);
    ScramShaCredentialsOperator scramOps = mock(ScramShaCredentialsOperator.class);
    KafkaUserQuotasOperator quotasOps = mock(KafkaUserQuotasOperator.class);

    KafkaUserOperator op = new KafkaUserOperator(vertx, mockCertManager, mockCrdOps, Labels.EMPTY, mockSecretOps, scramOps, quotasOps, aclOps, ResourceUtils.CA_CERT_NAME, ResourceUtils.CA_KEY_NAME, ResourceUtils.NAMESPACE);
    KafkaUser user = ResourceUtils.createKafkaUserTls();
    Secret clientsCa = ResourceUtils.createClientsCaCertSecret();
    Secret userCert = ResourceUtils.createUserSecretTls();

    // Deletion path: secret and ACLs are reconciled with a null desired state.
    ArgumentCaptor<String> secretNamespaceCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> secretNameCaptor = ArgumentCaptor.forClass(String.class);
    when(mockSecretOps.reconcile(secretNamespaceCaptor.capture(), secretNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());

    when(scramOps.reconcile(any(), any())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<String> aclNameCaptor = ArgumentCaptor.forClass(String.class);
    when(aclOps.reconcile(aclNameCaptor.capture(), isNull())).thenReturn(Future.succeededFuture());

    when(mockSecretOps.get(eq(clientsCa.getMetadata().getNamespace()), eq(clientsCa.getMetadata().getName()))).thenReturn(clientsCa);
    when(mockSecretOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(userCert);

    // The KafkaUser CR no longer exists, which triggers deletion of its resources.
    when(mockCrdOps.get(eq(user.getMetadata().getNamespace()), eq(user.getMetadata().getName()))).thenReturn(null);

    when(quotasOps.reconcile(anyString(), eq(null))).thenReturn(Future.succeededFuture());

    Checkpoint async = context.checkpoint();
    op.reconcile(new Reconciliation("test-trigger", KafkaUser.RESOURCE_KIND, ResourceUtils.NAMESPACE, ResourceUtils.NAME))
        .onComplete(context.succeeding(v -> context.verify(() -> {

            // The user's secret was reconciled (to null) exactly once.
            List<String> capturedNames = secretNameCaptor.getAllValues();
            assertThat(capturedNames, hasSize(1));
            assertThat(capturedNames.get(0), is(ResourceUtils.NAME));

            List<String> capturedNamespaces = secretNamespaceCaptor.getAllValues();
            assertThat(capturedNamespaces, hasSize(1));
            assertThat(capturedNamespaces.get(0), is(ResourceUtils.NAMESPACE));

            // ACLs for both the TLS and the SCRAM identity were cleared.
            List<String> capturedAclNames = aclNameCaptor.getAllValues();
            assertThat(capturedAclNames, hasSize(2));
            assertThat(capturedAclNames.get(0), is(KafkaUserModel.getTlsUserName(ResourceUtils.NAME)));
            assertThat(capturedAclNames.get(1), is(KafkaUserModel.getScramUserName(ResourceUtils.NAME)));

            async.flag();
        })));
}
 
Example 16
Source File: KafkaConnectS2IAssemblyOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0 4 votes vote down vote up
@Test
public void testReconcile(VertxTestContext context) {
    // Verifies that reconcileAll() discovers every KafkaConnectS2I resource in the
    // namespace and invokes createOrUpdate() exactly once per cluster ("foo" and "bar").
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
    CrdOperator mockConnectS2IOps = supplier.connectS2IOperator;
    DeploymentConfigOperator mockDcOps = supplier.deploymentConfigOperations;
    SecretOperator mockSecretOps = supplier.secretOperations;

    String clusterCmNamespace = "test";

    // Two clusters exist in the namespace: "foo" (no DeploymentConfig yet) and "bar" (already deployed)
    KafkaConnectS2I foo = ResourceUtils.createEmptyKafkaConnectS2ICluster(clusterCmNamespace, "foo");
    KafkaConnectS2I bar = ResourceUtils.createEmptyKafkaConnectS2ICluster(clusterCmNamespace, "bar");
    when(mockConnectS2IOps.listAsync(eq(clusterCmNamespace), any(Optional.class))).thenReturn(Future.succeededFuture(asList(foo, bar)));
    // when requested ConfigMap for a specific Kafka Connect S2I cluster
    when(mockConnectS2IOps.get(eq(clusterCmNamespace), eq("foo"))).thenReturn(foo);
    when(mockConnectS2IOps.get(eq(clusterCmNamespace), eq("bar"))).thenReturn(bar);
    when(mockConnectS2IOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(bar));
    when(mockConnectS2IOps.updateStatusAsync(any(KafkaConnectS2I.class))).thenReturn(Future.succeededFuture());

    // providing the list of ALL DeploymentConfigs for all the Kafka Connect S2I clusters
    Labels newLabels = Labels.forStrimziKind(KafkaConnectS2I.RESOURCE_KIND);
    when(mockDcOps.list(eq(clusterCmNamespace), eq(newLabels))).thenReturn(
            asList(KafkaConnectS2ICluster.fromCrd(bar, VERSIONS).generateDeploymentConfig(new HashMap<>(), true, null, null)));

    // providing the list DeploymentConfigs for already "existing" Kafka Connect S2I clusters
    Labels barLabels = Labels.forStrimziCluster("bar");
    when(mockDcOps.list(eq(clusterCmNamespace), eq(barLabels))).thenReturn(
            asList(KafkaConnectS2ICluster.fromCrd(bar, VERSIONS).generateDeploymentConfig(new HashMap<>(), true, null, null))
    );
    when(mockDcOps.readiness(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());

    when(mockSecretOps.reconcile(eq(clusterCmNamespace), any(), any())).thenReturn(Future.succeededFuture());

    // Collects the names of the clusters for which createOrUpdate() was invoked;
    // concurrent-safe because reconciliation may run on Vert.x event-loop threads.
    Set<String> createdOrUpdated = new CopyOnWriteArraySet<>();

    // One flag per expected createOrUpdate() invocation (foo + bar)
    Checkpoint createOrUpdateAsync = context.checkpoint(2);
    PlatformFeaturesAvailability pfa = new PlatformFeaturesAvailability(true, kubernetesVersion);
    KafkaConnectS2IAssemblyOperator ops = new KafkaConnectS2IAssemblyOperator(vertx, pfa,
            supplier, ResourceUtils.dummyClusterOperatorConfig(VERSIONS)) {

        @Override
        public Future<Void> createOrUpdate(Reconciliation reconciliation, KafkaConnectS2I kafkaConnectS2IAssembly) {
            createdOrUpdated.add(kafkaConnectS2IAssembly.getMetadata().getName());
            createOrUpdateAsync.flag();
            return Future.succeededFuture();
        }
    };

    Checkpoint async = context.checkpoint();
    // Now try to reconcile all the Kafka Connect S2I clusters
    ops.reconcileAll("test", clusterCmNamespace,
        context.succeeding(v -> context.verify(() -> {
            assertThat(createdOrUpdated, is(new HashSet<>(asList("foo", "bar"))));
            async.flag();
        })));

}
 
Example 17
Source File: ValidationHandlerProcessorsIntegrationTest.java    From vertx-web with Apache License 2.0 4 votes vote down vote up
@Test
public void testHeaderParamsAsync(VertxTestContext testContext) {
    // Four requests are fired below; each one flags the shared checkpoint on completion.
    Checkpoint checkpoint = testContext.checkpoint(4);

    // Valid header values used as the baseline for every request.
    String headerA = "hello";
    String headerB = "false";
    String headerC = "10";

    // Validates three headers: a free-form string, a boolean, and a ref'd integer schema
    // parsed as a long.
    ValidationHandler validationHandler = ValidationHandlerBuilder
        .create(parser)
        .headerParameter(param("x-a", stringSchema()))
        .headerParameter(param("x-b", booleanSchema()))
        .headerParameter(param("x-c", ref(JsonPointer.fromURI(URI.create("int_schema.json"))), ValueParser.LONG_PARSER))
        .build();

    // Echo the three parsed header parameters back in the status message.
    router.get("/test")
        .handler(validationHandler)
        .handler(routingContext -> {
            RequestParameters params = routingContext.get("parsedParameters");
            String message = String.format("%s%s%s",
                params.headerParameter("x-a"),
                params.headerParameter("x-b"),
                params.headerParameter("x-c"));
            routingContext.response().setStatusMessage(message).end();
        });

    // All headers valid -> 200 with the concatenated values as status message.
    testRequest(client, HttpMethod.GET, "/test")
        .with(requestHeader("x-a", headerA), requestHeader("x-b", headerB), requestHeader("x-c", headerC))
        .expect(statusCode(200), statusMessage(headerA + headerB + headerC))
        .send(testContext, checkpoint);

    // "bla" is not a boolean -> parsing error on x-b.
    testRequest(client, HttpMethod.GET, "/test")
        .with(requestHeader("x-a", headerA), requestHeader("x-b", "bla"), requestHeader("x-c", headerC))
        .expect(badParameterResponse(
            ParameterProcessorException.ParameterProcessorErrorType.PARSING_ERROR,
            "x-b",
            ParameterLocation.HEADER
        ))
        .send(testContext, checkpoint);

    // "bla" is not a number -> parsing error on x-c.
    testRequest(client, HttpMethod.GET, "/test")
        .with(requestHeader("x-a", headerA), requestHeader("x-b", headerB), requestHeader("x-c", "bla"))
        .expect(badParameterResponse(
            ParameterProcessorException.ParameterProcessorErrorType.PARSING_ERROR,
            "x-c",
            ParameterLocation.HEADER
        ))
        .send(testContext, checkpoint);

    // "15" parses as a long but violates the int_schema.json constraints -> validation error.
    testRequest(client, HttpMethod.GET, "/test")
        .with(requestHeader("x-a", headerA), requestHeader("x-b", headerB), requestHeader("x-c", "15"))
        .expect(badParameterResponse(
            ParameterProcessorException.ParameterProcessorErrorType.VALIDATION_ERROR,
            "x-c",
            ParameterLocation.HEADER
        ))
        .send(testContext, checkpoint);
}
 
Example 18
Source File: ValidationHandlerProcessorsIntegrationTest.java    From vertx-web with Apache License 2.0 4 votes vote down vote up
@Test
public void testQueryJsonObjectAsyncParam(VertxTestContext testContext) {
    // Two requests below; each flags the shared checkpoint on completion.
    Checkpoint checkpoint = testContext.checkpoint(2);

    // Validates the "myTree" query parameter as JSON against the ref'd tree schema.
    ValidationHandler validationHandler = ValidationHandlerBuilder
        .create(parser)
        .queryParameter(Parameters.jsonParam("myTree", ref(JsonPointer.fromURI(URI.create("tree_schema.json")))))
        .build();

    // Echo the parsed JSON object back as the response body.
    router
        .get("/test")
        .handler(validationHandler)
        .handler(routingContext -> {
            RequestParameters params = routingContext.get("parsedParameters");
            JsonObject tree = params.queryParameter("myTree").getJsonObject();
            routingContext
                .response()
                .putHeader("content-type", "application/json")
                .end(tree.toBuffer());
        });

    // A valid tree: root value plus one child node.
    JsonObject childNode = new JsonObject().put("value", "bbb");
    JsonObject validTree = new JsonObject()
        .put("value", "aaa")
        .put("childs", new JsonArray().add(childNode));

    // Valid tree -> 200 and the same JSON echoed back.
    testRequest(client, HttpMethod.GET, "/test?myTree=" + urlEncode(validTree.encode()))
        .expect(statusCode(200), jsonBodyResponse(validTree))
        .send(testContext, checkpoint);

    // Dropping the required "value" field makes the tree invalid -> 400 validation error.
    JsonObject invalidTree = validTree.copy();
    invalidTree.remove("value");

    testRequest(client, HttpMethod.GET, "/test?myTree=" + urlEncode(invalidTree.encode()))
        .expect(statusCode(400))
        .expect(badParameterResponse(
            ParameterProcessorException.ParameterProcessorErrorType.VALIDATION_ERROR,
            "myTree",
            ParameterLocation.QUERY
        ))
        .send(testContext, checkpoint);
}
 
Example 19
Source File: KafkaRebalanceAssemblyOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0 4 votes vote down vote up
/**
 * Tests the transition from New to PendingProposal and then Stopped (via annotation)
 *
 * 1. A new rebalance resource is created; it is in the New state
 * 2. The operator requests a rebalance proposal to Cruise Control REST API
 * 3. The rebalance proposal is not ready yet; the operator starts polling the Cruise Control REST API
 * 4. The rebalance resource moves to PendingProposal state
 * 6. While the operator is waiting for the proposal, the rebalance resource is annotated with strimzi.io/rebalance=stop
 * 7. The operator stops polling the Cruise Control REST API
 * 8. The rebalance resource moves to Stopped state
 */
@Test
public void testNewToPendingProposalToStoppedRebalance(Vertx vertx, VertxTestContext context) throws IOException, URISyntaxException {

    // Setup the rebalance endpoint with the number of pending calls before a response is received.
    MockCruiseControl.setupCCRebalanceResponse(ccServer, 5);

    KafkaRebalance kr =
            createKafkaRebalance(CLUSTER_NAMESPACE, CLUSTER_NAME, RESOURCE_NAME, new KafkaRebalanceSpecBuilder().build());

    Crds.kafkaRebalanceOperation(kubernetesClient).inNamespace(CLUSTER_NAMESPACE).create(kr);

    when(mockKafkaOps.getAsync(CLUSTER_NAMESPACE, CLUSTER_NAME)).thenReturn(Future.succeededFuture(kafka));
    mockRebalanceOperator(mockRebalanceOps, CLUSTER_NAMESPACE, RESOURCE_NAME, kubernetesClient, new Runnable() {
            int count = 0;

            @Override
            public void run() {
                if (++count == 4) {
                    // after a while, apply the "stop" annotation to the resource in the PendingProposal state
                    annotate(kubernetesClient, CLUSTER_NAMESPACE, RESOURCE_NAME, KafkaRebalanceAssemblyOperator.RebalanceAnnotation.stop);
                }
                return;
            }
        });

    Checkpoint checkpoint = context.checkpoint();
    kcrao.reconcileRebalance(
            new Reconciliation("test-trigger", KafkaRebalance.RESOURCE_KIND, CLUSTER_NAMESPACE, RESOURCE_NAME),
            kr).onComplete(context.succeeding(v -> {
                // the resource moved from New to PendingProposal (due to the configured Mock server pending calls)
                assertState(context, kubernetesClient, CLUSTER_NAMESPACE, RESOURCE_NAME, KafkaRebalanceAssemblyOperator.State.PendingProposal);
            })).compose(v -> {
                // trigger another reconcile to process the PendingProposal state
                KafkaRebalance kr1 = Crds.kafkaRebalanceOperation(kubernetesClient).inNamespace(CLUSTER_NAMESPACE).withName(RESOURCE_NAME).get();

                return kcrao.reconcileRebalance(
                        new Reconciliation("test-trigger", KafkaRebalance.RESOURCE_KIND, CLUSTER_NAMESPACE, RESOURCE_NAME),
                        kr1);
            }).onComplete(context.succeeding(v -> {
                // the resource moved from ProposalPending to Stopped
                assertState(context, kubernetesClient, CLUSTER_NAMESPACE, RESOURCE_NAME, KafkaRebalanceAssemblyOperator.State.Stopped);
                checkpoint.flag();
            }));
}
 
Example 20
Source File: KafkaMirrorMakerAssemblyOperatorTest.java    From strimzi-kafka-operator with Apache License 2.0 4 votes vote down vote up
@Test
public void testUpdateClusterFailure(VertxTestContext context) {
    // Verifies that createOrUpdate() fails the returned Future when the underlying
    // DeploymentConfig reconcile fails during a MirrorMaker cluster update.
    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(true);
    CrdOperator mockMirrorOps = supplier.mirrorMakerOperator;
    DeploymentOperator mockDcOps = supplier.deploymentOperations;
    PodDisruptionBudgetOperator mockPdbOps = supplier.podDisruptionBudgetOperator;
    ConfigMapOperator mockCmOps = supplier.configMapOperations;

    String clusterCmName = "foo";
    String clusterCmNamespace = "test";

    KafkaMirrorMakerConsumerSpec consumer = new KafkaMirrorMakerConsumerSpecBuilder()
            .withBootstrapServers(consumerBootstrapServers)
            .withGroupId(groupId)
            .withNumStreams(numStreams)
            .build();
    KafkaMirrorMakerProducerSpec producer = new KafkaMirrorMakerProducerSpecBuilder()
            .withBootstrapServers(producerBootstrapServers)
            .build();
    Map<String, Object> metricsCm = new HashMap<>();
    metricsCm.put("foo", "bar");
    KafkaMirrorMaker clusterCm = ResourceUtils.createKafkaMirrorMakerCluster(clusterCmNamespace, clusterCmName, image, producer, consumer, whitelist, metricsCm);
    KafkaMirrorMakerCluster mirror = KafkaMirrorMakerCluster.fromCrd(clusterCm,
            VERSIONS);
    clusterCm.getSpec().setImage("some/different:image"); // Change the image to generate some diff

    when(mockMirrorOps.get(clusterCmNamespace, clusterCmName)).thenReturn(clusterCm);
    when(mockDcOps.get(clusterCmNamespace, mirror.getName())).thenReturn(mirror.generateDeployment(new HashMap<>(), true, null, null));
    when(mockDcOps.readiness(eq(clusterCmNamespace), eq(mirror.getName()), anyLong(), anyLong())).thenReturn(Future.succeededFuture());
    when(mockDcOps.waitForObserved(anyString(), anyString(), anyLong(), anyLong())).thenReturn(Future.succeededFuture());

    // The injected failure: reconciling the Deployment fails, which must propagate
    // to the Future returned by createOrUpdate().
    ArgumentCaptor<String> dcNamespaceCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> dcNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Deployment> dcCaptor = ArgumentCaptor.forClass(Deployment.class);
    when(mockDcOps.reconcile(dcNamespaceCaptor.capture(), dcNameCaptor.capture(), dcCaptor.capture())).thenReturn(Future.failedFuture("Failed"));

    ArgumentCaptor<String> dcScaleUpNamespaceCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> dcScaleUpNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Integer> dcScaleUpReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
    when(mockDcOps.scaleUp(dcScaleUpNamespaceCaptor.capture(), dcScaleUpNameCaptor.capture(), dcScaleUpReplicasCaptor.capture())).thenReturn(Future.succeededFuture());

    ArgumentCaptor<String> dcScaleDownNamespaceCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<String> dcScaleDownNameCaptor = ArgumentCaptor.forClass(String.class);
    ArgumentCaptor<Integer> dcScaleDownReplicasCaptor = ArgumentCaptor.forClass(Integer.class);
    when(mockDcOps.scaleDown(dcScaleDownNamespaceCaptor.capture(), dcScaleDownNameCaptor.capture(), dcScaleDownReplicasCaptor.capture())).thenReturn(Future.succeededFuture());

    when(mockPdbOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture());

    when(mockMirrorOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new KafkaMirrorMaker())));
    when(mockMirrorOps.getAsync(anyString(), anyString())).thenReturn(Future.succeededFuture(clusterCm));
    when(mockMirrorOps.updateStatusAsync(any(KafkaMirrorMaker.class))).thenReturn(Future.succeededFuture());
    when(mockCmOps.reconcile(anyString(), any(), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(new ConfigMap())));

    KafkaMirrorMakerAssemblyOperator ops = new KafkaMirrorMakerAssemblyOperator(vertx,
            new PlatformFeaturesAvailability(true, kubernetesVersion),
            new MockCertManager(), new PasswordGenerator(10, "a", "a"),
            supplier,
            ResourceUtils.dummyClusterOperatorConfig(VERSIONS));

    Checkpoint async = context.checkpoint();
    // createOrUpdate must complete with a failure because the Deployment reconcile failed
    ops.createOrUpdate(new Reconciliation("test-trigger", KafkaMirrorMaker.RESOURCE_KIND, clusterCmNamespace, clusterCmName), clusterCm)
        .onComplete(context.failing(v -> context.verify(() -> async.flag())));
}