io.fabric8.kubernetes.api.model.Secret Java Examples

The following examples show how to use io.fabric8.kubernetes.api.model.Secret. Each example is drawn from an open-source project; the source file, project, and license are noted above each snippet.
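Before looking at the project snippets, here is a minimal, self-contained sketch of the typical workflow around this class: build a Secret with SecretBuilder, create it in the cluster, and decode a Base64-encoded data entry read back from it. This is an illustrative sketch, not taken from any of the projects below; it assumes the pre-6.x fabric8 kubernetes-client API (DefaultKubernetesClient, createOrReplace), and the class name, secret name, namespace, key, and value are placeholders.

import java.nio.charset.StandardCharsets;
import java.util.Base64;

import io.fabric8.kubernetes.api.model.Secret;
import io.fabric8.kubernetes.api.model.SecretBuilder;
import io.fabric8.kubernetes.client.DefaultKubernetesClient;
import io.fabric8.kubernetes.client.KubernetesClient;

public class SecretQuickStart {
    public static void main(String[] args) {
        try (KubernetesClient client = new DefaultKubernetesClient()) {
            // Values in the data section must be Base64-encoded strings
            String encoded = Base64.getEncoder()
                    .encodeToString("changeit".getBytes(StandardCharsets.UTF_8));

            Secret secret = new SecretBuilder()
                    .withNewMetadata()
                        .withName("my-secret")        // placeholder name
                        .withNamespace("default")     // placeholder namespace
                    .endMetadata()
                    .addToData("password", encoded)
                    .build();

            // Create or update the Secret in the cluster
            client.secrets().inNamespace("default").createOrReplace(secret);

            // Read it back and decode the stored value
            Secret fetched = client.secrets().inNamespace("default").withName("my-secret").get();
            String decoded = new String(
                    Base64.getDecoder().decode(fetched.getData().get("password")),
                    StandardCharsets.UTF_8);
            System.out.println("Decoded value: " + decoded);
        }
    }
}

The same fluent accessors (secrets().inNamespace(...).withName(...)) appear throughout the test examples below.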
Example #1
Source File: KafkaUserModelTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testGenerateSecretGeneratesCertificateAtCaChange() {
    Secret userCert = ResourceUtils.createUserSecretTls();
    Secret clientsCaCertSecret = ResourceUtils.createClientsCaCertSecret();
    clientsCaCertSecret.getData().put("ca.crt", Base64.getEncoder().encodeToString("different-clients-ca-crt".getBytes()));

    Secret clientsCaKeySecret = ResourceUtils.createClientsCaKeySecret();
    clientsCaKeySecret.getData().put("ca.key", Base64.getEncoder().encodeToString("different-clients-ca-key".getBytes()));

    KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, tlsUser, clientsCaCertSecret, clientsCaKeySecret, userCert);
    Secret generatedSecret = model.generateSecret();

    assertThat(new String(model.decodeFromSecret(generatedSecret, "ca.crt")),  is("different-clients-ca-crt"));
    assertThat(new String(model.decodeFromSecret(generatedSecret, "user.crt")), is("crt file"));
    assertThat(new String(model.decodeFromSecret(generatedSecret, "user.key")), is("key file"));
    assertThat(new String(model.decodeFromSecret(generatedSecret, "user.p12")), is("key store"));
    assertThat(new String(model.decodeFromSecret(generatedSecret, "user.password")), is("aaaaaaaaaa"));

    // Check owner reference
    checkOwnerReference(model.createOwnerReference(), generatedSecret);
}
 
Example #2
Source File: ZookeeperScaler.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * ZookeeperScaler constructor
 *
 * @param vertx                         Vertx instance
 * @param zooAdminProvider              Provider of the ZooKeeperAdmin client used to connect to Zookeeper
 * @param zookeeperConnectionString     Connection string to connect to the right Zookeeper
 * @param zkNodeAddress                 Function for generating the Zookeeper node addresses
 * @param clusterCaCertSecret           Secret with the Kafka cluster CA public key
 * @param coKeySecret                   Secret with the Cluster Operator public and private key
 * @param operationTimeoutMs            Operation timeout in milliseconds
 */
protected ZookeeperScaler(Vertx vertx, ZooKeeperAdminProvider zooAdminProvider, String zookeeperConnectionString, Function<Integer, String> zkNodeAddress, Secret clusterCaCertSecret, Secret coKeySecret, long operationTimeoutMs) {
    log.debug("Creating Zookeeper Scaler for cluster {}", zookeeperConnectionString);

    this.vertx = vertx;
    this.zooAdminProvider = zooAdminProvider;
    this.zookeeperConnectionString = zookeeperConnectionString;
    this.zkNodeAddress = zkNodeAddress;
    this.operationTimeoutMs = operationTimeoutMs;

    // Set up the truststore from the PEM certificate in the cluster CA secret.
    // We cannot use the PKCS12 store because custom CAs may, for simplicity, provide only a PEM certificate.
    PasswordGenerator pg = new PasswordGenerator(12);
    trustStorePassword = pg.generate();
    trustStoreFile = Util.createFileTrustStore(getClass().getName(), "p12", Ca.cert(clusterCaCertSecret, Ca.CA_CRT), trustStorePassword.toCharArray());

    // Setup keystore from PKCS12 in cluster-operator secret
    keyStorePassword = new String(Util.decodeFromSecret(coKeySecret, "cluster-operator.password"), StandardCharsets.US_ASCII);
    keyStoreFile = Util.createFileStore(getClass().getName(), "p12", Util.decodeFromSecret(coKeySecret, "cluster-operator.p12"));
}
 
Example #3
Source File: ZookeeperLeaderFinder.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Returns a Future which completes with the id of the Zookeeper leader.
 * An exponential backoff is used if no ZK node is the leader on a given attempt to find it.
 * If there is still no leader after 3 attempts, the returned Future completes with {@link #UNKNOWN_LEADER}.
 */
Future<Integer> findZookeeperLeader(String cluster, String namespace, List<Pod> pods, Secret coKeySecret) {
    if (pods.size() <= 1) {
        return Future.succeededFuture(pods.size() - 1);
    }
    String clusterCaSecretName = KafkaResources.clusterCaCertificateSecretName(cluster);
    Future<Secret> clusterCaKeySecretFuture = secretOperator.getAsync(namespace, clusterCaSecretName);
    return clusterCaKeySecretFuture.compose(clusterCaCertificateSecret -> {
        if (clusterCaCertificateSecret == null) {
            return Future.failedFuture(Util.missingSecretException(namespace, clusterCaSecretName));
        }
        try {
            NetClientOptions netClientOptions = clientOptions(coKeySecret, clusterCaCertificateSecret);
            return zookeeperLeader(cluster, namespace, pods, netClientOptions);
        } catch (Throwable e) {
            return Future.failedFuture(e);
        }
    });

}
 
Example #4
Source File: SecretHandler.java    From module-ballerina-kubernetes with Apache License 2.0
private void generate(SecretModel secretModel) throws KubernetesPluginException {
    Secret secret = new SecretBuilder()
            .withNewMetadata()
            .withNamespace(dataHolder.getNamespace())
            .withName(secretModel.getName())
            .endMetadata()
            .withData(secretModel.getData())
            .build();
    try {
        String secretContent = SerializationUtils.dumpWithoutRuntimeStateAsYaml(secret);
        KubernetesUtils.writeToFile(secretContent, SECRET_FILE_POSTFIX +
                YAML);
    } catch (IOException e) {
        String errorMessage = "error while generating yaml file for secret: " + secretModel.getName();
        throw new KubernetesPluginException(errorMessage, e);
    }
}
 
Example #5
Source File: ZookeeperLeaderFinder.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Validate the cluster CA certificate(s) passed in the given Secret
 * and return the PemTrustOptions for trusting them.
 */
protected PemTrustOptions trustOptions(Secret clusterCaCertificateSecret) {
    Base64.Decoder decoder = Base64.getDecoder();
    CertificateFactory x509 = x509Factory();
    PemTrustOptions pto = new PemTrustOptions();
    for (Map.Entry<String, String> entry : clusterCaCertificateSecret.getData().entrySet()) {
        String entryName = entry.getKey();
        if (entryName.endsWith(".crt")) {
            log.info("Trusting certificate {} from Secret {}", entryName, clusterCaCertificateSecret.getMetadata().getName());
            byte[] certBytes = decoder.decode(entry.getValue());
            try {
                x509.generateCertificate(new ByteArrayInputStream(certBytes));
            } catch (CertificateException e) {
                throw corruptCertificate(clusterCaCertificateSecret, entryName, e);
            }
            pto.addCertValue(Buffer.buffer(certBytes));
        } else {
            log.warn("Ignoring non-certificate {} in Secret {}", entryName, clusterCaCertificateSecret.getMetadata().getName());
        }
    }
    return pto;
}
 
Example #6
Source File: RabbitManagementApiProvider.java    From rabbitmq-operator with Apache License 2.0
public RabbitManagementApiFacade getApi(final RabbitMQConnectionInfo connectionInfo) {
    if (rabbitApis.containsKey(connectionInfo)) {
        return rabbitApis.get(connectionInfo);
    }

    synchronized (rabbitApis) {
        if (rabbitApis.containsKey(connectionInfo)) {
            return rabbitApis.get(connectionInfo);
        }

        final Secret adminSecret = secretsController.get(RabbitMQSecrets.getClusterSecretName(connectionInfo.getClusterName()), connectionInfo.getNamespace());
        final OkHttpClient.Builder okHttpClientBuilder = new OkHttpClient.Builder().addInterceptor(new RabbitManagementApiLogger());
        final RabbitManagementApi api = RabbitManagementApiFactory.newInstance(
                okHttpClientBuilder,
                buildApiUri(connectionInfo),
                secretsController.decodeSecretPayload(adminSecret.getData().get(Constants.Secrets.USERNAME_KEY)),
                secretsController.decodeSecretPayload(adminSecret.getData().get(Constants.Secrets.PASSWORD_KEY))
        );

        final RabbitManagementApiFacade facade = new RabbitManagementApiFacade(api);
        rabbitApis.put(connectionInfo, facade);

        return facade;
    }
}
 
Example #7
Source File: SecretUtils.java    From strimzi-kafka-operator with Apache License 2.0
public static void waitForClusterSecretsDeletion(String clusterName) {
    LOGGER.info("Waiting for Secret {} deletion", clusterName);
    TestUtils.waitFor("Secret " + clusterName + " deletion", Constants.POLL_INTERVAL_FOR_RESOURCE_READINESS, Constants.TIMEOUT_FOR_SECRET_CREATION,
        () -> {
            List<Secret> secretList = kubeClient().listSecrets(Labels.STRIMZI_CLUSTER_LABEL, clusterName);
            if (secretList.isEmpty()) {
                return true;
            } else {
                for (Secret secret : secretList) {
                    LOGGER.warn("Secret {} is not deleted yet! Triggering force delete by cmd client!", secret.getMetadata().getName());
                    cmdKubeClient().deleteByName("secret", secret.getMetadata().getName());
                }
                return false;
            }
        });
    LOGGER.info("Secret {} deleted", clusterName);
}
 
Example #8
Source File: KafkaRollerTest.java    From strimzi-kafka-operator with Apache License 2.0
private TestingKafkaRoller(StatefulSet sts, Secret clusterCaCertSecret, Secret coKeySecret,
                          PodOperator podOps,
                          RuntimeException acOpenException, Throwable acCloseException,
                          Throwable controllerException,
                          Function<Integer, Future<Boolean>> canRollFn,
                          int... controllers) {
    super(KafkaRollerTest.vertx, podOps, 500, 1000,
        () -> new BackOff(10L, 2, 4),
        sts, clusterCaCertSecret, coKeySecret);
    this.controllers = controllers;
    this.controllerCall = 0;
    this.acOpenException = acOpenException;
    this.controllerException = controllerException;
    this.acCloseException = acCloseException;
    this.canRollFn = canRollFn;
    this.unclosedAdminClients = new IdentityHashMap<>();
}
 
Example #9
Source File: EnvironmentVariableSecretApplierTest.java    From che with Eclipse Public License 2.0
@Test(
    expectedExceptions = InfrastructureException.class,
    expectedExceptionsMessageRegExp =
        "Unable to mount secret 'test_secret': It is configured to be mount as a environment variable, but its name was not specified. Please define the 'che.eclipse.org/env-name' annotation on the secret to specify it.")
public void shouldThrowExceptionWhenNoEnvNameSpecifiedSingleValue() throws Exception {
  Container container_match = new ContainerBuilder().withName("maven").build();

  when(podSpec.getContainers()).thenReturn(ImmutableList.of(container_match));

  Secret secret =
      new SecretBuilder()
          .withData(singletonMap("foo", "random"))
          .withMetadata(
              new ObjectMetaBuilder()
                  .withName("test_secret")
                  .withAnnotations(
                      ImmutableMap.of(ANNOTATION_MOUNT_AS, "env", ANNOTATION_AUTOMOUNT, "true"))
                  .withLabels(emptyMap())
                  .build())
          .build();

  when(secrets.get(any(LabelSelector.class))).thenReturn(singletonList(secret));
  secretApplier.applySecret(environment, runtimeIdentity, secret);
}
 
Example #10
Source File: SecretCertProvider.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Create a Kubernetes secret containing the provided private key and related certificate
 *
 * @param namespace Namespace
 * @param name Secret name
 * @param keyKey key field in the Secret data section for the private key
 * @param certKey key field in the Secret data section for the certificate
 * @param key private key to store
 * @param cert certificate to store
 * @param storeKey key field in the Secret data section for the PKCS12 store
 * @param storePasswordKey key field in the Secret data section for the PKCS12 store password
 * @param store PKCS12 store
 * @param storePassword PKCS12 store password
 * @param labels Labels to add to the Secret
 * @param annotations annotations to add to the Secret
 * @param ownerReference owner of the Secret
 * @return the Secret
 */
public Secret createSecret(String namespace, String name,
                           String keyKey, String certKey,
                           byte[] key, byte[] cert,
                           String storeKey, String storePasswordKey,
                           byte[] store, byte[] storePassword,
                           Map<String, String> labels, Map<String, String> annotations,
                           OwnerReference ownerReference) {
    Map<String, String> data = new HashMap<>(4);

    Base64.Encoder encoder = Base64.getEncoder();

    data.put(keyKey, encoder.encodeToString(key));
    data.put(certKey, encoder.encodeToString(cert));
    if (store != null) {
        data.put(storeKey, encoder.encodeToString(store));
    }
    if (storePassword != null) {
        data.put(storePasswordKey, encoder.encodeToString(storePassword));
    }

    return createSecret(namespace, name, data, labels, annotations, ownerReference);
}
 
Example #11
Source File: KafkaUserModelTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testGenerateSecretGeneratesPasswordFromExistingTlsSecret()    {
    Secret userCert = ResourceUtils.createUserSecretTls();
    KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, scramShaUser, clientsCaCert, clientsCaKey, userCert);
    Secret generated = model.generateSecret();

    assertThat(generated.getMetadata().getName(), is(ResourceUtils.NAME));
    assertThat(generated.getMetadata().getNamespace(), is(ResourceUtils.NAMESPACE));
    assertThat(generated.getMetadata().getLabels(),
            is(Labels.fromMap(ResourceUtils.LABELS)
                    .withStrimziKind(KafkaUser.RESOURCE_KIND)
                    .withKubernetesName(KafkaUserModel.KAFKA_USER_OPERATOR_NAME)
                    .withKubernetesInstance(ResourceUtils.NAME)
                    .withKubernetesPartOf(ResourceUtils.NAME)
                    .withKubernetesManagedBy(KafkaUserModel.KAFKA_USER_OPERATOR_NAME)
                    .toMap()));

    assertThat(generated.getData().keySet(), is(singleton("password")));
    assertThat(new String(Base64.getDecoder().decode(generated.getData().get(KafkaUserModel.KEY_PASSWORD))), is("aaaaaaaaaa"));

    // Check owner reference
    checkOwnerReference(model.createOwnerReference(), generated);
}
 
Example #12
Source File: PkiRefresh.java    From vault-crd with Apache License 2.0
private boolean certificateIsNearExpirationDate(Secret secretByVault) {
    if (secretByVault.getMetadata().getAnnotations() != null) {
        String expiration = secretByVault.getMetadata().getAnnotations().get(crdName + COMPARE_ANNOTATION);

        Optional<Date> expirationDate = parseDate(expiration);
        if (expirationDate.isPresent()) {
            Date nextIntervals = new Date();
            nextIntervals.setTime(nextIntervals.getTime() + (interval * 1000 * 5));

            return nextIntervals.after(expirationDate.get());
        } else {
            log.error("Failed to parse date of secret {} in namespace {}", secretByVault.getMetadata().getName(), secretByVault.getMetadata().getNamespace());
        }
    }

    return true;
}
 
Example #13
Source File: StatefulSetOperator.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Asynchronously perform a rolling update of all the pods in the StatefulSet identified by the given
 * {@code namespace} and {@code name}, returning a Future that will complete when the rolling update
 * is complete. Starting with pod 0, each pod is deleted and re-created automatically by the StatefulSet
 * controller; once the pod has been re-created, it is polled until it is ready again before the process
 * proceeds to the pod with the next higher number.
 * @param sts The StatefulSet
 * @param podNeedsRestart Predicate for deciding whether the pod needs to be restarted.
 * @return A future that completes when any necessary rolling has been completed.
 */
public Future<Void> maybeRollingUpdate(StatefulSet sts, Function<Pod, String> podNeedsRestart) {
    String cluster = sts.getMetadata().getLabels().get(Labels.STRIMZI_CLUSTER_LABEL);
    String namespace = sts.getMetadata().getNamespace();
    Future<Secret> clusterCaKeySecretFuture = secretOperations.getAsync(
            namespace, KafkaResources.clusterCaCertificateSecretName(cluster));
    Future<Secret> coKeySecretFuture = secretOperations.getAsync(
            namespace, ClusterOperator.secretName(cluster));
    return CompositeFuture.join(clusterCaKeySecretFuture, coKeySecretFuture).compose(compositeFuture -> {
        Secret clusterCaKeySecret = compositeFuture.resultAt(0);
        if (clusterCaKeySecret == null) {
            return Future.failedFuture(Util.missingSecretException(namespace, KafkaCluster.clusterCaKeySecretName(cluster)));
        }
        Secret coKeySecret = compositeFuture.resultAt(1);
        if (coKeySecret == null) {
            return Future.failedFuture(Util.missingSecretException(namespace, ClusterOperator.secretName(cluster)));
        }
        return maybeRollingUpdate(sts, podNeedsRestart, clusterCaKeySecret, coKeySecret);
    });
}
 
Example #14
Source File: CamelKPublishHandler.java    From syndesis with Apache License 2.0
@SuppressWarnings({"unchecked"})
protected StateUpdate createIntegration(IntegrationDeployment integrationDeployment, CustomResourceDefinition integrationCRD) {

    logInfo(integrationDeployment,"Creating Camel-K resource");

    prepareDeployment(integrationDeployment);

    io.syndesis.server.controller.integration.camelk.crd.Integration camelkIntegration = createIntegrationCR(integrationDeployment);
    camelkIntegration = applyCustomizers(integrationDeployment, camelkIntegration);

    Secret camelkSecret = createIntegrationSecret(integrationDeployment);

    getOpenShiftService().createOrReplaceSecret(camelkSecret);
    getOpenShiftService().createOrReplaceCR(integrationCRD,
        io.syndesis.server.controller.integration.camelk.crd.Integration.class,
        IntegrationList.class,
        DoneableIntegration.class,
        camelkIntegration);
    logInfo(integrationDeployment,"Camel-K resource created "+camelkIntegration.getMetadata().getName());
    return new StateUpdate(IntegrationDeploymentState.Pending, Collections.emptyMap());
}
 
Example #15
Source File: ZookeeperClusterTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testGenerateBrokerSecret() throws CertificateParsingException {
    ClusterCa clusterCa = new ClusterCa(new OpenSslCertManager(), new PasswordGenerator(10, "a", "a"), cluster, null, null);
    clusterCa.createRenewOrReplace(namespace, cluster, emptyMap(), null, true);

    Secret secret = zc.generateNodesSecret(clusterCa, ka, true);
    assertThat(secret.getData().keySet(), is(set(
            "foo-zookeeper-0.crt",  "foo-zookeeper-0.key", "foo-zookeeper-0.p12", "foo-zookeeper-0.password",
            "foo-zookeeper-1.crt", "foo-zookeeper-1.key", "foo-zookeeper-1.p12", "foo-zookeeper-1.password",
            "foo-zookeeper-2.crt", "foo-zookeeper-2.key", "foo-zookeeper-2.p12", "foo-zookeeper-2.password")));
    X509Certificate cert = Ca.cert(secret, "foo-zookeeper-0.crt");
    assertThat(cert.getSubjectDN().getName(), is("CN=foo-zookeeper, O=io.strimzi"));
    assertThat(new HashSet<Object>(cert.getSubjectAlternativeNames()), is(set(
            asList(2, "foo-zookeeper-0.foo-zookeeper-nodes.test.svc"),
            asList(2, "foo-zookeeper-0.foo-zookeeper-nodes.test.svc.cluster.local"),
            asList(2, "foo-zookeeper-client"),
            asList(2, "foo-zookeeper-client.test"),
            asList(2, "foo-zookeeper-client.test.svc"),
            asList(2, "foo-zookeeper-client.test.svc.cluster.local"),
            asList(2, "*.foo-zookeeper-client.test.svc"),
            asList(2, "*.foo-zookeeper-client.test.svc.cluster.local"),
            asList(2, "*.foo-zookeeper-nodes.test.svc"),
            asList(2, "*.foo-zookeeper-nodes.test.svc.cluster.local"))));

}
 
Example #16
Source File: KafkaUserModelTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testGenerateSecretGeneratesPasswordKeepingExistingScramShaPassword()    {
    Secret scramShaSecret = ResourceUtils.createUserSecretScramSha();
    String existingPassword = scramShaSecret.getData().get(KafkaUserModel.KEY_PASSWORD);
    KafkaUserModel model = KafkaUserModel.fromCrd(mockCertManager, passwordGenerator, scramShaUser, clientsCaCert, clientsCaKey, scramShaSecret);
    Secret generated = model.generateSecret();

    assertThat(generated.getMetadata().getName(), is(ResourceUtils.NAME));
    assertThat(generated.getMetadata().getNamespace(), is(ResourceUtils.NAMESPACE));
    assertThat(generated.getMetadata().getLabels(),
            is(Labels.fromMap(ResourceUtils.LABELS)
                    .withKubernetesName(KafkaUserModel.KAFKA_USER_OPERATOR_NAME)
                    .withKubernetesInstance(ResourceUtils.NAME)
                    .withKubernetesPartOf(ResourceUtils.NAME)
                    .withKubernetesManagedBy(KafkaUserModel.KAFKA_USER_OPERATOR_NAME)
                    .withStrimziKind(KafkaUser.RESOURCE_KIND)
                    .toMap()));
    assertThat(generated.getData().keySet(), is(singleton(KafkaUserModel.KEY_PASSWORD)));
    assertThat(generated.getData(), hasEntry(KafkaUserModel.KEY_PASSWORD, existingPassword));

    // Check owner reference
    checkOwnerReference(model.createOwnerReference(), generated);
}
 
Example #17
Source File: KafkaAssemblyOperator.java    From strimzi-kafka-operator with Apache License 2.0
Future<ReconciliationState> kafkaJmxSecret() {
    if (kafkaCluster.isJmxAuthenticated()) {
        Future<Secret> secretFuture = secretOperations.getAsync(namespace, KafkaCluster.jmxSecretName(name));
        return secretFuture.compose(res -> {
            if (res == null) {
                return withVoid(secretOperations.reconcile(namespace, KafkaCluster.jmxSecretName(name),
                        kafkaCluster.generateJmxSecret()));
            }
            return withVoid(Future.succeededFuture(this));
        });

    }
    return withVoid(secretOperations.reconcile(namespace, KafkaCluster.jmxSecretName(name), null));
}
 
Example #18
Source File: ZookeeperLeaderFinderTest.java    From strimzi-kafka-operator with Apache License 2.0
@Test
public void testSecretsCorrupted(VertxTestContext context) {
    SecretOperator mock = mock(SecretOperator.class);
    ZookeeperLeaderFinder finder = new ZookeeperLeaderFinder(vertx, mock, this::backoff);

    when(mock.getAsync(eq(NAMESPACE), eq(KafkaResources.clusterCaCertificateSecretName(CLUSTER))))
            .thenReturn(Future.succeededFuture(
                    new SecretBuilder()
                            .withNewMetadata()
                            .withName(KafkaResources.clusterCaCertificateSecretName(CLUSTER))
                            .withNamespace(NAMESPACE)
                            .endMetadata()
                            .withData(map(Ca.CA_CRT, "notacert"))
                            .build()));

    Secret secretWithBadCertificate = new SecretBuilder()
            .withNewMetadata()
            .withName(ClusterOperator.secretName(CLUSTER))
            .withNamespace(NAMESPACE)
            .endMetadata()
            .withData(map("cluster-operator.key", "notacert",
                    "cluster-operator.crt", "notacert",
                    "cluster-operator.p12", "notatruststore",
                    "cluster-operator.password", "notapassword"))
            .build();

    Checkpoint a = context.checkpoint();

    finder.findZookeeperLeader(CLUSTER, NAMESPACE, asList(createPodWithId(0), createPodWithId(1)), secretWithBadCertificate)
            .onComplete(context.failing(e -> context.verify(() -> {
                assertThat(e, instanceOf(RuntimeException.class));
                assertThat(e.getMessage(), is("Bad/corrupt certificate found in data.cluster-operator\\.crt of Secret testcluster-cluster-operator-certs in namespace testns"));
                a.flag();
            })));

}
 
Example #19
Source File: PropertiesTest.java    From vault-crd with Apache License 2.0
@After
@Before
public void cleanup() {
    Secret secret = client.secrets().inNamespace("default").withName("properties").get();
    if (secret != null) {
        client.secrets().inNamespace("default").withName("properties").cascading(true).delete();
    }
}
 
Example #20
Source File: KubernetesGCPServiceAccountSecretManagerTest.java    From styx with Apache License 2.0
@Test
public void shouldRemoveServiceAccountSecretsInPastEpoch() throws Exception {
  final Secret secret = fakeServiceAccountKeySecret(
      SERVICE_ACCOUNT, PAST_SECRET_EPOCH, "old-json-key", "old-p12-key", EXPIRED_CREATION_TIMESTAMP.toString());

  when(secretList.getItems()).thenReturn(List.of(secret));

  sut.cleanup();

  verify(serviceAccountKeyManager).deleteKey(keyName(SERVICE_ACCOUNT, "old-json-key"));
  verify(serviceAccountKeyManager).deleteKey(keyName(SERVICE_ACCOUNT, "old-p12-key"));
  verify(k8sClient).deleteSecret(secret.getMetadata().getName());
  verify(stats).recordServiceAccountCleanup();
}
 
Example #21
Source File: KubernetesSecrets.java    From che with Eclipse Public License 2.0
/**
 * Finds secrets matching specified label selector.
 *
 * @param labelSelector selector to filter secrets
 * @return matched secrets list
 * @throws InfrastructureException when any exception occurs
 */
public List<Secret> get(LabelSelector labelSelector) throws InfrastructureException {
  try {
    return clientFactory
        .create(workspaceId)
        .secrets()
        .inNamespace(namespace)
        .withLabelSelector(labelSelector)
        .list()
        .getItems();
  } catch (KubernetesClientException e) {
    throw new KubernetesInfrastructureException(e);
  }
}
 
Example #22
Source File: ZookeeperLeaderFinderTest.java    From strimzi-kafka-operator with Apache License 2.0
@Override
NetClientOptions clientOptions(Secret coCertKeySecret, Secret clusterCaCertificateSecret) {
    return new NetClientOptions()
            .setKeyCertOptions(coCertificate.keyCertOptions())
            .setTrustOptions(zkCertificate.trustOptions())
            .setSsl(true);
}
 
Example #23
Source File: KafkaClusterTest.java    From strimzi-kafka-operator with Apache License 2.0
private Secret generateBrokerSecret(Set<String> externalBootstrapAddress, Map<Integer, Set<String>> externalAddresses) {
    ClusterCa clusterCa = new ClusterCa(new OpenSslCertManager(), new PasswordGenerator(10, "a", "a"), cluster, null, null);
    clusterCa.createRenewOrReplace(namespace, cluster, emptyMap(), null, true);

    kc.generateCertificates(kafkaAssembly, clusterCa, externalBootstrapAddress, externalAddresses, true);
    return kc.generateBrokersSecret();
}
 
Example #24
Source File: KubernetesInternalRuntimeTest.java    From che with Eclipse Public License 2.0
@Test
public void startKubernetesEnvironmentWithDeploymentsAndPods() throws Exception {
  when(k8sEnv.getDeploymentsCopy()).thenReturn(deploymentsMap);
  when(k8sEnv.getSecrets()).thenReturn(ImmutableMap.of("secret", new Secret()));
  when(k8sEnv.getConfigMaps()).thenReturn(ImmutableMap.of("configMap", new ConfigMap()));

  internalRuntime.start(emptyMap());

  verify(toolingProvisioner).provision(IDENTITY, startSynchronizer, k8sEnv, emptyMap());
  verify(internalEnvironmentProvisioner).provision(IDENTITY, k8sEnv);
  verify(kubernetesEnvironmentProvisioner).provision(k8sEnv, IDENTITY);
  verify(deployments).deploy(any(Deployment.class));
  verify(deployments).deploy(any(Pod.class));
  verify(ingresses).create(any());
  verify(services).create(any());
  verify(secrets).create(any());
  verify(configMaps).create(any());
  verify(namespace.deployments(), times(1)).watchEvents(any());
  verify(eventService, times(6)).publish(any());
  verifyOrderedEventsChains(
      new MachineStatusEvent[] {
        newEvent(M1_NAME, STARTING), newEvent(M1_NAME, STARTING), newEvent(M1_NAME, RUNNING)
      },
      new MachineStatusEvent[] {
        newEvent(M2_NAME, STARTING), newEvent(M2_NAME, STARTING), newEvent(M2_NAME, RUNNING)
      });
  verify(serverCheckerFactory).create(IDENTITY, M1_NAME, emptyMap());
  verify(serverCheckerFactory).create(IDENTITY, M2_NAME, emptyMap());
  verify(serversChecker, times(2)).startAsync(any());
  verify(namespace.deployments(), times(1)).stopWatch();
}
 
Example #25
Source File: PatchServiceTest.java    From jkube with Eclipse Public License 2.0
@Test
public void testSecretPatching() {
    Secret oldSecret = new SecretBuilder()
            .withNewMetadata().withName("secret").endMetadata()
            .addToData("test", "dGVzdA==")
            .build();
    Secret newSecret = new SecretBuilder()
            .withNewMetadata().withName("secret").endMetadata()
            .addToStringData("test", "test")
            .build();
    WebServerEventCollector<OpenShiftMockServer> collector = new WebServerEventCollector<>(mockServer);
    mockServer.expect().get().withPath("/api/v1/namespaces/test/secrets/secret")
            .andReply(collector.record("get-secret").andReturn(200, oldSecret)).always();
    mockServer.expect().patch().withPath("/api/v1/namespaces/test/secrets/secret")
            .andReply(collector.record("patch-secret")
                    .andReturn(200, new SecretBuilder().withMetadata(newSecret.getMetadata())
                            .addToStringData(oldSecret.getData()).build())).once();

    OpenShiftClient client = mockServer.createOpenShiftClient();

    PatchService patchService = new PatchService(client, log);

    patchService.compareAndPatchEntity("test", newSecret, oldSecret);
    collector.assertEventsRecordedInOrder("get-secret", "get-secret", "patch-secret");
    assertEquals("[{\"op\":\"remove\",\"path\":\"/data\"},{\"op\":\"add\",\"path\":\"/stringData\",\"value\":{\"test\":\"test\"}}]", collector.getBodies().get(2));

}
 
Example #26
Source File: ResourceUtils.java    From strimzi-kafka-operator with Apache License 2.0
public static Secret createClientsCaKeySecret()  {
    return new SecretBuilder()
            .withNewMetadata()
                .withName(ResourceUtils.CA_KEY_NAME)
                .withNamespace(NAMESPACE)
            .endMetadata()
            .addToData("ca.key", Base64.getEncoder().encodeToString("clients-ca-key".getBytes()))
            .build();
}
 
Example #27
Source File: KubernetesGCPServiceAccountSecretManagerTest.java    From styx with Apache License 2.0
@Test
public void shouldHandleErrorsWhenDeletingServiceAccountKeysAndSecret() throws Exception {
  final Secret secret1 = fakeServiceAccountKeySecret(
      SERVICE_ACCOUNT_1, SECRET_EPOCH, "json-key-1", "p12-key-1", EXPIRED_CREATION_TIMESTAMP.toString());
  final Secret secret2 = fakeServiceAccountKeySecret(
      SERVICE_ACCOUNT_2, SECRET_EPOCH, "json-key-2", "p12-key-2", EXPIRED_CREATION_TIMESTAMP.toString());
  final Secret secret3 = fakeServiceAccountKeySecret(
      SERVICE_ACCOUNT_3, SECRET_EPOCH, "json-key-3", "p12-key-3", EXPIRED_CREATION_TIMESTAMP.toString());

  when(secretList.getItems()).thenReturn(List.of(secret1, secret2, secret3));

  doThrow(new KubernetesClientException("fail delete secret1"))
      .when(k8sClient).deleteSecret(secret1.getMetadata().getName());
  doThrow(new IOException("fail delete json-key-2"))
      .when(serviceAccountKeyManager).deleteKey(keyName(SERVICE_ACCOUNT_2,"json-key-2"));
  doThrow(new IOException("fail delete p12-key-3"))
      .when(serviceAccountKeyManager).deleteKey(keyName(SERVICE_ACCOUNT_3,"p12-key-3"));

  sut.cleanup();

  verify(serviceAccountKeyManager).deleteKey(keyName(SERVICE_ACCOUNT_1, "json-key-1"));
  verify(serviceAccountKeyManager).deleteKey(keyName(SERVICE_ACCOUNT_1, "p12-key-1"));
  verify(k8sClient).deleteSecret(secret1.getMetadata().getName());

  verify(serviceAccountKeyManager).deleteKey(keyName(SERVICE_ACCOUNT_2, "json-key-2"));
  verify(serviceAccountKeyManager, never()).deleteKey(keyName(SERVICE_ACCOUNT_2, "p12-key-2"));
  verify(k8sClient, never()).deleteSecret(secret2.getMetadata().getName());

  verify(serviceAccountKeyManager).deleteKey(keyName(SERVICE_ACCOUNT_3, "json-key-3"));
  verify(serviceAccountKeyManager).deleteKey(keyName(SERVICE_ACCOUNT_3, "p12-key-3"));
  verify(k8sClient, never()).deleteSecret(secret3.getMetadata().getName());

  verify(stats, never()).recordServiceAccountCleanup();
}
 
Example #28
Source File: AddDockerConfigJsonSecretDecorator.java    From dekorate with Apache License 2.0
@Override
public void visit(KubernetesListBuilder list) {
  String name = Strings.isNotNullOrEmpty(this.name) ? this.name : getMandatoryDeploymentMetadata(list).getName();
  Secret secret = new SecretBuilder()
    .withNewMetadata()
      .withName(name)
      .withAnnotations(this.annotations)
    .endMetadata()
    .withType(TYPE)
    .addToData(DOT_DOCKER_CONFIG_JSON, this.content)
    .build();

  list.addToSecretItems(secret);
}
 
Example #29
Source File: KafkaCluster.java    From strimzi-kafka-operator with Apache License 2.0 5 votes vote down vote up
/**
 * Generate the Secret containing the Kafka brokers' certificates signed by the cluster CA certificate, which are
 * used for TLS-based internal communication with Zookeeper.
 * It also contains the related Kafka brokers' private keys.
 *
 * @return The generated Secret
 */
public Secret generateBrokersSecret() {

    Map<String, String> data = new HashMap<>(replicas * 4);
    for (int i = 0; i < replicas; i++) {
        CertAndKey cert = brokerCerts.get(KafkaCluster.kafkaPodName(cluster, i));
        data.put(KafkaCluster.kafkaPodName(cluster, i) + ".key", cert.keyAsBase64String());
        data.put(KafkaCluster.kafkaPodName(cluster, i) + ".crt", cert.certAsBase64String());
        data.put(KafkaCluster.kafkaPodName(cluster, i) + ".p12", cert.keyStoreAsBase64String());
        data.put(KafkaCluster.kafkaPodName(cluster, i) + ".password", cert.storePasswordAsBase64String());
    }
    return createSecret(KafkaCluster.brokersSecretName(cluster), data);
}
 
Example #30
Source File: TillerInstaller.java    From microbean-helm with Apache License 2.0
protected Secret createSecret(final String namespace,
                              final URI tlsKeyUri,
                              final URI tlsCertUri,
                              final URI tlsCaCertUri,
                              final Map<String, String> labels)
  throws IOException {
  
  final Secret secret = new Secret();
  secret.setType("Opaque");

  final Map<String, String> secretData = new HashMap<>();
  
  try (final InputStream tlsKeyStream = read(tlsKeyUri)) {
    if (tlsKeyStream != null) {
      secretData.put("tls.key", Base64.getEncoder().encodeToString(toByteArray(tlsKeyStream)));
    }
  }

  try (final InputStream tlsCertStream = read(tlsCertUri)) {
    if (tlsCertStream != null) {
      secretData.put("tls.crt", Base64.getEncoder().encodeToString(toByteArray(tlsCertStream)));
    }
  }
  
  try (final InputStream tlsCaCertStream = read(tlsCaCertUri)) {
    if (tlsCaCertStream != null) {
      secretData.put("ca.crt", Base64.getEncoder().encodeToString(toByteArray(tlsCaCertStream)));
    }
  }

  secret.setData(secretData);

  final ObjectMeta metadata = new ObjectMeta();
  metadata.setNamespace(normalizeNamespace(namespace));
  metadata.setName(SECRET_NAME);
  metadata.setLabels(normalizeLabels(labels));
  secret.setMetadata(metadata);
  
  return secret;
}