io.fabric8.kubernetes.api.model.LabelSelectorBuilder Java Examples

The following examples show how to use io.fabric8.kubernetes.api.model.LabelSelectorBuilder. They are drawn from open-source projects; the originating project and source file are noted above each example.
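Before the project examples, here is a minimal, self-contained sketch of the builder itself. It is illustrative only; the class name and the label keys and values are invented.

import io.fabric8.kubernetes.api.model.LabelSelector;
import io.fabric8.kubernetes.api.model.LabelSelectorBuilder;

import java.util.HashMap;
import java.util.Map;

public class LabelSelectorSketch {
    public static void main(String[] args) {
        Map<String, String> labels = new HashMap<>();
        labels.put("app", "my-app");
        labels.put("tier", "backend");

        // Equivalent to the YAML:
        //   matchLabels:
        //     app: my-app
        //     tier: backend
        LabelSelector selector = new LabelSelectorBuilder()
                .withMatchLabels(labels)
                .build();

        // addToMatchLabels appends a single entry to the existing map
        LabelSelector extended = new LabelSelectorBuilder(selector)
                .addToMatchLabels("release", "stable")
                .build();

        System.out.println(selector.getMatchLabels());
        System.out.println(extended.getMatchLabels());
    }
}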
Example #1
Source File: SecretAsContainerResourceProvisioner.java    From che with Eclipse Public License 2.0
public void provision(E env, RuntimeIdentity runtimeIdentity, KubernetesNamespace namespace)
    throws InfrastructureException {
  LabelSelector selector = new LabelSelectorBuilder().withMatchLabels(secretLabels).build();
  for (Secret secret : namespace.secrets().get(selector)) {
    if (secret.getMetadata().getAnnotations() == null) {
      throw new InfrastructureException(
          format(
              "Unable to mount secret '%s': it has missing required annotations. Please check documentation for secret format guide.",
              secret.getMetadata().getName()));
    }
    String mountType = secret.getMetadata().getAnnotations().get(ANNOTATION_MOUNT_AS);
    if ("env".equalsIgnoreCase(mountType)) {
      environmentVariableSecretApplier.applySecret(env, runtimeIdentity, secret);
    } else if ("file".equalsIgnoreCase(mountType)) {
      fileSecretApplier.applySecret(env, runtimeIdentity, secret);
    } else {
      throw new InfrastructureException(
          format(
              "Unable to mount secret '%s': it has missing or unknown type of the mount. Please make sure that '%s' annotation has value either 'env' or 'file'.",
              secret.getMetadata().getName(), ANNOTATION_MOUNT_AS));
    }
  }
}
 
Example #2
Source File: TektonHandler.java    From dekorate with Apache License 2.0
public PersistentVolumeClaim createSourceWorkspacePvc(TektonConfig config) {
  Map<String, Quantity> requests = new HashMap<>();
  requests.put("storage", new QuantityBuilder()
      .withAmount(String.valueOf(config.getSourceWorkspaceClaim().getSize()))
      .withFormat(config.getSourceWorkspaceClaim().getUnit())
      .build());
  LabelSelector selector = null;
  if (config.getSourceWorkspaceClaim().getMatchLabels().length != 0) {
    selector = new LabelSelectorBuilder()
        .withMatchLabels(Arrays.stream(config.getSourceWorkspaceClaim().getMatchLabels())
            .collect(Collectors.toMap(l -> l.getKey(), l -> l.getValue())))
        .build();
  }
  return new PersistentVolumeClaimBuilder()
    .withNewMetadata()
    .withName(sourceWorkspaceClaimName(config))
    .endMetadata()
    .withNewSpec()
    .withAccessModes(config.getSourceWorkspaceClaim().getAccessMode().name())
    .withStorageClassName(config.getSourceWorkspaceClaim().getStorageClass())
    .withNewResources().withRequests(requests).endResources()
    .withSelector(selector)
    .endSpec()
    .build();
}
 
Example #3
Source File: TektonHandler.java    From dekorate with Apache License 2.0
public PersistentVolumeClaim createM2WorkspacePvc(TektonConfig config) {
  Map<String, Quantity> requests = new HashMap<>();
  requests.put("storage", new QuantityBuilder()
      .withAmount(String.valueOf(config.getM2WorkspaceClaim().getSize()))
      .withFormat(config.getM2WorkspaceClaim().getUnit())
      .build());
  LabelSelector selector = null;
  if (config.getM2WorkspaceClaim().getMatchLabels().length != 0) {
    selector = new LabelSelectorBuilder()
        .withMatchLabels(Arrays.stream(config.getM2WorkspaceClaim().getMatchLabels())
            .collect(Collectors.toMap(l -> l.getKey(), l -> l.getValue())))
        .build();
  }

  return new PersistentVolumeClaimBuilder()
    .withNewMetadata()
    .withName(m2WorkspaceClaimName(config))
    .endMetadata()
    .withNewSpec()
    .withAccessModes(config.getM2WorkspaceClaim().getAccessMode().name())
    .withStorageClassName(config.getM2WorkspaceClaim().getStorageClass())
    .withNewResources().withRequests(requests).endResources()
    .withSelector(selector)
    .endSpec()
    .build();
}
 
Example #4
Source File: DeploymentConfigUtils.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Checks whether all pods of the given DeploymentConfig have been rolled (recreated) since the given snapshot was taken
 * @param name DeploymentConfig name
 * @param snapshot Snapshot of the DeploymentConfig's pods taken before the rolling update
 * @return true when all pods of the DeploymentConfig have been recreated
 */
public static boolean depConfigHasRolled(String name, Map<String, String> snapshot) {
    LOGGER.debug("Existing snapshot: {}", new TreeMap<>(snapshot));
    LabelSelector selector = new LabelSelectorBuilder().addToMatchLabels(kubeClient().getDeploymentConfigSelectors(name)).build();
    Map<String, String> map = PodUtils.podSnapshot(selector);
    LOGGER.debug("Current  snapshot: {}", new TreeMap<>(map));
    int current = map.size();
    map.keySet().retainAll(snapshot.keySet());
    if (current == snapshot.size() && map.isEmpty()) {
        LOGGER.info("All pods seem to have rolled");
        return true;
    } else {
        LOGGER.debug("Some pods still need to roll: {}", map);
        return false;
    }
}
 
Example #5
Source File: AbstractModel.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Creates the PodDisruptionBudget
 *
 * @return The default PodDisruptionBudget
 */
protected PodDisruptionBudget createPodDisruptionBudget()   {
    return new PodDisruptionBudgetBuilder()
            .withNewMetadata()
                .withName(name)
                .withLabels(getLabelsWithStrimziName(name, templatePodDisruptionBudgetLabels).toMap())
                .withNamespace(namespace)
                .withAnnotations(templatePodDisruptionBudgetAnnotations)
                .withOwnerReferences(createOwnerReference())
            .endMetadata()
            .withNewSpec()
                .withNewMaxUnavailable(templatePodDisruptionBudgetMaxUnavailable)
                .withSelector(new LabelSelectorBuilder().withMatchLabels(getSelectorLabels().toMap()).build())
            .endSpec()
            .build();
}
 
Example #6
Source File: DeploymentConfigUtils.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Wait until the given DeploymentConfig and the expected number of its pods are ready.
 * @param depConfigName The name of the DeploymentConfig.
 * @param expectPods The expected number of ready pods.
 * @return A map of pod name to resource version for the DeploymentConfig's pods.
 */
public static Map<String, String> waitForDeploymentConfigAndPodsReady(String depConfigName, int expectPods) {
    waitForDeploymentConfigReady(depConfigName);

    LOGGER.info("Waiting for Pod(s) of DeploymentConfig {} to be ready", depConfigName);

    LabelSelector deploymentConfigSelector =
        new LabelSelectorBuilder().addToMatchLabels(kubeClient().getDeploymentConfigSelectors(depConfigName)).build();

    PodUtils.waitForPodsReady(deploymentConfigSelector, expectPods, true);
    LOGGER.info("DeploymentConfig {} is ready", depConfigName);

    return depConfigSnapshot(depConfigName);
}
 
Example #7
Source File: AppsDeploymentTranformer.java    From apollo with Apache License 2.0
@Override
public Deployment transform(Deployment deployment,
                            io.logz.apollo.models.Deployment apolloDeployment,
                            Service apolloService,
                            Environment apolloEnvironment,
                            DeployableVersion apolloDeployableVersion,
                            Group apolloGroup) {

    deployment.setApiVersion("apps/v1");

    LabelSelectorBuilder labelSelectorBuilder = new LabelSelectorBuilder();

    // Use every pod template label except apollo_unique_identifier in the selector
    deployment.getSpec().getTemplate().getMetadata().getLabels().entrySet()
              .stream()
              .filter(label -> !label.getKey().equals("apollo_unique_identifier"))
              .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))
              .forEach(labelSelectorBuilder::addToMatchLabels);

    // Make sure apollo_unique_identifier never ends up in the selector
    deployment.getSpec().getTemplate().getMetadata().getLabels().entrySet()
              .stream()
              .filter(label -> label.getKey().equals("apollo_unique_identifier"))
              .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue))
              .forEach((key, value) -> labelSelectorBuilder.removeFromMatchLabels(key));

    deployment.getSpec().setSelector(labelSelectorBuilder.build());

    return deployment;
}
 
Example #8
Source File: AbstractModel.java    From strimzi-kafka-operator with Apache License 2.0
protected StatefulSet createStatefulSet(
        Map<String, String> stsAnnotations,
        Map<String, String> podAnnotations,
        List<Volume> volumes,
        List<PersistentVolumeClaim> volumeClaims,
        Affinity affinity,
        List<Container> initContainers,
        List<Container> containers,
        List<LocalObjectReference> imagePullSecrets,
        boolean isOpenShift) {

    PodSecurityContext securityContext = templateSecurityContext;

    // If persistent storage is requested, the cluster runs on Kubernetes (not OpenShift), and the
    // user has not configured a PodSecurityContext, set a default one. This gives each pod write
    // permissions under a specific group, so that a pod changing users does not run into
    // permission issues.
    if (ModelUtils.containsPersistentStorage(storage) && !isOpenShift && securityContext == null) {
        securityContext = new PodSecurityContextBuilder()
                .withFsGroup(AbstractModel.DEFAULT_FS_GROUPID)
                .build();
    }

    StatefulSet statefulSet = new StatefulSetBuilder()
            .withNewMetadata()
                .withName(name)
                .withLabels(getLabelsWithStrimziName(name, templateStatefulSetLabels).toMap())
                .withNamespace(namespace)
                .withAnnotations(mergeLabelsOrAnnotations(stsAnnotations, templateStatefulSetAnnotations))
                .withOwnerReferences(createOwnerReference())
            .endMetadata()
            .withNewSpec()
                .withPodManagementPolicy(templatePodManagementPolicy.toValue())
                .withUpdateStrategy(new StatefulSetUpdateStrategyBuilder().withType("OnDelete").build())
                .withSelector(new LabelSelectorBuilder().withMatchLabels(getSelectorLabels().toMap()).build())
                .withServiceName(headlessServiceName)
                .withReplicas(replicas)
                .withNewTemplate()
                    .withNewMetadata()
                        .withName(name)
                        .withLabels(getLabelsWithStrimziName(name, templatePodLabels).toMap())
                        .withAnnotations(mergeLabelsOrAnnotations(podAnnotations, templatePodAnnotations))
                    .endMetadata()
                    .withNewSpec()
                        .withServiceAccountName(getServiceAccountName())
                        .withAffinity(affinity)
                        .withInitContainers(initContainers)
                        .withContainers(containers)
                        .withVolumes(volumes)
                        .withTolerations(getTolerations())
                        .withTerminationGracePeriodSeconds(Long.valueOf(templateTerminationGracePeriodSeconds))
                        .withImagePullSecrets(templateImagePullSecrets != null ? templateImagePullSecrets : imagePullSecrets)
                        .withSecurityContext(securityContext)
                        .withPriorityClassName(templatePodPriorityClassName)
                        .withSchedulerName(templatePodSchedulerName != null ? templatePodSchedulerName : "default-scheduler")
                    .endSpec()
                .endTemplate()
                .withVolumeClaimTemplates(volumeClaims)
            .endSpec()
            .build();

    return statefulSet;
}
 
Example #9
Source File: ConnectS2IST.java    From strimzi-kafka-operator with Apache License 2.0
@Test
void testCustomAndUpdatedValues() {
    final String kafkaConnectS2IName = "kafka-connect-s2i-name-3";
    KafkaResource.kafkaEphemeral(CLUSTER_NAME, 3, 1).done();

    LinkedHashMap<String, String> envVarGeneral = new LinkedHashMap<>();
    envVarGeneral.put("TEST_ENV_1", "test.env.one");
    envVarGeneral.put("TEST_ENV_2", "test.env.two");

    KafkaConnectS2IResource.kafkaConnectS2I(kafkaConnectS2IName, CLUSTER_NAME, 1)
        .editMetadata()
            .addToLabels("type", "kafka-connect-s2i")
        .endMetadata()
        .editSpec()
            .withNewTemplate()
                .withNewConnectContainer()
                    .withEnv(StUtils.createContainerEnvVarsFromMap(envVarGeneral))
                .endConnectContainer()
            .endTemplate()
        .endSpec()
        .done();

    String depConfName = KafkaConnectS2IResources.deploymentName(kafkaConnectS2IName);

    LinkedHashMap<String, String> envVarUpdated = new LinkedHashMap<>();
    envVarUpdated.put("TEST_ENV_2", "updated.test.env.two");
    envVarUpdated.put("TEST_ENV_3", "test.env.three");

    Map<String, String> connectSnapshot = DeploymentConfigUtils.depConfigSnapshot(KafkaConnectS2IResources.deploymentName(kafkaConnectS2IName));

    LOGGER.info("Verify values before update");

    LabelSelector deploymentConfigSelector = new LabelSelectorBuilder()
        .addToMatchLabels(kubeClient().getDeploymentConfigSelectors(depConfName))
        .build();
    String connectPodName = kubeClient().listPods(deploymentConfigSelector).get(0).getMetadata().getName();

    checkSpecificVariablesInContainer(connectPodName, depConfName, envVarGeneral);

    LOGGER.info("Updating values in ConnectS2I container");
    KafkaConnectS2IResource.replaceConnectS2IResource(kafkaConnectS2IName, kc -> {
        kc.getSpec().getTemplate().getConnectContainer().setEnv(StUtils.createContainerEnvVarsFromMap(envVarUpdated));
    });

    DeploymentConfigUtils.waitTillDepConfigHasRolled(depConfName, connectSnapshot);

    deploymentConfigSelector = new LabelSelectorBuilder()
        .addToMatchLabels(kubeClient().getDeploymentConfigSelectors(depConfName))
        .build();
    connectPodName = kubeClient().listPods(deploymentConfigSelector).get(0).getMetadata().getName();

    LOGGER.info("Verify values after update");
    checkSpecificVariablesInContainer(connectPodName, depConfName, envVarUpdated);
}
 
Example #10
Source File: KubernetesResource.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Method for allowing network policies for Connect or ConnectS2I
 * @param resource the Connect or ConnectS2I resource
 * @param deploymentName name of the resource's deployment - used for setting strimzi.io/name
 */
public static void allowNetworkPolicySettingsForResource(HasMetadata resource, String deploymentName) {
    LabelSelector labelSelector = new LabelSelectorBuilder()
            .addToMatchLabels(Constants.KAFKA_CLIENTS_LABEL_KEY, Constants.KAFKA_CLIENTS_LABEL_VALUE)
            .build();

    if (kubeClient().listPods(labelSelector).size() == 0) {
        throw new RuntimeException("You did not create the Kafka Client instance(pod) before using the Kafka Connect");
    }

    LOGGER.info("Apply NetworkPolicy access to {} from pods with LabelSelector {}", deploymentName, labelSelector);

    NetworkPolicy networkPolicy = new NetworkPolicyBuilder()
            .withNewApiVersion("networking.k8s.io/v1")
            .withNewKind("NetworkPolicy")
            .withNewMetadata()
                .withName(resource.getMetadata().getName() + "-allow")
            .endMetadata()
            .withNewSpec()
                .addNewIngress()
                    .addNewFrom()
                        .withPodSelector(labelSelector)
                    .endFrom()
                    .addNewPort()
                        .withNewPort(8083)
                        .withNewProtocol("TCP")
                    .endPort()
                    .addNewPort()
                        .withNewPort(9404)
                        .withNewProtocol("TCP")
                    .endPort()
                    .addNewPort()
                        .withNewPort(8080)
                        .withNewProtocol("TCP")
                    .endPort()
                  .endIngress()
                .withNewPodSelector()
                    .addToMatchLabels("strimzi.io/cluster", resource.getMetadata().getName())
                    .addToMatchLabels("strimzi.io/kind", resource.getKind())
                    .addToMatchLabels("strimzi.io/name", deploymentName)
                .endPodSelector()
                .withPolicyTypes("Ingress")
            .endSpec()
            .build();

    LOGGER.debug("Going to apply the following NetworkPolicy: {}", networkPolicy.toString());
    deleteLater(kubeClient().getClient().network().networkPolicies().inNamespace(ResourceManager.kubeClient().getNamespace()).createOrReplace(networkPolicy));
    LOGGER.info("Network policy for LabelSelector {} successfully applied", labelSelector);
}
 
Example #11
Source File: AbstractModel.java    From strimzi-kafka-operator with Apache License 2.0
protected Deployment createDeployment(
        DeploymentStrategy updateStrategy,
        Map<String, String> deploymentAnnotations,
        Map<String, String> podAnnotations,
        Affinity affinity,
        List<Container> initContainers,
        List<Container> containers,
        List<Volume> volumes,
        List<LocalObjectReference> imagePullSecrets) {

    Deployment dep = new DeploymentBuilder()
            .withNewMetadata()
                .withName(name)
                .withLabels(getLabelsWithStrimziName(name, templateDeploymentLabels).toMap())
                .withNamespace(namespace)
                .withAnnotations(mergeLabelsOrAnnotations(deploymentAnnotations, templateDeploymentAnnotations))
                .withOwnerReferences(createOwnerReference())
            .endMetadata()
            .withNewSpec()
                .withStrategy(updateStrategy)
                .withReplicas(replicas)
                .withSelector(new LabelSelectorBuilder().withMatchLabels(getSelectorLabels().toMap()).build())
                .withNewTemplate()
                    .withNewMetadata()
                        .withLabels(getLabelsWithStrimziName(name, templatePodLabels).toMap())
                        .withAnnotations(mergeLabelsOrAnnotations(podAnnotations, templatePodAnnotations))
                    .endMetadata()
                    .withNewSpec()
                        .withAffinity(affinity)
                        .withServiceAccountName(getServiceAccountName())
                        .withInitContainers(initContainers)
                        .withContainers(containers)
                        .withVolumes(volumes)
                        .withTolerations(getTolerations())
                        .withTerminationGracePeriodSeconds(Long.valueOf(templateTerminationGracePeriodSeconds))
                        .withImagePullSecrets(templateImagePullSecrets != null ? templateImagePullSecrets : imagePullSecrets)
                        .withSecurityContext(templateSecurityContext)
                        .withPriorityClassName(templatePodPriorityClassName)
                        .withSchedulerName(templatePodSchedulerName)
                    .endSpec()
                .endTemplate()
            .endSpec()
            .build();

    return dep;
}
 
Example #12
Source File: KubernetesResourceUtil.java    From jkube with Eclipse Public License 2.0
private static LabelSelector toLabelSelector(Map<String, String> matchLabels) {
    if (matchLabels != null && !matchLabels.isEmpty()) {
        return new LabelSelectorBuilder().withMatchLabels(matchLabels).build();
    }
    return null;
}
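A hedged usage sketch (the surrounding builder and labels are invented): because the helper returns null for a missing or empty map, its result can be passed straight to a builder's withSelector(...), and the fabric8 builders simply leave the selector unset when given null.

// Hypothetical call site: the selector is omitted entirely when matchLabels is empty.
Map<String, String> matchLabels = new HashMap<>();   // possibly empty
PersistentVolumeClaim pvc = new PersistentVolumeClaimBuilder()
    .withNewSpec()
        .withSelector(toLabelSelector(matchLabels))  // null => no selector in the output
    .endSpec()
    .build();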
 
Example #13
Source File: AbstractConnectOperator.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Reconcile all the connectors selected by the given Connect instance, updating each connector's status with the result.
 * @param reconciliation The reconciliation
 * @param connect The KafkaConnect or KafkaConnectS2I resource
 * @param connectStatus Status of the KafkaConnect or KafkaConnectS2I resource (will be used to set the available
 *                      connector plugins)
 * @param scaledToZero  Indicates whether the related Connect cluster is currently scaled to 0 replicas
 * @return A future, failed if any of the connectors' statuses could not be updated.
 */
protected Future<Void> reconcileConnectors(Reconciliation reconciliation, T connect, S connectStatus, boolean scaledToZero) {
    String connectName = connect.getMetadata().getName();
    String namespace = connect.getMetadata().getNamespace();
    String host = KafkaConnectResources.qualifiedServiceName(connectName, namespace);

    if (!isUseResources(connect))    {
        return Future.succeededFuture();
    }

    if (scaledToZero)   {
        return connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
                .compose(connectors -> CompositeFuture.join(
                        connectors.stream().map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, zeroReplicas(namespace, connectName)))
                                .collect(Collectors.toList())
                ))
                .map((Void) null);
    }

    KafkaConnectApi apiClient = connectClientProvider.apply(vertx);

    return CompositeFuture.join(
            apiClient.list(host, port),
            connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build())),
            apiClient.listConnectorPlugins(host, port)
    ).compose(cf -> {
        List<String> runningConnectorNames = cf.resultAt(0);
        List<KafkaConnector> desiredConnectors = cf.resultAt(1);
        List<ConnectorPlugin> connectorPlugins = cf.resultAt(2);

        log.debug("{}: Setting list of connector plugins in Kafka Connect status", reconciliation);
        connectStatus.setConnectorPlugins(connectorPlugins);

        if (connectorsResourceCounter != null)  {
            connectorsResourceCounter.set(desiredConnectors.size());
        }

        Set<String> deleteConnectorNames = new HashSet<>(runningConnectorNames);
        deleteConnectorNames.removeAll(desiredConnectors.stream().map(c -> c.getMetadata().getName()).collect(Collectors.toSet()));
        log.debug("{}: {} cluster: delete connectors: {}", reconciliation, kind(), deleteConnectorNames);
        Stream<Future<Void>> deletionFutures = deleteConnectorNames.stream().map(connectorName ->
                reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connectorName, null)
        );

        log.debug("{}: {} cluster: required connectors: {}", reconciliation, kind(), desiredConnectors);
        Stream<Future<Void>> createUpdateFutures = desiredConnectors.stream()
                .map(connector -> reconcileConnectorAndHandleResult(reconciliation, host, apiClient, true, connector.getMetadata().getName(), connector));

        return CompositeFuture.join(Stream.concat(deletionFutures, createUpdateFutures).collect(Collectors.toList())).map((Void) null);
    }).recover(error -> {
        if (error instanceof ConnectTimeoutException) {
            Promise<Void> connectorStatuses = Promise.promise();
            log.warn("{}: Failed to connect to the REST API => trying to update the connector status", reconciliation);

            connectorOperator.listAsync(namespace, Optional.of(new LabelSelectorBuilder().addToMatchLabels(Labels.STRIMZI_CLUSTER_LABEL, connectName).build()))
                    .compose(connectors -> CompositeFuture.join(
                            connectors.stream().map(connector -> maybeUpdateConnectorStatus(reconciliation, connector, null, error))
                                    .collect(Collectors.toList())
                    ))
                    .onComplete(ignore -> connectorStatuses.fail(error));

            return connectorStatuses.future();
        } else {
            return Future.failedFuture(error);
        }
    });
}
 
Example #14
Source File: KubernetesHandler.java    From dekorate with Apache License 2.0
/**
 * Creates a {@link LabelSelector} that matches the labels for the {@link KubernetesConfig}.
 * @param config The KubernetesConfig to derive the labels from.
 * @return A label selector.
 */
public LabelSelector createSelector(KubernetesConfig config) {
  return new LabelSelectorBuilder()
    .withMatchLabels(Labels.createLabels(config))
    .build();
}
 
Example #15
Source File: KubernetesHelper.java    From jkube with Eclipse Public License 2.0
private static LabelSelector toLabelSelector(Map<String, String> matchLabels) {
    if (matchLabels != null && !matchLabels.isEmpty()) {
        return new LabelSelectorBuilder().withMatchLabels(matchLabels).build();
    }
    return null;
}
 
Example #16
Source File: PortForwardServiceTest.java    From jkube with Eclipse Public License 2.0
@Test
public void testSimpleScenario() throws Exception {
    // Cannot test more complex scenarios due to errors in mockwebserver
    OpenShiftMockServer mockServer = new OpenShiftMockServer(false);

    Pod pod1 = new PodBuilder()
            .withNewMetadata()
            .withName("mypod")
            .addToLabels("mykey", "myvalue")
            .withResourceVersion("1")
            .endMetadata()
            .withNewStatus()
            .withPhase("run")
            .endStatus()
            .build();

    PodList pods1 = new PodListBuilder()
            .withItems(pod1)
            .withNewMetadata()
            .withResourceVersion("1")
            .endMetadata()
            .build();

    mockServer.expect().get().withPath("/api/v1/namespaces/test/pods?labelSelector=mykey%3Dmyvalue").andReturn(200, pods1).always();
    mockServer.expect().get().withPath("/api/v1/namespaces/test/pods").andReturn(200, pods1).always();
    mockServer.expect().get().withPath("/api/v1/namespaces/test/pods?labelSelector=mykey%3Dmyvalue&watch=true")
            .andUpgradeToWebSocket().open()
            .waitFor(1000)
            .andEmit(new WatchEvent(pod1, "MODIFIED"))
            .done().always();

    mockServer.expect().get().withPath("/api/v1/namespaces/test/pods?resourceVersion=1&watch=true")
            .andUpgradeToWebSocket().open()
            .waitFor(1000)
            .andEmit(new WatchEvent(pod1, "MODIFIED"))
            .done().always();

    OpenShiftClient client = mockServer.createOpenShiftClient();
    PortForwardService service = new PortForwardService(client, logger) {
        @Override
        public ProcessUtil.ProcessExecutionContext forwardPortAsync(KitLogger externalProcessLogger, String pod, String namespace, int remotePort, int localPort) throws JKubeServiceException {
            return new ProcessUtil.ProcessExecutionContext(process, Collections.<Thread>emptyList(), logger);
        }
    };

    try (Closeable c = service.forwardPortAsync(logger, new LabelSelectorBuilder().withMatchLabels(Collections.singletonMap("mykey", "myvalue")).build(), 8080, 9000)) {
        Thread.sleep(3000);
    }
}
 
Example #17
Source File: DeploymentConfigUtils.java    From strimzi-kafka-operator with Apache License 2.0
/**
 * Returns a map of pod name to resource version for the pods currently in the given DeploymentConfig.
 * @param name The DeploymentConfig name.
 * @return A map of pod name to resource version for pods in the given DeploymentConfig.
 */
public static Map<String, String> depConfigSnapshot(String name) {
    LabelSelector selector = new LabelSelectorBuilder().addToMatchLabels(kubeClient().getDeploymentConfigSelectors(name)).build();
    return PodUtils.podSnapshot(selector);
}
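 
Every example above builds an equality-based selector via matchLabels. The same builder also supports Kubernetes' set-based rules through matchExpressions; here is a minimal hedged sketch (the key, operator, and values are invented for illustration):

import io.fabric8.kubernetes.api.model.LabelSelector;
import io.fabric8.kubernetes.api.model.LabelSelectorBuilder;
import io.fabric8.kubernetes.api.model.LabelSelectorRequirementBuilder;

// Equivalent to the YAML:
//   matchExpressions:
//   - {key: environment, operator: In, values: [production, staging]}
LabelSelector selector = new LabelSelectorBuilder()
        .withMatchExpressions(new LabelSelectorRequirementBuilder()
                .withKey("environment")
                .withOperator("In")
                .withValues("production", "staging")
                .build())
        .build();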