org.apache.flink.client.deployment.ClusterDeploymentException Java Examples

The following examples show how to use org.apache.flink.client.deployment.ClusterDeploymentException. Each example is taken from an open source project; the source file and the project it comes from are listed above the code.
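Before the individual examples, here is a minimal caller-side sketch, not taken from any of the projects below, of the pattern most examples follow: a ClusterDescriptor deploy call is wrapped in a try/catch, the ClusterDeploymentException's cause is inspected or logged, and the descriptor is closed afterwards. The clusterDescriptor, clusterSpecification and LOG variables are assumed to be provided by the surrounding class, and the ClusterClientProvider return type matches the newer deploySessionCluster API shown in Examples #5 and #12.

private void deploySessionClusterAndLog(
		ClusterDescriptor<ApplicationId> clusterDescriptor,
		ClusterSpecification clusterSpecification) throws ClusterDeploymentException {
	try {
		// Any failure inside the descriptor surfaces as a ClusterDeploymentException.
		final ClusterClientProvider<ApplicationId> clusterClientProvider =
			clusterDescriptor.deploySessionCluster(clusterSpecification);

		try (ClusterClient<ApplicationId> clusterClient = clusterClientProvider.getClusterClient()) {
			LOG.info("Session cluster deployed, JobManager Web Interface: {}", clusterClient.getWebInterfaceURL());
		}
	} catch (ClusterDeploymentException e) {
		// The cause usually carries the underlying reason, e.g. an IllegalConfigurationException
		// when the requested slots exceed the maximum vcores (see the test examples below).
		LOG.error("Could not deploy the session cluster.", e);
		throw e;
	} finally {
		clusterDescriptor.close();
	}
}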
Example #1
Source File: YarnClusterDescriptor.java    From Flink-CEPplus with Apache License 2.0
@Override
public ClusterClient<ApplicationId> deployJobCluster(
	ClusterSpecification clusterSpecification,
	JobGraph jobGraph,
	boolean detached) throws ClusterDeploymentException {

	// this is required because the slots are allocated lazily
	jobGraph.setAllowQueuedScheduling(true);

	try {
		return deployInternal(
			clusterSpecification,
			"Flink per-job cluster",
			getYarnJobClusterEntrypoint(),
			jobGraph,
			detached);
	} catch (Exception e) {
		throw new ClusterDeploymentException("Could not deploy Yarn job cluster.", e);
	}
}
 
Example #2
Source File: YarnClusterDescriptorTest.java    From flink with Apache License 2.0
@Test
public void testFailIfTaskSlotsHigherThanMaxVcores() throws ClusterDeploymentException {
	final Configuration flinkConfiguration = new Configuration();

	YarnClusterDescriptor clusterDescriptor = createYarnClusterDescriptor(flinkConfiguration);

	clusterDescriptor.setLocalJarPath(new Path(flinkJar.getPath()));

	try {
		clusterDescriptor.deploySessionCluster(clusterSpecification);

		fail("The deploy call should have failed.");
	} catch (ClusterDeploymentException e) {
		// we expect the cause to be an IllegalConfigurationException
		if (!(e.getCause() instanceof IllegalConfigurationException)) {
			throw e;
		}
	} finally {
		clusterDescriptor.close();
	}
}
 
Example #3
Source File: YarnApplicationFileUploader.java    From flink with Apache License 2.0
public YarnLocalResourceDescriptor uploadFlinkDist(final Path localJarPath) throws IOException, ClusterDeploymentException {
	if (flinkDist != null) {
		return flinkDist;
	} else if (!providedSharedLibs.isEmpty()) {
		throw new ClusterDeploymentException("The \"" + YarnConfigOptions.PROVIDED_LIB_DIRS.key() + "\"" +
				" has to also include the lib/, plugin/ and flink-dist jar." +
				" In other case, it cannot be used.");
	}

	flinkDist = registerSingleLocalResource(
			localJarPath.getName(),
			localJarPath,
			"",
			true,
			false);
	return flinkDist;
}
 
Example #4
Source File: YarnClusterDescriptor.java    From flink with Apache License 2.0
@Override
public ClusterClient<ApplicationId> deployJobCluster(
	ClusterSpecification clusterSpecification,
	JobGraph jobGraph,
	boolean detached) throws ClusterDeploymentException {

	// this is required because the slots are allocated lazily
	jobGraph.setAllowQueuedScheduling(true);

	try {
		return deployInternal(
			clusterSpecification,
			"Flink per-job cluster",
			getYarnJobClusterEntrypoint(),
			jobGraph,
			detached);
	} catch (Exception e) {
		throw new ClusterDeploymentException("Could not deploy Yarn job cluster.", e);
	}
}
 
Example #5
Source File: YarnClusterDescriptor.java    From flink with Apache License 2.0
@Override
public ClusterClientProvider<ApplicationId> deployJobCluster(
	ClusterSpecification clusterSpecification,
	JobGraph jobGraph,
	boolean detached) throws ClusterDeploymentException {
	try {
		return deployInternal(
			clusterSpecification,
			"Flink per-job cluster",
			getYarnJobClusterEntrypoint(),
			jobGraph,
			detached);
	} catch (Exception e) {
		throw new ClusterDeploymentException("Could not deploy Yarn job cluster.", e);
	}
}
 
Example #6
Source File: KubernetesClusterDescriptorTest.java    From flink with Apache License 2.0
@Test
public void testDeployHighAvailabilitySessionCluster() throws ClusterDeploymentException {
	flinkConfig.setString(HighAvailabilityOptions.HA_MODE, HighAvailabilityMode.ZOOKEEPER.toString());
	final ClusterClient<String> clusterClient = deploySessionCluster().getClusterClient();
	checkClusterClient(clusterClient);

	final Container jmContainer = kubeClient
		.apps()
		.deployments()
		.list()
		.getItems()
		.get(0)
		.getSpec()
		.getTemplate()
		.getSpec()
		.getContainers()
		.get(0);
	assertTrue(
		"Environment " + ENV_FLINK_POD_IP_ADDRESS + " should be set.",
		jmContainer.getEnv().stream()
			.map(EnvVar::getName)
			.collect(Collectors.toList())
			.contains(ENV_FLINK_POD_IP_ADDRESS));

	clusterClient.close();
}
 
Example #7
Source File: YARNHighAvailabilityITCase.java    From flink with Apache License 2.0
private RestClusterClient<ApplicationId> deploySessionCluster(YarnClusterDescriptor yarnClusterDescriptor) throws ClusterDeploymentException {
	final int masterMemory = yarnClusterDescriptor.getFlinkConfiguration().get(JobManagerOptions.TOTAL_PROCESS_MEMORY).getMebiBytes();
	final int taskManagerMemory = 1024;
	final ClusterClient<ApplicationId> yarnClusterClient = yarnClusterDescriptor
			.deploySessionCluster(new ClusterSpecification.ClusterSpecificationBuilder()
					.setMasterMemoryMB(masterMemory)
					.setTaskManagerMemoryMB(taskManagerMemory)
					.setSlotsPerTaskManager(1)
					.createClusterSpecification())
			.getClusterClient();

	assertThat(yarnClusterClient, is(instanceOf(RestClusterClient.class)));
	return (RestClusterClient<ApplicationId>) yarnClusterClient;
}
 
Example #8
Source File: AbstractYarnClusterDescriptor.java    From Flink-CEPplus with Apache License 2.0
@Override
public ClusterClient<ApplicationId> deploySessionCluster(ClusterSpecification clusterSpecification) throws ClusterDeploymentException {
	try {
		return deployInternal(
			clusterSpecification,
			"Flink session cluster",
			getYarnSessionClusterEntrypoint(),
			null,
			false);
	} catch (Exception e) {
		throw new ClusterDeploymentException("Couldn't deploy Yarn session cluster", e);
	}
}
 
Example #9
Source File: YarnClusterDescriptorTest.java    From flink with Apache License 2.0
@Test
public void testDeployApplicationClusterWithDeploymentTargetNotCorrectlySet() {
	final Configuration flinkConfig = new Configuration();
	flinkConfig.set(PipelineOptions.JARS, Collections.singletonList("file:///path/of/user.jar"));
	flinkConfig.set(DeploymentOptions.TARGET, YarnDeploymentTarget.SESSION.getName());
	try (final YarnClusterDescriptor yarnClusterDescriptor = createYarnClusterDescriptor(flinkConfig)) {
		assertThrows(
			"Expected deployment.target=yarn-application",
			ClusterDeploymentException.class,
			() -> yarnClusterDescriptor.deployApplicationCluster(clusterSpecification, appConfig));
	}
}
 
Example #10
Source File: YarnClusterDescriptorTest.java    From flink with Apache License 2.0
@Test
public void testConfigOverwrite() throws ClusterDeploymentException {
	Configuration configuration = new Configuration();
	// overwrite vcores in config
	configuration.setInteger(YarnConfigOptions.VCORES, Integer.MAX_VALUE);

	YarnClusterDescriptor clusterDescriptor = createYarnClusterDescriptor(configuration);

	clusterDescriptor.setLocalJarPath(new Path(flinkJar.getPath()));

	// configure slots
	ClusterSpecification clusterSpecification = new ClusterSpecification.ClusterSpecificationBuilder()
		.createClusterSpecification();

	try {
		clusterDescriptor.deploySessionCluster(clusterSpecification);

		fail("The deploy call should have failed.");
	} catch (ClusterDeploymentException e) {
		// we expect the cause to be an IllegalConfigurationException
		if (!(e.getCause() instanceof IllegalConfigurationException)) {
			throw e;
		}
	} finally {
		clusterDescriptor.close();
	}
}
 
Example #11
Source File: YarnClusterDescriptor.java    From flink with Apache License 2.0
@Override
public ClusterClientProvider<ApplicationId> deployApplicationCluster(
		final ClusterSpecification clusterSpecification,
		final ApplicationConfiguration applicationConfiguration) throws ClusterDeploymentException {
	checkNotNull(clusterSpecification);
	checkNotNull(applicationConfiguration);

	final YarnDeploymentTarget deploymentTarget = YarnDeploymentTarget.fromConfig(flinkConfiguration);
	if (YarnDeploymentTarget.APPLICATION != deploymentTarget) {
		throw new ClusterDeploymentException(
				"Couldn't deploy Yarn Application Cluster." +
						" Expected deployment.target=" + YarnDeploymentTarget.APPLICATION.getName() +
						" but actual one was \"" + deploymentTarget.getName() + "\"");
	}

	applicationConfiguration.applyToConfiguration(flinkConfiguration);

	final List<String> pipelineJars = flinkConfiguration.getOptional(PipelineOptions.JARS).orElse(Collections.emptyList());
	Preconditions.checkArgument(pipelineJars.size() == 1, "Should only have one jar");

	try {
		return deployInternal(
				clusterSpecification,
				"Flink Application Cluster",
				YarnApplicationClusterEntryPoint.class.getName(),
				null,
				false);
	} catch (Exception e) {
		throw new ClusterDeploymentException("Couldn't deploy Yarn Application Cluster", e);
	}
}
 
Example #12
Source File: YarnClusterDescriptor.java    From flink with Apache License 2.0
@Override
public ClusterClientProvider<ApplicationId> deploySessionCluster(ClusterSpecification clusterSpecification) throws ClusterDeploymentException {
	try {
		return deployInternal(
				clusterSpecification,
				"Flink session cluster",
				getYarnSessionClusterEntrypoint(),
				null,
				false);
	} catch (Exception e) {
		throw new ClusterDeploymentException("Couldn't deploy Yarn session cluster", e);
	}
}
 
Example #13
Source File: KubernetesClusterDescriptorTest.java    From flink with Apache License 2.0
@Test
public void testDeployApplicationClusterWithDeploymentTargetNotCorrectlySet() {
	flinkConfig.set(PipelineOptions.JARS, Collections.singletonList("local:///path/of/user.jar"));
	flinkConfig.set(DeploymentOptions.TARGET, KubernetesDeploymentTarget.SESSION.getName());
	assertThrows(
		"Expected deployment.target=kubernetes-application",
		ClusterDeploymentException.class,
		() -> descriptor.deployApplicationCluster(clusterSpecification, appConfig));
}
 
Example #14
Source File: KubernetesClusterDescriptorTest.java    From flink with Apache License 2.0
@Test
public void testDeployApplicationClusterWithClusterAlreadyExists() {
	flinkConfig.set(PipelineOptions.JARS, Collections.singletonList("local:///path/of/user.jar"));
	flinkConfig.set(DeploymentOptions.TARGET, KubernetesDeploymentTarget.APPLICATION.getName());
	mockExpectedServiceFromServerSide(loadBalancerSvc);
	assertThrows(
		"The Flink cluster " + CLUSTER_ID + " already exists.",
		ClusterDeploymentException.class,
		() -> descriptor.deployApplicationCluster(clusterSpecification, appConfig));
}
 
Example #15
Source File: KubernetesClusterDescriptor.java    From flink with Apache License 2.0
@Override
public ClusterClientProvider<String> deployJobCluster(
		ClusterSpecification clusterSpecification,
		JobGraph jobGraph,
		boolean detached) throws ClusterDeploymentException {
	throw new ClusterDeploymentException("Per-Job Mode not supported by Active Kubernetes deployments.");
}
 
Example #16
Source File: KubernetesClusterDescriptor.java    From flink with Apache License 2.0
@Override
public ClusterClientProvider<String> deployApplicationCluster(
		final ClusterSpecification clusterSpecification,
		final ApplicationConfiguration applicationConfiguration) throws ClusterDeploymentException {
	if (client.getRestService(clusterId).isPresent()) {
		throw new ClusterDeploymentException("The Flink cluster " + clusterId + " already exists.");
	}

	checkNotNull(clusterSpecification);
	checkNotNull(applicationConfiguration);

	final KubernetesDeploymentTarget deploymentTarget = KubernetesDeploymentTarget.fromConfig(flinkConfig);
	if (KubernetesDeploymentTarget.APPLICATION != deploymentTarget) {
		throw new ClusterDeploymentException(
			"Couldn't deploy Kubernetes Application Cluster." +
				" Expected deployment.target=" + KubernetesDeploymentTarget.APPLICATION.getName() +
				" but actual one was \"" + deploymentTarget + "\"");
	}

	applicationConfiguration.applyToConfiguration(flinkConfig);

	final List<File> pipelineJars = KubernetesUtils.checkJarFileForApplicationMode(flinkConfig);
	Preconditions.checkArgument(pipelineJars.size() == 1, "Should only have one jar");

	final ClusterClientProvider<String> clusterClientProvider = deployClusterInternal(
		KubernetesApplicationClusterEntrypoint.class.getName(),
		clusterSpecification,
		false);

	try (ClusterClient<String> clusterClient = clusterClientProvider.getClusterClient()) {
		LOG.info(
			"Create flink application cluster {} successfully, JobManager Web Interface: {}",
			clusterId,
			clusterClient.getWebInterfaceURL());
	}
	return clusterClientProvider;
}
 
Example #17
Source File: KubernetesClusterDescriptor.java    From flink with Apache License 2.0
@Override
public ClusterClientProvider<String> deploySessionCluster(ClusterSpecification clusterSpecification) throws ClusterDeploymentException {
	final ClusterClientProvider<String> clusterClientProvider = deployClusterInternal(
		KubernetesSessionClusterEntrypoint.class.getName(),
		clusterSpecification,
		false);

	try (ClusterClient<String> clusterClient = clusterClientProvider.getClusterClient()) {
		LOG.info(
			"Create flink session cluster {} successfully, JobManager Web Interface: {}",
			clusterId,
			clusterClient.getWebInterfaceURL());
	}
	return clusterClientProvider;
}
 
Example #18
Source File: AthenaXYarnClusterDescriptor.java    From AthenaX with Apache License 2.0
@Override
public ClusterClient<ApplicationId> deployJobCluster(
    ClusterSpecification clusterSpecification,
    JobGraph jobGraph,
    boolean b) throws ClusterDeploymentException {
  return null;
}
 
Example #19
Source File: YarnJobDescriptor.java    From sylph with Apache License 2.0
/**
 * flink1.5 add
 */
@Override
public ClusterClient<ApplicationId> deployJobCluster(ClusterSpecification clusterSpecification, JobGraph jobGraph, boolean detached)
        throws ClusterDeploymentException
{
    throw new UnsupportedOperationException("this method have't support!");
}
 
Example #20
Source File: YARNHighAvailabilityITCase.java    From flink with Apache License 2.0
private RestClusterClient<ApplicationId> deploySessionCluster(YarnClusterDescriptor yarnClusterDescriptor) throws ClusterDeploymentException {
	final int containerMemory = 256;
	final ClusterClient<ApplicationId> yarnClusterClient = yarnClusterDescriptor.deploySessionCluster(
		new ClusterSpecification.ClusterSpecificationBuilder()
			.setMasterMemoryMB(containerMemory)
			.setTaskManagerMemoryMB(containerMemory)
			.setSlotsPerTaskManager(1)
			.createClusterSpecification());

	assertThat(yarnClusterClient, is(instanceOf(RestClusterClient.class)));
	return (RestClusterClient<ApplicationId>) yarnClusterClient;
}
 
Example #21
Source File: YarnClusterDescriptorTest.java    From flink with Apache License 2.0
@Test
public void testConfigOverwrite() throws ClusterDeploymentException {
	Configuration configuration = new Configuration();
	// overwrite vcores in config
	configuration.setInteger(YarnConfigOptions.VCORES, Integer.MAX_VALUE);
	configuration.setInteger(ResourceManagerOptions.CONTAINERIZED_HEAP_CUTOFF_MIN, 0);

	YarnClusterDescriptor clusterDescriptor = createYarnClusterDescriptor(configuration);

	clusterDescriptor.setLocalJarPath(new Path(flinkJar.getPath()));

	// configure slots
	ClusterSpecification clusterSpecification = new ClusterSpecification.ClusterSpecificationBuilder()
		.setMasterMemoryMB(1)
		.setTaskManagerMemoryMB(1)
		.setNumberTaskManagers(1)
		.setSlotsPerTaskManager(1)
		.createClusterSpecification();

	try {
		clusterDescriptor.deploySessionCluster(clusterSpecification);

		fail("The deploy call should have failed.");
	} catch (ClusterDeploymentException e) {
		// we expect the cause to be an IllegalConfigurationException
		if (!(e.getCause() instanceof IllegalConfigurationException)) {
			throw e;
		}
	} finally {
		clusterDescriptor.close();
	}
}
 
Example #22
Source File: YarnClusterDescriptorTest.java    From flink with Apache License 2.0
@Test
public void testFailIfTaskSlotsHigherThanMaxVcores() throws ClusterDeploymentException {
	final Configuration flinkConfiguration = new Configuration();
	flinkConfiguration.setInteger(ResourceManagerOptions.CONTAINERIZED_HEAP_CUTOFF_MIN, 0);

	YarnClusterDescriptor clusterDescriptor = createYarnClusterDescriptor(flinkConfiguration);

	clusterDescriptor.setLocalJarPath(new Path(flinkJar.getPath()));

	ClusterSpecification clusterSpecification = new ClusterSpecification.ClusterSpecificationBuilder()
		.setMasterMemoryMB(1)
		.setTaskManagerMemoryMB(1)
		.setNumberTaskManagers(1)
		.setSlotsPerTaskManager(Integer.MAX_VALUE)
		.createClusterSpecification();

	try {
		clusterDescriptor.deploySessionCluster(clusterSpecification);

		fail("The deploy call should have failed.");
	} catch (ClusterDeploymentException e) {
		// we expect the cause to be an IllegalConfigurationException
		if (!(e.getCause() instanceof IllegalConfigurationException)) {
			throw e;
		}
	} finally {
		clusterDescriptor.close();
	}
}
 
Example #23
Source File: AbstractYarnClusterDescriptor.java    From flink with Apache License 2.0
@Override
public ClusterClient<ApplicationId> deploySessionCluster(ClusterSpecification clusterSpecification) throws ClusterDeploymentException {
	try {
		return deployInternal(
			clusterSpecification,
			"Flink session cluster",
			getYarnSessionClusterEntrypoint(),
			null,
			false);
	} catch (Exception e) {
		throw new ClusterDeploymentException("Couldn't deploy Yarn session cluster", e);
	}
}
 
Example #24
Source File: YARNHighAvailabilityITCase.java    From Flink-CEPplus with Apache License 2.0
private RestClusterClient<ApplicationId> deploySessionCluster(YarnClusterDescriptor yarnClusterDescriptor) throws ClusterDeploymentException {
	final int containerMemory = 256;
	final ClusterClient<ApplicationId> yarnClusterClient = yarnClusterDescriptor.deploySessionCluster(
		new ClusterSpecification.ClusterSpecificationBuilder()
			.setMasterMemoryMB(containerMemory)
			.setTaskManagerMemoryMB(containerMemory)
			.setSlotsPerTaskManager(1)
			.createClusterSpecification());

	assertThat(yarnClusterClient, is(instanceOf(RestClusterClient.class)));
	return (RestClusterClient<ApplicationId>) yarnClusterClient;
}
 
Example #25
Source File: YarnClusterDescriptorTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testConfigOverwrite() throws ClusterDeploymentException {
	Configuration configuration = new Configuration();
	// overwrite vcores in config
	configuration.setInteger(YarnConfigOptions.VCORES, Integer.MAX_VALUE);
	configuration.setInteger(ResourceManagerOptions.CONTAINERIZED_HEAP_CUTOFF_MIN, 0);

	YarnClusterDescriptor clusterDescriptor = new YarnClusterDescriptor(
		configuration,
		yarnConfiguration,
		temporaryFolder.getRoot().getAbsolutePath(),
		yarnClient,
		true);

	clusterDescriptor.setLocalJarPath(new Path(flinkJar.getPath()));

	// configure slots
	ClusterSpecification clusterSpecification = new ClusterSpecification.ClusterSpecificationBuilder()
		.setMasterMemoryMB(1)
		.setTaskManagerMemoryMB(1)
		.setNumberTaskManagers(1)
		.setSlotsPerTaskManager(1)
		.createClusterSpecification();

	try {
		clusterDescriptor.deploySessionCluster(clusterSpecification);

		fail("The deploy call should have failed.");
	} catch (ClusterDeploymentException e) {
		// we expect the cause to be an IllegalConfigurationException
		if (!(e.getCause() instanceof IllegalConfigurationException)) {
			throw e;
		}
	} finally {
		clusterDescriptor.close();
	}
}
 
Example #26
Source File: YarnClusterDescriptorTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testFailIfTaskSlotsHigherThanMaxVcores() throws ClusterDeploymentException {
	final Configuration flinkConfiguration = new Configuration();
	flinkConfiguration.setInteger(ResourceManagerOptions.CONTAINERIZED_HEAP_CUTOFF_MIN, 0);

	YarnClusterDescriptor clusterDescriptor = new YarnClusterDescriptor(
		flinkConfiguration,
		yarnConfiguration,
		temporaryFolder.getRoot().getAbsolutePath(),
		yarnClient,
		true);

	clusterDescriptor.setLocalJarPath(new Path(flinkJar.getPath()));

	ClusterSpecification clusterSpecification = new ClusterSpecification.ClusterSpecificationBuilder()
		.setMasterMemoryMB(1)
		.setTaskManagerMemoryMB(1)
		.setNumberTaskManagers(1)
		.setSlotsPerTaskManager(Integer.MAX_VALUE)
		.createClusterSpecification();

	try {
		clusterDescriptor.deploySessionCluster(clusterSpecification);

		fail("The deploy call should have failed.");
	} catch (ClusterDeploymentException e) {
		// we expect the cause to be an IllegalConfigurationException
		if (!(e.getCause() instanceof IllegalConfigurationException)) {
			throw e;
		}
	} finally {
		clusterDescriptor.close();
	}
}
 
Example #27
Source File: KubernetesClusterDescriptor.java    From flink with Apache License 2.0
private ClusterClientProvider<String> deployClusterInternal(
		String entryPoint,
		ClusterSpecification clusterSpecification,
		boolean detached) throws ClusterDeploymentException {
	final ClusterEntrypoint.ExecutionMode executionMode = detached ?
		ClusterEntrypoint.ExecutionMode.DETACHED
		: ClusterEntrypoint.ExecutionMode.NORMAL;
	flinkConfig.setString(ClusterEntrypoint.EXECUTION_MODE, executionMode.toString());

	flinkConfig.setString(KubernetesConfigOptionsInternal.ENTRY_POINT_CLASS, entryPoint);

	// Rpc, blob, rest, taskManagerRpc ports need to be exposed, so update them to fixed values.
	KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, BlobServerOptions.PORT, Constants.BLOB_SERVER_PORT);
	KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, TaskManagerOptions.RPC_PORT, Constants.TASK_MANAGER_RPC_PORT);
	KubernetesUtils.checkAndUpdatePortConfigOption(flinkConfig, RestOptions.BIND_PORT, Constants.REST_PORT);

	if (HighAvailabilityMode.isHighAvailabilityModeActivated(flinkConfig)) {
		flinkConfig.setString(HighAvailabilityOptions.HA_CLUSTER_ID, clusterId);
		KubernetesUtils.checkAndUpdatePortConfigOption(
			flinkConfig,
			HighAvailabilityOptions.HA_JOB_MANAGER_PORT_RANGE,
			flinkConfig.get(JobManagerOptions.PORT));
	}

	try {
		final KubernetesJobManagerParameters kubernetesJobManagerParameters =
			new KubernetesJobManagerParameters(flinkConfig, clusterSpecification);

		final KubernetesJobManagerSpecification kubernetesJobManagerSpec =
			KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(kubernetesJobManagerParameters);

		client.createJobManagerComponent(kubernetesJobManagerSpec);

		return createClusterClientProvider(clusterId);
	} catch (Exception e) {
		try {
			LOG.warn("Failed to create the Kubernetes cluster \"{}\", try to clean up the residual resources.", clusterId);
			client.stopAndCleanupCluster(clusterId);
		} catch (Exception e1) {
			LOG.info("Failed to stop and clean up the Kubernetes cluster \"{}\".", clusterId, e1);
		}
		throw new ClusterDeploymentException("Could not create Kubernetes cluster \"" + clusterId + "\".", e);
	}
}
 
Example #28
Source File: KubernetesClusterDescriptorTest.java    From flink with Apache License 2.0
private ClusterClientProvider<String> deploySessionCluster() throws ClusterDeploymentException {
	mockExpectedServiceFromServerSide(loadBalancerSvc);
	return descriptor.deploySessionCluster(clusterSpecification);
}
 
Example #29
Source File: AbstractYarnClusterTest.java    From flink with Apache License 2.0
@Override
public ClusterClient<ApplicationId> deployJobCluster(ClusterSpecification clusterSpecification, JobGraph jobGraph, boolean detached) throws ClusterDeploymentException {
	throw new UnsupportedOperationException("Not needed for testing");
}
 
Example #30
Source File: AbstractYarnClusterTest.java    From Flink-CEPplus with Apache License 2.0
@Override
public ClusterClient<ApplicationId> deployJobCluster(ClusterSpecification clusterSpecification, JobGraph jobGraph, boolean detached) throws ClusterDeploymentException {
	throw new UnsupportedOperationException("Not needed for testing");
}