Java Code Examples for org.apache.flink.client.program.ClusterClient#shutdown()

The following examples show how to use org.apache.flink.client.program.ClusterClient#shutdown(). You can vote up the examples you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: CliFrontendSavepointTest.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that triggering a savepoint without an explicit target directory
 * invokes the cluster client with a {@code null} directory and that the
 * resulting savepoint path is printed to the captured stdout.
 */
@Test
public void testTriggerSavepointSuccess() throws Exception {
	// Redirect stdout/stderr so CLI output can be inspected via 'buffer'.
	replaceStdOutAndStdErr();

	final JobID jobId = new JobID();
	final String expectedPath = "expectedSavepointPath";

	// Mocked client that completes the savepoint with the expected path.
	final ClusterClient<String> clusterClient = createClusterClient(expectedPath);

	try {
		final MockedCliFrontend frontend = new MockedCliFrontend(clusterClient);

		frontend.savepoint(new String[] { jobId.toString() });

		// No directory argument was passed, so the CLI must forward null.
		verify(clusterClient, times(1))
			.triggerSavepoint(eq(jobId), isNull(String.class));

		assertTrue(buffer.toString().contains(expectedPath));
	}
	finally {
		clusterClient.shutdown();
		restoreStdOutAndStdErr();
	}
}
 
Example 2
Source File: CliFrontendSavepointTest.java    From Flink-CEPplus with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that disposing a savepoint via the CLI ({@code -d <path>}) prints
 * both the savepoint path and a "disposed" confirmation to stdout.
 */
@Test
public void testDisposeSavepointSuccess() throws Exception {
	// Redirect stdout/stderr so CLI output can be inspected via 'buffer'.
	replaceStdOutAndStdErr();

	String savepointPath = "expectedSavepointPath";

	// Use a wildcard-parameterized type instead of the raw ClusterClient:
	// raw types disable generic type checking and trigger unchecked warnings.
	ClusterClient<?> clusterClient = new DisposeSavepointClusterClient(
		(String path) -> CompletableFuture.completedFuture(Acknowledge.get()), getConfiguration());

	try {

		CliFrontend frontend = new MockedCliFrontend(clusterClient);

		String[] parameters = { "-d", savepointPath };
		frontend.savepoint(parameters);

		// The CLI must echo the disposed path and confirm disposal.
		String outMsg = buffer.toString();
		assertTrue(outMsg.contains(savepointPath));
		assertTrue(outMsg.contains("disposed"));
	}
	finally {
		clusterClient.shutdown();
		restoreStdOutAndStdErr();
	}
}
 
Example 3
Source File: CliFrontendSavepointTest.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that triggering a savepoint without an explicit target directory
 * calls {@code triggerSavepoint} with a {@code null} directory and that the
 * resulting savepoint path is printed to the captured stdout.
 */
@Test
public void testTriggerSavepointSuccess() throws Exception {
	// Redirect stdout/stderr so CLI output can be asserted on via 'buffer'.
	replaceStdOutAndStdErr();

	JobID jobId = new JobID();

	String savepointPath = "expectedSavepointPath";

	// Mocked client that completes the savepoint with the expected path.
	final ClusterClient<String> clusterClient = createClusterClient(savepointPath);

	try {
		MockedCliFrontend frontend = new MockedCliFrontend(clusterClient);

		String[] parameters = { jobId.toString() };
		frontend.savepoint(parameters);

		// No directory argument was given, so the CLI must forward null.
		verify(clusterClient, times(1))
			.triggerSavepoint(eq(jobId), isNull(String.class));

		assertTrue(buffer.toString().contains(savepointPath));
	}
	finally {
		clusterClient.shutdown();
		restoreStdOutAndStdErr();
	}
}
 
Example 4
Source File: CliFrontendSavepointTest.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that disposing a savepoint via the CLI ({@code -d <path>}) prints
 * both the savepoint path and a "disposed" confirmation to stdout.
 */
@Test
public void testDisposeSavepointSuccess() throws Exception {
	// Redirect stdout/stderr so CLI output can be inspected via 'buffer'.
	replaceStdOutAndStdErr();

	String savepointPath = "expectedSavepointPath";

	// Use a wildcard-parameterized type instead of the raw ClusterClient:
	// raw types disable generic type checking and trigger unchecked warnings.
	ClusterClient<?> clusterClient = new DisposeSavepointClusterClient(
		(String path) -> CompletableFuture.completedFuture(Acknowledge.get()), getConfiguration());

	try {

		CliFrontend frontend = new MockedCliFrontend(clusterClient);

		String[] parameters = { "-d", savepointPath };
		frontend.savepoint(parameters);

		// The CLI must echo the disposed path and confirm disposal.
		String outMsg = buffer.toString();
		assertTrue(outMsg.contains(savepointPath));
		assertTrue(outMsg.contains("disposed"));
	}
	finally {
		clusterClient.shutdown();
		restoreStdOutAndStdErr();
	}
}
 
Example 5
Source File: FlinkYarnJobLauncher.java    From sylph with Apache License 2.0 6 votes vote down vote up
/**
 * Deploys the job graph as a new YARN application master and returns its
 * application id. On any failure the staging directory is cleaned up and the
 * error is rethrown.
 *
 * @param descriptor descriptor used to deploy the YARN cluster
 * @param jobGraph   the Flink job to submit
 * @return the id of the launched YARN application
 * @throws Exception if deployment fails
 */
private ApplicationId start(YarnJobDescriptor descriptor, JobGraph jobGraph)
        throws Exception
{
    try {
        logger.info("start flink job {}", jobGraph.getJobID());
        ClusterClient<ApplicationId> client = descriptor.deploy(jobGraph, true);  //create yarn appMaster
        try {
            return client.getClusterId();
        }
        finally {
            // Always release the client, even if getClusterId() throws;
            // previously a failure here leaked the client.
            client.shutdown();
        }
    }
    catch (Throwable e) {
        logger.error("submitting job {} failed", jobGraph.getJobID(), e);
        cleanupStagingDir(descriptor.getUploadingDir());
        throw e;
    }
}
 
Example 6
Source File: ProgramDeployer.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * Deploys a job on a newly created job cluster, records the cluster's id and
 * web interface URL in {@code result}, and (optionally) blocks until the job
 * finishes, storing the execution result in {@code executionResultBucket}.
 *
 * <p>The cluster client is always shut down afterwards; shutdown failures are
 * deliberately ignored (best effort).
 *
 * @param clusterDescriptor descriptor used to deploy the new job cluster
 * @param jobGraph          the job to attach to the cluster
 * @param result            receives the new cluster's information
 * @param classLoader       NOTE(review): unused — the code below uses
 *                          context.getClassLoader() instead; confirm intent
 * @throws Exception if deployment or result retrieval fails
 */
private <T> void deployJobOnNewCluster(
		ClusterDescriptor<T> clusterDescriptor,
		JobGraph jobGraph,
		Result<T> result,
		ClassLoader classLoader) throws Exception {
	ClusterClient<T> clusterClient = null;
	try {
		// deploy job cluster with job attached
		clusterClient = clusterDescriptor.deployJobCluster(context.getClusterSpec(), jobGraph, false);
		// save information about the new cluster
		result.setClusterInformation(clusterClient.getClusterId(), clusterClient.getWebInterfaceURL());
		// get result
		if (awaitJobResult) {
			// we need to hard cast for now
			final JobExecutionResult jobResult = ((RestClusterClient<T>) clusterClient)
					.requestJobResult(jobGraph.getJobID())
					.get()
					.toJobExecutionResult(context.getClassLoader()); // throws exception if job fails
			executionResultBucket.add(jobResult);
		}
	} finally {
		try {
			if (clusterClient != null) {
				clusterClient.shutdown();
			}
		} catch (Exception e) {
			// ignore — shutdown is best effort; the primary outcome is already decided
		}
	}
}
 
Example 7
Source File: CliFrontendSavepointTest.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * Checks that a failing savepoint trigger surfaces the underlying exception
 * message through the FlinkException thrown by the CLI.
 */
@Test
public void testTriggerSavepointFailure() throws Exception {
	// Redirect stdout/stderr so CLI output does not pollute the test log.
	replaceStdOutAndStdErr();

	final JobID jobId = new JobID();
	final String expectedTestException = "expectedTestException";

	// Client whose savepoint future completes exceptionally with this cause.
	final ClusterClient<String> clusterClient =
		createFailingClusterClient(new Exception(expectedTestException));

	try {
		final MockedCliFrontend frontend = new MockedCliFrontend(clusterClient);

		try {
			frontend.savepoint(new String[] { jobId.toString() });
			fail("Savepoint should have failed.");
		} catch (FlinkException e) {
			// The original failure message must be somewhere in the chain.
			assertTrue(ExceptionUtils.findThrowableWithMessage(e, expectedTestException).isPresent());
		}
	}
	finally {
		clusterClient.shutdown();
		restoreStdOutAndStdErr();
	}
}
 
Example 8
Source File: CliFrontendSavepointTest.java    From Flink-CEPplus with Apache License 2.0 5 votes vote down vote up
/**
 * Tests that a CLI call with a custom savepoint directory target is
 * forwarded correctly to the cluster client.
 */
@Test
public void testTriggerSavepointCustomTarget() throws Exception {
	// Redirect stdout/stderr so CLI output can be inspected via 'buffer'.
	replaceStdOutAndStdErr();

	final JobID jobId = new JobID();
	final String targetDirectory = "customTargetDirectory";

	// Mocked client that completes the savepoint with the given directory.
	final ClusterClient<String> clusterClient = createClusterClient(targetDirectory);

	try {
		final MockedCliFrontend frontend = new MockedCliFrontend(clusterClient);

		frontend.savepoint(new String[] { jobId.toString(), targetDirectory });

		// The custom directory must be passed through verbatim.
		verify(clusterClient, times(1))
			.triggerSavepoint(eq(jobId), eq(targetDirectory));

		assertTrue(buffer.toString().contains(targetDirectory));
	}
	finally {
		clusterClient.shutdown();

		restoreStdOutAndStdErr();
	}
}
 
Example 9
Source File: ProgramDeployer.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Deploys a job on a newly created job cluster, records the cluster's id and
 * web interface URL in {@code result}, and (optionally) blocks until the job
 * finishes, storing the execution result in {@code executionResultBucket}.
 *
 * <p>The cluster client is always shut down afterwards; shutdown failures are
 * deliberately ignored (best effort).
 *
 * @param clusterDescriptor descriptor used to deploy the new job cluster
 * @param jobGraph          the job to attach to the cluster
 * @param result            receives the new cluster's information
 * @param classLoader       NOTE(review): unused — the code below uses
 *                          context.getClassLoader() instead; confirm intent
 * @throws Exception if deployment or result retrieval fails
 */
private <T> void deployJobOnNewCluster(
		ClusterDescriptor<T> clusterDescriptor,
		JobGraph jobGraph,
		Result<T> result,
		ClassLoader classLoader) throws Exception {
	ClusterClient<T> clusterClient = null;
	try {
		// deploy job cluster with job attached
		clusterClient = clusterDescriptor.deployJobCluster(context.getClusterSpec(), jobGraph, false);
		// save information about the new cluster
		result.setClusterInformation(clusterClient.getClusterId(), clusterClient.getWebInterfaceURL());
		// get result
		if (awaitJobResult) {
			// we need to hard cast for now
			final JobExecutionResult jobResult = ((RestClusterClient<T>) clusterClient)
					.requestJobResult(jobGraph.getJobID())
					.get()
					.toJobExecutionResult(context.getClassLoader()); // throws exception if job fails
			executionResultBucket.add(jobResult);
		}
	} finally {
		try {
			if (clusterClient != null) {
				clusterClient.shutdown();
			}
		} catch (Exception e) {
			// ignore — shutdown is best effort; the primary outcome is already decided
		}
	}
}
 
Example 10
Source File: CliFrontendSavepointTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Checks that a failing savepoint trigger surfaces the underlying exception
 * message through the FlinkException thrown by the CLI.
 */
@Test
public void testTriggerSavepointFailure() throws Exception {
	// Redirect stdout/stderr so CLI output does not pollute the test log.
	replaceStdOutAndStdErr();

	JobID jobId = new JobID();

	String expectedTestException = "expectedTestException";
	Exception testException = new Exception(expectedTestException);

	// Client whose savepoint future completes exceptionally with testException.
	final ClusterClient<String> clusterClient = createFailingClusterClient(testException);

	try {
		MockedCliFrontend frontend = new MockedCliFrontend(clusterClient);

		String[] parameters = { jobId.toString() };

		try {
			frontend.savepoint(parameters);

			fail("Savepoint should have failed.");
		} catch (FlinkException e) {
			// The original failure message must be somewhere in the cause chain.
			assertTrue(ExceptionUtils.findThrowableWithMessage(e, expectedTestException).isPresent());
		}
	}
	finally {
		clusterClient.shutdown();
		restoreStdOutAndStdErr();
	}
}
 
Example 11
Source File: CliFrontendSavepointTest.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Tests that a CLI call with a custom savepoint directory target is
 * forwarded correctly to the cluster client.
 */
@Test
public void testTriggerSavepointCustomTarget() throws Exception {
	// Redirect stdout/stderr so CLI output can be inspected via 'buffer'.
	replaceStdOutAndStdErr();

	JobID jobId = new JobID();

	String savepointDirectory = "customTargetDirectory";

	// Mocked client that completes the savepoint with the given directory.
	final ClusterClient<String> clusterClient = createClusterClient(savepointDirectory);

	try {
		MockedCliFrontend frontend = new MockedCliFrontend(clusterClient);

		String[] parameters = { jobId.toString(), savepointDirectory };
		frontend.savepoint(parameters);

		// The custom directory must be passed through verbatim.
		verify(clusterClient, times(1))
			.triggerSavepoint(eq(jobId), eq(savepointDirectory));

		assertTrue(buffer.toString().contains(savepointDirectory));
	}
	finally {
		clusterClient.shutdown();

		restoreStdOutAndStdErr();
	}
}
 
Example 12
Source File: JobDeployer.java    From AthenaX with Apache License 2.0 5 votes vote down vote up
/**
 * Deploys the descriptor's cluster, submits the job in detached mode, and
 * registers a stop-after-job hook. The cluster client is always shut down,
 * regardless of submission success.
 */
@VisibleForTesting
void start(AthenaXYarnClusterDescriptor descriptor, JobGraph job) throws Exception {
  final ClusterClient<ApplicationId> yarnClient = descriptor.deploy();
  try {
    yarnClient.runDetached(job, null);
    stopAfterJob(yarnClient, job.getJobID());
  } finally {
    // Release client resources even if submission failed.
    yarnClient.shutdown();
  }
}
 
Example 13
Source File: YARNITCase.java    From Flink-CEPplus with Apache License 2.0 4 votes vote down vote up
/**
 * End-to-end test of per-job mode on YARN: deploys a job cluster for a simple
 * no-data streaming job, waits for the job result via the REST client, and
 * asserts that the job completed without a serialized failure.
 */
@Test
public void testPerJobMode() throws Exception {
	Configuration configuration = new Configuration();
	// Generous ask timeout to tolerate slow test-cluster startup.
	configuration.setString(AkkaOptions.ASK_TIMEOUT, "30 s");
	final YarnClient yarnClient = getYarnClient();

	// try-with-resources closes the descriptor after the test.
	try (final YarnClusterDescriptor yarnClusterDescriptor = new YarnClusterDescriptor(
		configuration,
		getYarnConfiguration(),
		System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR),
		yarnClient,
		true)) {

		// Ship the Flink uberjar plus lib/ and shaded-hadoop/ contents to YARN.
		yarnClusterDescriptor.setLocalJarPath(new Path(flinkUberjar.getAbsolutePath()));
		yarnClusterDescriptor.addShipFiles(Arrays.asList(flinkLibFolder.listFiles()));
		yarnClusterDescriptor.addShipFiles(Arrays.asList(flinkShadedHadoopDir.listFiles()));

		// Minimal cluster: one TM with a single slot.
		final ClusterSpecification clusterSpecification = new ClusterSpecification.ClusterSpecificationBuilder()
			.setMasterMemoryMB(768)
			.setTaskManagerMemoryMB(1024)
			.setSlotsPerTaskManager(1)
			.setNumberTaskManagers(1)
			.createClusterSpecification();

		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(2);

		// Source emits nothing; shuffle forces a network exchange before the sink.
		env.addSource(new NoDataSource())
			.shuffle()
			.addSink(new DiscardingSink<>());

		final JobGraph jobGraph = env.getStreamGraph().getJobGraph();

		File testingJar = YarnTestBase.findFile("..", new YarnTestUtils.TestJarFinder("flink-yarn-tests"));

		jobGraph.addJar(new org.apache.flink.core.fs.Path(testingJar.toURI()));

		ApplicationId applicationId = null;
		ClusterClient<ApplicationId> clusterClient = null;

		try {
			// Deploy the per-job cluster with the job attached (non-detached).
			clusterClient = yarnClusterDescriptor.deployJobCluster(
				clusterSpecification,
				jobGraph,
				false);
			applicationId = clusterClient.getClusterId();

			// Per-job deployment is expected to hand back a REST-based client.
			assertThat(clusterClient, is(instanceOf(RestClusterClient.class)));
			final RestClusterClient<ApplicationId> restClusterClient = (RestClusterClient<ApplicationId>) clusterClient;

			final CompletableFuture<JobResult> jobResultCompletableFuture = restClusterClient.requestJobResult(jobGraph.getJobID());

			// Block until the job finishes.
			final JobResult jobResult = jobResultCompletableFuture.get();

			assertThat(jobResult, is(notNullValue()));
			// No serialized throwable means the job succeeded.
			assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
		} finally {
			// Clean up in order: client first, then kill the YARN application.
			if (clusterClient != null) {
				clusterClient.shutdown();
			}

			if (applicationId != null) {
				yarnClusterDescriptor.killCluster(applicationId);
			}
		}
	}
}