Java Code Examples for org.apache.flink.client.program.ClusterClient#getClusterId()

The following examples show how to use org.apache.flink.client.program.ClusterClient#getClusterId(). The original project and source file for each example are named above the code.
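Before the project examples, here is a minimal sketch of the call itself: getClusterId() returns the identifier the ClusterClient was parameterized with, which for a YARN deployment is the ApplicationId of the Flink cluster's YARN application. The class and method names below are illustrative only and are not taken from any of the projects.

import org.apache.flink.client.program.ClusterClient;
import org.apache.hadoop.yarn.api.records.ApplicationId;

// Illustrative helper, not part of Flink or of the projects below.
public class ClusterIdSketch {

    // For a YARN-deployed cluster the client's type parameter is ApplicationId,
    // so getClusterId() yields the YARN application that hosts the Flink cluster.
    public static ApplicationId yarnApplicationIdOf(ClusterClient<ApplicationId> client) {
        return client.getClusterId();
    }
}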
Example 1
Source File: FlinkYarnJobLauncher.java    From sylph with Apache License 2.0
private ApplicationId start(YarnJobDescriptor descriptor, JobGraph jobGraph)
        throws Exception
{
    try {
        logger.info("start flink job {}", jobGraph.getJobID());
        ClusterClient<ApplicationId> client = descriptor.deploy(jobGraph, true);  // deploys the per-job cluster, i.e. creates the YARN application master
        ApplicationId applicationId = client.getClusterId();
        client.shutdown();  // only the application id is needed here, so release the client
        return applicationId;
    }
    catch (Throwable e) {
        logger.error("submitting job {} failed", jobGraph.getJobID(), e);
        cleanupStagingDir(descriptor.getUploadingDir());
        throw e;
    }
}
 
Example 2
Source File: HadoopUtils.java    From zeppelin with Apache License 2.0
public static String getYarnAppTrackingUrl(ClusterClient clusterClient) throws IOException, YarnException {
  ApplicationId yarnAppId = (ApplicationId) clusterClient.getClusterId();
  YarnClient yarnClient = YarnClient.createYarnClient();
  YarnConfiguration yarnConf = new YarnConfiguration();
  // disable timeline service as we only query yarn app here.
  // Otherwise we may hit this kind of ERROR:
  // java.lang.ClassNotFoundException: com.sun.jersey.api.client.config.ClientConfig
  yarnConf.set("yarn.timeline-service.enabled", "false");
  yarnClient.init(yarnConf);
  yarnClient.start();
  return yarnClient.getApplicationReport(yarnAppId).getTrackingUrl();
}
 
Example 3
Source File: HadoopUtils.java    From zeppelin with Apache License 2.0
public static void cleanupStagingDirInternal(ClusterClient clusterClient) {
  try {
    ApplicationId appId = (ApplicationId) clusterClient.getClusterId();
    FileSystem fs = FileSystem.get(new Configuration());
    Path stagingDirPath = new Path(fs.getHomeDirectory(), ".flink/" + appId.toString());
    if (fs.delete(stagingDirPath, true)) {
      LOGGER.info("Deleted staging directory " + stagingDirPath);
    }
  } catch (IOException e) {
    LOGGER.warn("Failed to cleanup staging dir", e);
  }
}
 
Example 4
Source File: YARNITCase.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testPerJobMode() throws Exception {
	Configuration configuration = new Configuration();
	configuration.setString(AkkaOptions.ASK_TIMEOUT, "30 s");
	final YarnClient yarnClient = getYarnClient();

	try (final YarnClusterDescriptor yarnClusterDescriptor = new YarnClusterDescriptor(
		configuration,
		getYarnConfiguration(),
		System.getenv(ConfigConstants.ENV_FLINK_CONF_DIR),
		yarnClient,
		true)) {

		yarnClusterDescriptor.setLocalJarPath(new Path(flinkUberjar.getAbsolutePath()));
		yarnClusterDescriptor.addShipFiles(Arrays.asList(flinkLibFolder.listFiles()));
		yarnClusterDescriptor.addShipFiles(Arrays.asList(flinkShadedHadoopDir.listFiles()));

		final ClusterSpecification clusterSpecification = new ClusterSpecification.ClusterSpecificationBuilder()
			.setMasterMemoryMB(768)
			.setTaskManagerMemoryMB(1024)
			.setSlotsPerTaskManager(1)
			.setNumberTaskManagers(1)
			.createClusterSpecification();

		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(2);

		env.addSource(new NoDataSource())
			.shuffle()
			.addSink(new DiscardingSink<>());

		final JobGraph jobGraph = env.getStreamGraph().getJobGraph();

		File testingJar = YarnTestBase.findFile("..", new YarnTestUtils.TestJarFinder("flink-yarn-tests"));

		jobGraph.addJar(new org.apache.flink.core.fs.Path(testingJar.toURI()));

		ApplicationId applicationId = null;
		ClusterClient<ApplicationId> clusterClient = null;

		try {
			// deploy a per-job cluster; getClusterId() then returns the YARN application id
			clusterClient = yarnClusterDescriptor.deployJobCluster(
				clusterSpecification,
				jobGraph,
				false);
			applicationId = clusterClient.getClusterId();

			assertThat(clusterClient, is(instanceOf(RestClusterClient.class)));
			final RestClusterClient<ApplicationId> restClusterClient = (RestClusterClient<ApplicationId>) clusterClient;

			final CompletableFuture<JobResult> jobResultCompletableFuture = restClusterClient.requestJobResult(jobGraph.getJobID());

			final JobResult jobResult = jobResultCompletableFuture.get();

			assertThat(jobResult, is(notNullValue()));
			assertThat(jobResult.getSerializedThrowable().isPresent(), is(false));
		} finally {
			if (clusterClient != null) {
				clusterClient.shutdown();
			}

			if (applicationId != null) {
				yarnClusterDescriptor.killCluster(applicationId);
			}
		}
	}
}