org.apache.flink.api.common.JobExecutionResult Java Examples

The following examples show how to use org.apache.flink.api.common.JobExecutionResult.
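A JobExecutionResult is returned by a blocking execute() call on an ExecutionEnvironment or StreamExecutionEnvironment and carries the job ID, the net runtime, and all accumulator results. A minimal read-side sketch (the job and the output format are illustrative, not taken from the examples below):

import java.util.Map;

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
env.fromElements(1, 2, 3).output(new DiscardingOutputFormat<>());

JobExecutionResult result = env.execute("sketch-job");
long runtimeMillis = result.getNetRuntime();                          // elapsed wall-clock time in milliseconds
Map<String, Object> accumulators = result.getAllAccumulatorResults(); // every registered accumulator by name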
Example #1
Source File: RemoteStreamEnvironment.java    From flink with Apache License 2.0
/**
 * Executes the job remotely.
 *
 * <p>This method can be used independently of the {@link StreamExecutionEnvironment} type.
 * @return The result of the job execution, containing elapsed time and accumulators.
 */
@PublicEvolving
public static JobExecutionResult executeRemotely(StreamExecutionEnvironment streamExecutionEnvironment,
	List<URL> jarFiles,
	String host,
	int port,
	Configuration clientConfiguration,
	List<URL> globalClasspaths,
	String jobName,
	SavepointRestoreSettings savepointRestoreSettings
) throws ProgramInvocationException {
	StreamGraph streamGraph = streamExecutionEnvironment.getStreamGraph(jobName);
	return executeRemotely(streamGraph,
		streamExecutionEnvironment.getClass().getClassLoader(),
		streamExecutionEnvironment.getConfig(),
		jarFiles,
		host,
		port,
		clientConfiguration,
		globalClasspaths,
		savepointRestoreSettings);
}
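A possible invocation of this helper; the host, port, and jar URL are placeholders, and env is an existing StreamExecutionEnvironment:

import java.net.URL;
import java.util.Collections;

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.jobgraph.SavepointRestoreSettings;

JobExecutionResult result = RemoteStreamEnvironment.executeRemotely(
	env,
	Collections.singletonList(new URL("file:///path/to/job.jar")), // placeholder jar
	"jobmanager-host",                                             // placeholder host
	8081,                                                          // placeholder port
	new Configuration(),
	Collections.<URL>emptyList(),                                  // no extra classpath entries
	"my-job",
	SavepointRestoreSettings.none());                              // start without a savepoint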
 
Example #2
Source File: ClusterClientJobClientAdapter.java    From flink with Apache License 2.0
@Override
public CompletableFuture<JobExecutionResult> getJobExecutionResult(final ClassLoader userClassloader) {
	checkNotNull(userClassloader);

	return bridgeClientRequest(
			clusterClientProvider,
			(clusterClient -> clusterClient
				.requestJobResult(jobID)
				.thenApply((jobResult) -> {
					try {
						return jobResult.toJobExecutionResult(userClassloader);
					} catch (Throwable t) {
						throw new CompletionException(
								new ProgramInvocationException("Job failed", jobID, t));
					}
				})));
}
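The returned future composes like any other CompletableFuture. A sketch of the blocking consumer side, assuming a JobClient obtained from an executeAsync() call:

JobClient jobClient = env.executeAsync("my-job");
JobExecutionResult result = jobClient
	.getJobExecutionResult(Thread.currentThread().getContextClassLoader())
	.get(); // blocks until the job finishes; completes exceptionally if the job failed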
 
Example #3
Source File: StreamPlanEnvironment.java    From Flink-CEPplus with Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {

	StreamGraph streamGraph = getStreamGraph();
	streamGraph.setJobName(jobName);

	transformations.clear();

	if (env instanceof OptimizerPlanEnvironment) {
		((OptimizerPlanEnvironment) env).setPlan(streamGraph);
	} else if (env instanceof PreviewPlanEnvironment) {
		((PreviewPlanEnvironment) env).setPreview(streamGraph.getStreamingPlanAsJSON());
	}

	throw new OptimizerPlanEnvironment.ProgramAbortException();
}
 
Example #4
Source File: JobListenerITCase.java    From flink with Apache License 2.0
@Test
public void testExecuteAsyncCallsJobListenerOnMainThreadOnBatchEnvironment() throws Exception {
	AtomicReference<Thread> threadReference = new AtomicReference<>();

	ExecutionEnvironment env = new ExecutionEnvironment(getClientConfiguration());

	env.registerJobListener(new JobListener() {
		@Override
		public void onJobSubmitted(JobClient jobClient, Throwable t) {
			threadReference.set(Thread.currentThread());
		}

		@Override
		public void onJobExecuted(JobExecutionResult jobExecutionResult, Throwable throwable) {
		}
	});

	env.fromElements(1, 2, 3, 4, 5).output(new DiscardingOutputFormat<>());
	env.executeAsync();

	assertThat(Thread.currentThread(), is(threadReference.get()));
}
 
Example #5
Source File: StreamContextEnvironment.java    From Flink-CEPplus with Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {
	Preconditions.checkNotNull(jobName, "Streaming Job name should not be null.");

	StreamGraph streamGraph = this.getStreamGraph();
	streamGraph.setJobName(jobName);

	transformations.clear();

	// execute the programs
	if (ctx instanceof DetachedEnvironment) {
		LOG.warn("Job was executed in detached mode, the results will be available on completion.");
		((DetachedEnvironment) ctx).setDetachedPlan(streamGraph);
		return DetachedEnvironment.DetachedJobExecutionResult.INSTANCE;
	} else {
		return ctx
			.getClient()
			.run(streamGraph, ctx.getJars(), ctx.getClasspaths(), ctx.getUserCodeClassLoader(), ctx.getSavepointRestoreSettings())
			.getJobExecutionResult();
	}
}
 
Example #6
Source File: JsonJobGraphGenerationTest.java    From flink with Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {
	Plan plan = createProgramPlan(jobName);

	Optimizer pc = new Optimizer(new Configuration());
	OptimizedPlan op = pc.compile(plan);

	JobGraphGenerator jgg = new JobGraphGenerator();
	JobGraph jobGraph = jgg.compileJobGraph(op);

	String jsonPlan = JsonPlanGenerator.generatePlan(jobGraph);

	// first check that the JSON is valid
	JsonParser parser = new JsonFactory().createJsonParser(jsonPlan);
	while (parser.nextToken() != null) {}

	validator.validateJson(jsonPlan);

	throw new AbortError();
}
 
Example #7
Source File: RichInputOutputITCase.java    From flink with Apache License 2.0
@Override
protected void testProgram() throws Exception {
	// test verifying the number of records read and written vs the accumulator counts

	readCalls = new ConcurrentLinkedQueue<Integer>();
	writeCalls = new ConcurrentLinkedQueue<Integer>();
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	env.createInput(new TestInputFormat(new Path(inputPath))).output(new TestOutputFormat());

	JobExecutionResult result = env.execute();
	Object a = result.getAllAccumulatorResults().get("DATA_SOURCE_ACCUMULATOR");
	Object b = result.getAllAccumulatorResults().get("DATA_SINK_ACCUMULATOR");
	long recordsRead = (Long) a;
	long recordsWritten = (Long) b;
	assertEquals(recordsRead, readCalls.size());
	assertEquals(recordsWritten, writeCalls.size());
}
 
Example #8
Source File: CollectionExecutionAccumulatorsTest.java    From flink with Apache License 2.0
@Test
public void testAccumulator() {
	try {
		final int numElements = 100;

		ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();

		env.generateSequence(1, numElements)
			.map(new CountingMapper())
			.output(new DiscardingOutputFormat<Long>());

		JobExecutionResult result = env.execute();

		assertTrue(result.getNetRuntime() >= 0);

		assertEquals(numElements, (int) result.getAccumulatorResult(ACCUMULATOR_NAME));
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
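CountingMapper and ACCUMULATOR_NAME are defined elsewhere in this test class; a minimal version consistent with the assertions above might look like this sketch:

import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;

public static class CountingMapper extends RichMapFunction<Long, Long> {
	private final IntCounter accumulator = new IntCounter();

	@Override
	public void open(Configuration parameters) {
		// register under the name that the test reads back via getAccumulatorResult(...)
		getRuntimeContext().addAccumulator(ACCUMULATOR_NAME, accumulator);
	}

	@Override
	public Long map(Long value) {
		accumulator.add(1); // count every processed element
		return value;
	}
}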
 
Example #9
Source File: JobListenerITCase.java    From flink with Apache License 2.0
@Test
public void testExecuteAsyncCallsJobListenerOnMainThreadOnStreamEnvironment() throws Exception {
	AtomicReference<Thread> threadReference = new AtomicReference<>();

	StreamExecutionEnvironment env = new StreamExecutionEnvironment(getClientConfiguration());

	env.registerJobListener(new JobListener() {
		@Override
		public void onJobSubmitted(JobClient jobClient, Throwable t) {
			threadReference.set(Thread.currentThread());
		}

		@Override
		public void onJobExecuted(JobExecutionResult jobExecutionResult, Throwable throwable) {
		}
	});

	env.fromElements(1, 2, 3, 4, 5).addSink(new DiscardingSink<>());
	env.executeAsync();

	assertThat(Thread.currentThread(), is(threadReference.get()));
}
 
Example #10
Source File: AccumulatorITCase.java    From flink with Apache License 2.0
@Override
protected void postSubmit() throws Exception {
	compareResultsByLinesInMemory(EXPECTED, resultPath);

	// Test accumulator results
	System.out.println("Accumulator results:");
	JobExecutionResult res = this.result;
	System.out.println(AccumulatorHelper.getResultsFormatted(res.getAllAccumulatorResults()));

	Assert.assertEquals(Integer.valueOf(3), res.getAccumulatorResult("num-lines"));
	Assert.assertEquals(Integer.valueOf(3), res.getIntCounterResult("num-lines"));

	Assert.assertEquals(Double.valueOf(getParallelism()), res.getAccumulatorResult("open-close-counter"));

	// Test histogram (words per line distribution)
	Map<Integer, Integer> dist = new HashMap<>();
	dist.put(1, 1); dist.put(2, 1); dist.put(3, 1);
	Assert.assertEquals(dist, res.getAccumulatorResult("words-per-line"));

	// Test distinct words (custom accumulator)
	Set<StringValue> distinctWords = new HashSet<>();
	distinctWords.add(new StringValue("one"));
	distinctWords.add(new StringValue("two"));
	distinctWords.add(new StringValue("three"));
	Assert.assertEquals(distinctWords, res.getAccumulatorResult("distinct-words"));
}
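The producing side of these accumulators is not shown in this listing. A hypothetical tokenizer registering the line counter and the histogram (the accumulator names match the assertions above; the class itself is a sketch):

import org.apache.flink.api.common.accumulators.Histogram;
import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

public static class TokenizeLine extends RichFlatMapFunction<String, String> {
	private final IntCounter numLines = new IntCounter();
	private final Histogram wordsPerLine = new Histogram();

	@Override
	public void open(Configuration parameters) {
		getRuntimeContext().addAccumulator("num-lines", numLines);
		getRuntimeContext().addAccumulator("words-per-line", wordsPerLine);
	}

	@Override
	public void flatMap(String line, Collector<String> out) {
		numLines.add(1);
		String[] words = line.split(" ");
		wordsPerLine.add(words.length); // feeds the words-per-line distribution
		for (String word : words) {
			out.collect(word);
		}
	}
}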
 
Example #11
Source File: ExecutionEnvironment.java    From flink with Apache License 2.0
/**
 * Triggers the program execution. The environment will execute all parts of the program that have
 * resulted in a "sink" operation. Sink operations are, for example, printing results ({@link DataSet#print()}),
 * writing results (e.g. {@link DataSet#writeAsText(String)} or
 * {@link DataSet#write(org.apache.flink.api.common.io.FileOutputFormat, String)}), or other generic
 * data sinks created with {@link DataSet#output(org.apache.flink.api.common.io.OutputFormat)}.
 *
 * <p>The program execution will be logged and displayed with the given job name.
 *
 * @return The result of the job execution, containing elapsed time and accumulators.
 * @throws Exception Thrown if the program execution fails.
 */
public JobExecutionResult execute(String jobName) throws Exception {
	final JobClient jobClient = executeAsync(jobName);

	try {
		if (configuration.getBoolean(DeploymentOptions.ATTACHED)) {
			lastJobExecutionResult = jobClient.getJobExecutionResult(userClassloader).get();
		} else {
			lastJobExecutionResult = new DetachedJobExecutionResult(jobClient.getJobID());
		}

		jobListeners.forEach(
				jobListener -> jobListener.onJobExecuted(lastJobExecutionResult, null));

	} catch (Throwable t) {
		jobListeners.forEach(jobListener -> {
			jobListener.onJobExecuted(null, ExceptionUtils.stripExecutionException(t));
		});
		ExceptionUtils.rethrowException(t);
	}

	return lastJobExecutionResult;
}
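Whether the attached branch runs is controlled by DeploymentOptions.ATTACHED; in a detached submission the DetachedJobExecutionResult only carries the job ID, and its runtime and accumulator accessors throw. A sketch of forcing detached mode, assuming the environment is constructed from this Configuration:

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.DeploymentOptions;

Configuration configuration = new Configuration();
configuration.setBoolean(DeploymentOptions.ATTACHED, false); // submit without waiting for the result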
 
Example #12
Source File: ScalaShellRemoteStreamEnvironment.java    From Flink-CEPplus with Apache License 2.0
/**
 * Executes the remote job.
 *
 * @param streamGraph
 *            Stream Graph to execute
 * @param jarFiles
 * 			  List of jar file URLs to ship to the cluster
 * @return The result of the job execution, containing elapsed time and accumulators.
 */
@Override
protected JobExecutionResult executeRemotely(StreamGraph streamGraph, List<URL> jarFiles) throws ProgramInvocationException {
	URL jarUrl;
	try {
		jarUrl = flinkILoop.writeFilesToDisk().getAbsoluteFile().toURI().toURL();
	} catch (MalformedURLException e) {
		throw new ProgramInvocationException("Could not write the user code classes to disk.",
			streamGraph.getJobGraph().getJobID(), e);
	}

	List<URL> allJarFiles = new ArrayList<>(jarFiles.size() + 1);
	allJarFiles.addAll(jarFiles);
	allJarFiles.add(jarUrl);

	return super.executeRemotely(streamGraph, allJarFiles);
}
 
Example #13
Source File: CollectionExecutionAccumulatorsTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testAccumulator() {
	try {
		final int numElements = 100;

		ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();

		env.generateSequence(1, numElements)
			.map(new CountingMapper())
			.output(new DiscardingOutputFormat<Long>());

		JobExecutionResult result = env.execute();

		assertTrue(result.getNetRuntime() >= 0);

		assertEquals(numElements, (int) result.getAccumulatorResult(ACCUMULATOR_NAME));
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example #14
Source File: JobListenerITCase.java    From flink with Apache License 2.0
@Test
public void testExecuteCallsJobListenerOnMainThreadOnStreamEnvironment() throws Exception {
	AtomicReference<Thread> threadReference = new AtomicReference<>();

	StreamExecutionEnvironment env = new StreamExecutionEnvironment(getClientConfiguration());

	env.registerJobListener(new JobListener() {
		@Override
		public void onJobSubmitted(JobClient jobClient, Throwable t) {
			threadReference.set(Thread.currentThread());
		}

		@Override
		public void onJobExecuted(JobExecutionResult jobExecutionResult, Throwable throwable) {
		}
	});

	env.fromElements(1, 2, 3, 4, 5).addSink(new DiscardingSink<>());
	env.execute();

	assertThat(Thread.currentThread(), is(threadReference.get()));
}
 
Example #15
Source File: PreviewPlanEnvironment.java    From Flink-CEPplus with Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {
	this.plan = createProgramPlan(jobName);
	this.previewPlan = Optimizer.createPreOptimizedPlan(plan);

	// do not go on with anything now!
	throw new OptimizerPlanEnvironment.ProgramAbortException();
}
 
Example #16
Source File: FlinkPipelineRunner.java    From beam with Apache License 2.0
private PortablePipelineResult createPortablePipelineResult(
    JobExecutionResult result, PipelineOptions options) {
  // The package of DetachedJobExecutionResult changed in Flink 1.10.
  // Refer to https://github.com/apache/flink/commit/c36b35e6876ecdc717dade653e8554f9d8b543c9 for
  // details.
  String resultClassName = result.getClass().getCanonicalName();
  if (resultClassName.equals(
          "org.apache.flink.client.program.DetachedEnvironment.DetachedJobExecutionResult")
      || resultClassName.equals("org.apache.flink.core.execution.DetachedJobExecutionResult")) {
    LOG.info("Pipeline submitted in Detached mode");
    // no metricsPusher because metrics are not supported in detached mode
    return new FlinkPortableRunnerResult.Detached();
  } else {
    LOG.info("Execution finished in {} msecs", result.getNetRuntime());
    Map<String, Object> accumulators = result.getAllAccumulatorResults();
    if (accumulators != null && !accumulators.isEmpty()) {
      LOG.info("Final accumulator values:");
      for (Map.Entry<String, Object> entry : result.getAllAccumulatorResults().entrySet()) {
        LOG.info("{} : {}", entry.getKey(), entry.getValue());
      }
    }
    FlinkPortableRunnerResult flinkRunnerResult =
        new FlinkPortableRunnerResult(accumulators, result.getNetRuntime());
    MetricsPusher metricsPusher =
        new MetricsPusher(
            flinkRunnerResult.getMetricsContainerStepMap(),
            options.as(MetricsOptions.class),
            flinkRunnerResult);
    metricsPusher.start();
    return flinkRunnerResult;
  }
}
 
Example #17
Source File: CollectionExecutor.java    From flink with Apache License 2.0
public JobExecutionResult execute(Plan program) throws Exception {
	long startTime = System.currentTimeMillis();

	initCache(program.getCachedFiles());
	Collection<? extends GenericDataSinkBase<?>> sinks = program.getDataSinks();
	for (Operator<?> sink : sinks) {
		execute(sink);
	}
	
	long endTime = System.currentTimeMillis();
	Map<String, OptionalFailure<Object>> accumulatorResults = AccumulatorHelper.toResultMap(accumulators);
	return new JobExecutionResult(null, endTime - startTime, accumulatorResults);
}
 
Example #18
Source File: ContextEnvironment.java    From Flink-CEPplus with Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {
	Plan p = createProgramPlan(jobName);
	JobWithJars toRun = new JobWithJars(p, this.jarFilesToAttach, this.classpathsToAttach,
			this.userCodeClassLoader);
	this.lastJobExecutionResult = client.run(toRun, getParallelism(), savepointSettings).getJobExecutionResult();
	return this.lastJobExecutionResult;
}
 
Example #19
Source File: CollectionEnvironment.java    From flink with Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {
	Plan p = createProgramPlan(jobName);

	// Note the inversion: object reuse being enabled means the executor's mutable-object safe mode is disabled.
	CollectionExecutor exec = new CollectionExecutor(getConfig());
	this.lastJobExecutionResult = exec.execute(p);
	return this.lastJobExecutionResult;
}
 
Example #20
Source File: OptimizerPlanEnvironment.java    From Flink-CEPplus with Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {
	Plan plan = createProgramPlan(jobName);
	this.optimizerPlan = compiler.compile(plan);

	// do not go on with anything now!
	throw new ProgramAbortException();
}
 
Example #21
Source File: DataSet.java    From flink with Apache License 2.0
/**
 * Convenience method to get the count (number of elements) of a DataSet.
 *
 * @return A long integer that represents the number of elements in the data set.
 */
public long count() throws Exception {
	final String id = new AbstractID().toString();

	output(new Utils.CountHelper<T>(id)).name("count()");

	JobExecutionResult res = getExecutionEnvironment().execute();
	return res.<Long> getAccumulatorResult(id);
}
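Because the count is transported through an accumulator, calling count() eagerly triggers a job execution. A short usage sketch:

import org.apache.flink.api.java.ExecutionEnvironment;

ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
long n = env.fromElements("a", "b", "c").count(); // runs the job and returns 3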
 
Example #22
Source File: RemoteStreamExecutionEnvironmentTest.java    From flink with Apache License 2.0
/**
 * Verifies that the port passed to the RemoteStreamEnvironment is used for connecting to the cluster.
 */
@Test
public void testPortForwarding() throws Exception {

	String host = "fakeHost";
	int port = 99;
	JobExecutionResult expectedResult = new JobExecutionResult(null, 0, null);

	RestClusterClient mockedClient = Mockito.mock(RestClusterClient.class);
	when(mockedClient.run(Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any(), Mockito.any()))
		.thenReturn(expectedResult);

	PowerMockito.whenNew(RestClusterClient.class).withAnyArguments().thenAnswer((invocation) -> {
			Object[] args = invocation.getArguments();
			Configuration config = (Configuration) args[0];

			Assert.assertEquals(host, config.getString(RestOptions.ADDRESS));
			Assert.assertEquals(port, config.getInteger(RestOptions.PORT));
			return mockedClient;
		}
	);

	final Configuration clientConfiguration = new Configuration();
	final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(
		host, port, clientConfiguration);
	env.fromElements(1).map(x -> x * 2);
	JobExecutionResult actualResult = env.execute("fakeJobName");
	Assert.assertEquals(expectedResult, actualResult);
}
 
Example #23
Source File: RemoteStreamEnvironment.java    From flink with Apache License 2.0
/**
 * Executes the remote job.
 *
 * <p>Note: This method exposes stream graph internals in the public API, but cannot be removed for backward compatibility.
 * @param streamGraph
 *            Stream Graph to execute
 * @param jarFiles
 * 			  List of jar file URLs to ship to the cluster
 * @return The result of the job execution, containing elapsed time and accumulators.
 */
@Deprecated
protected JobExecutionResult executeRemotely(StreamGraph streamGraph, List<URL> jarFiles) throws ProgramInvocationException {
	return executeRemotely(streamGraph,
		this.getClass().getClassLoader(),
		getConfig(),
		jarFiles,
		host,
		port,
		clientConfiguration,
		globalClasspaths,
		savepointRestoreSettings);
}
 
Example #24
Source File: RemoteExecutor.java    From flink with Apache License 2.0
@Override
public JobExecutionResult executePlan(Plan plan) throws Exception {
	if (plan == null) {
		throw new IllegalArgumentException("The plan may not be null.");
	}

	JobWithJars p = new JobWithJars(plan, this.jarFiles, this.globalClasspaths);
	return executePlanWithJars(p);
}
 
Example #25
Source File: TestEnvironment.java    From flink with Apache License 2.0
@Override
public JobExecutionResult getLastJobExecutionResult() {
	if (lastEnv == null) {
		return lastJobExecutionResult;
	}
	else {
		return lastEnv.getLastJobExecutionResult();
	}
}
 
Example #26
Source File: ProgramDeployer.java    From flink with Apache License 2.0
private <T> void deployJobOnNewCluster(
		ClusterDescriptor<T> clusterDescriptor,
		JobGraph jobGraph,
		Result<T> result,
		ClassLoader classLoader) throws Exception {
	ClusterClient<T> clusterClient = null;
	try {
		// deploy job cluster with job attached
		clusterClient = clusterDescriptor.deployJobCluster(context.getClusterSpec(), jobGraph, false);
		// save information about the new cluster
		result.setClusterInformation(clusterClient.getClusterId(), clusterClient.getWebInterfaceURL());
		// get result
		if (awaitJobResult) {
			// we need to hard cast for now
			final JobExecutionResult jobResult = ((RestClusterClient<T>) clusterClient)
					.requestJobResult(jobGraph.getJobID())
					.get()
					.toJobExecutionResult(context.getClassLoader()); // throws exception if job fails
			executionResultBucket.add(jobResult);
		}
	} finally {
		try {
			if (clusterClient != null) {
				clusterClient.shutdown();
			}
		} catch (Exception e) {
			// ignore
		}
	}
}
 