Java Code Examples for org.apache.flink.api.common.JobExecutionResult

The following examples show how to use org.apache.flink.api.common.JobExecutionResult. These examples are extracted from open source projects; each one is annotated with its source project, source file, and license.
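Before the extracted examples, here is a minimal, self-contained sketch of the typical call pattern (the pipeline and job name are illustrative, not taken from any project below): trigger execution, then read the net runtime and the accumulator map from the returned JobExecutionResult.

import java.util.Map;

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;

public class JobExecutionResultSketch {
	public static void main(String[] args) throws Exception {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		// A trivial pipeline; the sink is what gives execute() something to run.
		env.generateSequence(1, 100).output(new DiscardingOutputFormat<Long>());

		JobExecutionResult result = env.execute("sketch-job");

		long runtimeMs = result.getNetRuntime(); // net execution time in milliseconds
		Map<String, Object> accumulators = result.getAllAccumulatorResults();
		System.out.println("Ran in " + runtimeMs + " ms with " + accumulators.size() + " accumulator(s).");
	}
}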
Example 1
Source Project: flink   Source File: ExecutionEnvironment.java    License: Apache License 2.0
/**
 * Triggers the program execution. The environment will execute all parts of the program that have
 * resulted in a "sink" operation. Sink operations are, for example, printing results ({@link DataSet#print()}),
 * writing results (e.g. {@link DataSet#writeAsText(String)} or
 * {@link DataSet#write(org.apache.flink.api.common.io.FileOutputFormat, String)}), or other generic
 * data sinks created with {@link DataSet#output(org.apache.flink.api.common.io.OutputFormat)}.
 *
 * <p>The program execution will be logged and displayed with the given job name.
 *
 * @return The result of the job execution, containing elapsed time and accumulators.
 * @throws Exception Thrown, if the program execution fails.
 */
public JobExecutionResult execute(String jobName) throws Exception {
	final JobClient jobClient = executeAsync(jobName);

	try {
		if (configuration.getBoolean(DeploymentOptions.ATTACHED)) {
			lastJobExecutionResult = jobClient.getJobExecutionResult(userClassloader).get();
		} else {
			lastJobExecutionResult = new DetachedJobExecutionResult(jobClient.getJobID());
		}

		jobListeners.forEach(
				jobListener -> jobListener.onJobExecuted(lastJobExecutionResult, null));

	} catch (Throwable t) {
		jobListeners.forEach(jobListener -> {
			jobListener.onJobExecuted(null, ExceptionUtils.stripExecutionException(t));
		});
		ExceptionUtils.rethrowException(t);
	}

	return lastJobExecutionResult;
}
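The attached branch above blocks on JobClient.getJobExecutionResult(...). A hedged caller-side sketch of the same pattern through executeAsync, using the 1.10-era signature shown in this example (the class-loader choice is an assumption):

	JobClient jobClient = env.executeAsync("sketch-job");
	JobExecutionResult result = jobClient
			.getJobExecutionResult(Thread.currentThread().getContextClassLoader()) // class loader is illustrative
			.get(); // blocks until the job finishes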
 
Example 2
Source Project: flink   Source File: CollectionExecutionAccumulatorsTest.java    License: Apache License 2.0
@Test
public void testAccumulator() {
	try {
		final int numElements = 100;

		ExecutionEnvironment env = ExecutionEnvironment.createCollectionsEnvironment();

		env.generateSequence(1, numElements)
			.map(new CountingMapper())
			.output(new DiscardingOutputFormat<Long>());

		JobExecutionResult result = env.execute();

		assertTrue(result.getNetRuntime() >= 0);

		assertEquals(numElements, (int) result.getAccumulatorResult(ACCUMULATOR_NAME));
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
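CountingMapper is referenced but not shown in this excerpt. A plausible sketch under Flink's accumulator API (the real class in the test may differ):

	private static class CountingMapper extends RichMapFunction<Long, Long> {
		private final IntCounter count = new IntCounter();

		@Override
		public void open(Configuration parameters) {
			// Register under the name the assertion reads back via getAccumulatorResult.
			getRuntimeContext().addAccumulator(ACCUMULATOR_NAME, count);
		}

		@Override
		public Long map(Long value) {
			count.add(1);
			return value;
		}
	}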
 
Example 3
Source Project: flink   Source File: AccumulatorITCase.java    License: Apache License 2.0
@Override
protected void postSubmit() throws Exception {
	compareResultsByLinesInMemory(EXPECTED, resultPath);

	// Test accumulator results
	System.out.println("Accumulator results:");
	JobExecutionResult res = this.result;
	System.out.println(AccumulatorHelper.getResultsFormatted(res.getAllAccumulatorResults()));

	Assert.assertEquals(Integer.valueOf(3), res.getAccumulatorResult("num-lines"));
	Assert.assertEquals(Integer.valueOf(3), res.getIntCounterResult("num-lines"));

	Assert.assertEquals(Double.valueOf(getParallelism()), res.getAccumulatorResult("open-close-counter"));

	// Test histogram (words per line distribution)
	Map<Integer, Integer> dist = new HashMap<>();
	dist.put(1, 1); dist.put(2, 1); dist.put(3, 1);
	Assert.assertEquals(dist, res.getAccumulatorResult("words-per-line"));

	// Test distinct words (custom accumulator)
	Set<StringValue> distinctWords = new HashSet<>();
	distinctWords.add(new StringValue("one"));
	distinctWords.add(new StringValue("two"));
	distinctWords.add(new StringValue("three"));
	Assert.assertEquals(distinctWords, res.getAccumulatorResult("distinct-words"));
}
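The "distinct-words" assertion above relies on a custom accumulator that is not shown in the excerpt. A plausible sketch of such a set accumulator against Flink's Accumulator interface (the test's actual implementation may differ):

	public static class SetAccumulator<T extends Serializable> implements Accumulator<T, HashSet<T>> {
		private final HashSet<T> set = new HashSet<>();

		@Override
		public void add(T value) {
			set.add(value);
		}

		@Override
		public HashSet<T> getLocalValue() {
			return set;
		}

		@Override
		public void resetLocal() {
			set.clear();
		}

		@Override
		public void merge(Accumulator<T, HashSet<T>> other) {
			set.addAll(other.getLocalValue());
		}

		@Override
		public Accumulator<T, HashSet<T>> clone() {
			SetAccumulator<T> copy = new SetAccumulator<>();
			copy.set.addAll(set);
			return copy;
		}
	}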
 
Example 4
Source Project: flink   Source File: JobListenerITCase.java    License: Apache License 2.0
@Test
public void testExecuteCallsJobListenerOnMainThreadOnStreamEnvironment() throws Exception {
	AtomicReference<Thread> threadReference = new AtomicReference<>();

	StreamExecutionEnvironment env = new StreamExecutionEnvironment(getClientConfiguration());

	env.registerJobListener(new JobListener() {
		@Override
		public void onJobSubmitted(JobClient jobClient, Throwable t) {
			threadReference.set(Thread.currentThread());
		}

		@Override
		public void onJobExecuted(JobExecutionResult jobExecutionResult, Throwable throwable) {
		}
	});

	env.fromElements(1, 2, 3, 4, 5).addSink(new DiscardingSink<>());
	env.execute();

	assertThat(Thread.currentThread(), is(threadReference.get()));
}
 
Example 5
Source Project: flink   Source File: RemoteStreamEnvironment.java    License: Apache License 2.0
/**
 * Executes the job remotely.
 *
 * <p>This method can be used independently of the {@link StreamExecutionEnvironment} type.
 * @return The result of the job execution, containing elapsed time and accumulators.
 */
@PublicEvolving
public static JobExecutionResult executeRemotely(StreamExecutionEnvironment streamExecutionEnvironment,
	List<URL> jarFiles,
	String host,
	int port,
	Configuration clientConfiguration,
	List<URL> globalClasspaths,
	String jobName,
	SavepointRestoreSettings savepointRestoreSettings
) throws ProgramInvocationException {
	StreamGraph streamGraph = streamExecutionEnvironment.getStreamGraph(jobName);
	return executeRemotely(streamGraph,
		streamExecutionEnvironment.getClass().getClassLoader(),
		streamExecutionEnvironment.getConfig(),
		jarFiles,
		host,
		port,
		clientConfiguration,
		globalClasspaths,
		savepointRestoreSettings);
}
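A hedged usage sketch for the static overload above; the host, port, and jar path are illustrative placeholders:

	List<URL> jars = Collections.singletonList(new File("/path/to/job.jar").toURI().toURL());
	JobExecutionResult result = RemoteStreamEnvironment.executeRemotely(
			env,                              // an existing StreamExecutionEnvironment
			jars,
			"jobmanager-host",                // illustrative host
			6123,                             // illustrative port
			new Configuration(),
			Collections.<URL>emptyList(),     // no extra classpath entries
			"remote-job",
			SavepointRestoreSettings.none()); // start without restoring a savepoint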
 
Example 6
/**
 * Executes the remote job.
 *
 * @param streamGraph Stream graph to execute
 * @param jarFiles    List of jar file URLs to ship to the cluster
 * @return The result of the job execution, containing elapsed time and accumulators.
 */
@Override
protected JobExecutionResult executeRemotely(StreamGraph streamGraph, List<URL> jarFiles) throws ProgramInvocationException {
	URL jarUrl;
	try {
		jarUrl = flinkILoop.writeFilesToDisk().getAbsoluteFile().toURI().toURL();
	} catch (MalformedURLException e) {
		throw new ProgramInvocationException("Could not write the user code classes to disk.",
			streamGraph.getJobGraph().getJobID(), e);
	}

	List<URL> allJarFiles = new ArrayList<>(jarFiles.size() + 1);
	allJarFiles.addAll(jarFiles);
	allJarFiles.add(jarUrl);

	return super.executeRemotely(streamGraph, allJarFiles);
}
 
Example 7
Source Project: flink   Source File: ClusterClientJobClientAdapter.java    License: Apache License 2.0
@Override
public CompletableFuture<JobExecutionResult> getJobExecutionResult(final ClassLoader userClassloader) {
	checkNotNull(userClassloader);

	return bridgeClientRequest(
			clusterClientProvider,
			(clusterClient -> clusterClient
				.requestJobResult(jobID)
				.thenApply((jobResult) -> {
					try {
						return jobResult.toJobExecutionResult(userClassloader);
					} catch (Throwable t) {
						throw new CompletionException(
								new ProgramInvocationException("Job failed", jobID, t));
					}
				})));
}
 
Example 8
Source Project: Flink-CEPplus   Source File: StreamPlanEnvironment.java    License: Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {

	StreamGraph streamGraph = getStreamGraph();
	streamGraph.setJobName(jobName);

	transformations.clear();

	if (env instanceof OptimizerPlanEnvironment) {
		((OptimizerPlanEnvironment) env).setPlan(streamGraph);
	} else if (env instanceof PreviewPlanEnvironment) {
		((PreviewPlanEnvironment) env).setPreview(streamGraph.getStreamingPlanAsJSON());
	}

	throw new OptimizerPlanEnvironment.ProgramAbortException();
}
 
Example 9
Source Project: flink   Source File: JobListenerITCase.java    License: Apache License 2.0
@Test
public void testExecuteAsyncCallsJobListenerOnMainThreadOnBatchEnvironment() throws Exception {
	AtomicReference<Thread> threadReference = new AtomicReference<>();

	ExecutionEnvironment env = new ExecutionEnvironment(getClientConfiguration());

	env.registerJobListener(new JobListener() {
		@Override
		public void onJobSubmitted(JobClient jobClient, Throwable t) {
			threadReference.set(Thread.currentThread());
		}

		@Override
		public void onJobExecuted(JobExecutionResult jobExecutionResult, Throwable throwable) {
		}
	});

	env.fromElements(1, 2, 3, 4, 5).output(new DiscardingOutputFormat<>());
	env.executeAsync();

	assertThat(Thread.currentThread(), is(threadReference.get()));
}
 
Example 10
Source Project: Flink-CEPplus   Source File: StreamContextEnvironment.java    License: Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {
	Preconditions.checkNotNull(jobName, "Streaming Job name should not be null.");

	StreamGraph streamGraph = this.getStreamGraph();
	streamGraph.setJobName(jobName);

	transformations.clear();

	// execute the programs
	if (ctx instanceof DetachedEnvironment) {
		LOG.warn("Job was executed in detached mode, the results will be available on completion.");
		((DetachedEnvironment) ctx).setDetachedPlan(streamGraph);
		return DetachedEnvironment.DetachedJobExecutionResult.INSTANCE;
	} else {
		return ctx
			.getClient()
			.run(streamGraph, ctx.getJars(), ctx.getClasspaths(), ctx.getUserCodeClassLoader(), ctx.getSavepointRestoreSettings())
			.getJobExecutionResult();
	}
}
 
Example 11
Source Project: flink   Source File: JsonJobGraphGenerationTest.java    License: Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {
	Plan plan = createProgramPlan(jobName);

	Optimizer pc = new Optimizer(new Configuration());
	OptimizedPlan op = pc.compile(plan);

	JobGraphGenerator jgg = new JobGraphGenerator();
	JobGraph jobGraph = jgg.compileJobGraph(op);

	String jsonPlan = JsonPlanGenerator.generatePlan(jobGraph);

	// first check that the JSON is valid
	JsonParser parser = new JsonFactory().createJsonParser(jsonPlan);
	while (parser.nextToken() != null) {}

	validator.validateJson(jsonPlan);

	throw new AbortError();
}
 
Example 12
Source Project: flink   Source File: RichInputOutputITCase.java    License: Apache License 2.0
@Override
protected void testProgram() throws Exception {
	// test verifying the number of records read and written vs the accumulator counts

	readCalls = new ConcurrentLinkedQueue<Integer>();
	writeCalls = new ConcurrentLinkedQueue<Integer>();
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	env.createInput(new TestInputFormat(new Path(inputPath))).output(new TestOutputFormat());

	JobExecutionResult result = env.execute();
	Object a = result.getAllAccumulatorResults().get("DATA_SOURCE_ACCUMULATOR");
	Object b = result.getAllAccumulatorResults().get("DATA_SINK_ACCUMULATOR");
	long recordsRead = (Long) a;
	long recordsWritten = (Long) b;
	assertEquals(recordsRead, readCalls.size());
	assertEquals(recordsWritten, writeCalls.size());
}
 
Example 13
Source Project: flink   Source File: JobListenerITCase.java    License: Apache License 2.0
@Test
public void testExecuteAsyncCallsJobListenerOnMainThreadOnStreamEnvironment() throws Exception {
	AtomicReference<Thread> threadReference = new AtomicReference<>();

	StreamExecutionEnvironment env = new StreamExecutionEnvironment(getClientConfiguration());

	env.registerJobListener(new JobListener() {
		@Override
		public void onJobSubmitted(JobClient jobClient, Throwable t) {
			threadReference.set(Thread.currentThread());
		}

		@Override
		public void onJobExecuted(JobExecutionResult jobExecutionResult, Throwable throwable) {
		}
	});

	env.fromElements(1, 2, 3, 4, 5).addSink(new DiscardingSink<>());
	env.executeAsync();

	assertThat(Thread.currentThread(), is(threadReference.get()));
}
 
Example 14
Source Project: flink   Source File: CollectionTestEnvironment.java    License: Apache License 2.0
@Override
public JobExecutionResult getLastJobExecutionResult() {
	if (lastEnv == null) {
		return this.lastJobExecutionResult;
	}
	else {
		return lastEnv.getLastJobExecutionResult();
	}
}
 
Example 15
Source Project: flink   Source File: TestEnvironment.java    License: Apache License 2.0
@Override
public JobExecutionResult getLastJobExecutionResult() {
	if (lastEnv == null) {
		return lastJobExecutionResult;
	}
	else {
		return lastEnv.getLastJobExecutionResult();
	}
}
 
Example 16
Source Project: flink   Source File: ExecutorDiscoveryAndJobClientTest.java    License: Apache License 2.0
private void testHelper(final boolean attached) throws Exception {
	final Configuration configuration = new Configuration();
	configuration.set(DeploymentOptions.TARGET, EXEC_NAME);
	configuration.set(DeploymentOptions.ATTACHED, attached);

	final JobExecutionResult result = executeTestJobBasedOnConfig(configuration);
	assertThat(result.isJobExecutionResult(), is(attached));
}
 
Example 17
Source Project: beam   Source File: FlinkPipelineExecutionEnvironment.java    License: Apache License 2.0
/** Launches the program execution. */
public JobExecutionResult executePipeline() throws Exception {
  final String jobName = options.getJobName();

  if (flinkBatchEnv != null) {
    return flinkBatchEnv.execute(jobName);
  } else if (flinkStreamEnv != null) {
    return flinkStreamEnv.execute(jobName);
  } else {
    throw new IllegalStateException("The Pipeline has not yet been translated.");
  }
}
 
Example 18
Source Project: Flink-CEPplus   Source File: AccumulatorErrorITCase.java    License: Apache License 2.0
private static void assertAccumulatorsShouldFail(JobExecutionResult result) {
	try {
		result.getAllAccumulatorResults();
		fail("Should have failed");
	}
	catch (Exception ex) {
		assertTrue(ExceptionUtils.findThrowable(ex, CustomException.class).isPresent());
	}
}
 
Example 19
Source Project: Flink-CEPplus   Source File: MiscellaneousIssuesITCase.java    License: Apache License 2.0
@Test
public void testAccumulatorsAfterNoOp() {

	final String accName = "test_accumulator";

	try {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(6);
		env.getConfig().disableSysoutLogging();

		env.generateSequence(1, 1000000)
				.rebalance()
				.flatMap(new RichFlatMapFunction<Long, Long>() {

					private LongCounter counter;

					@Override
					public void open(Configuration parameters) {
						counter = getRuntimeContext().getLongCounter(accName);
					}

					@Override
					public void flatMap(Long value, Collector<Long> out) {
						counter.add(1L);
					}
				})
				.output(new DiscardingOutputFormat<Long>());

		JobExecutionResult result = env.execute();

		assertEquals(1000000L, result.getAllAccumulatorResults().get(accName));
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
 
Example 20
private void testProgram(
		final MiniClusterWithClientResource cluster,
		final int dataVolumeGb,
		final boolean useForwarder,
		final boolean isSlowSender,
		final boolean isSlowReceiver,
		final int parallelism) throws Exception {
	ClusterClient<?> client = cluster.getClusterClient();
	client.setDetached(false);
	client.setPrintStatusDuringExecution(false);

	JobExecutionResult jer = (JobExecutionResult) client.submitJob(
		createJobGraph(
			dataVolumeGb,
			useForwarder,
			isSlowSender,
			isSlowReceiver,
			parallelism),
		getClass().getClassLoader());

	// GB -> Mbit: 1 GB = 1024 MB and 1 byte = 8 bits, so the factor is 1024 * 8 = 8192.
	long dataVolumeMbit = dataVolumeGb * 8192;
	long runtimeSecs = jer.getNetRuntime(TimeUnit.SECONDS);

	int mbitPerSecond = (int) (((double) dataVolumeMbit) / runtimeSecs);

	LOG.info(String.format("Test finished with throughput of %d MBit/s (runtime [secs]: %d, " +
		"data volume [gb/mbits]: %d/%d)", mbitPerSecond, runtimeSecs, dataVolumeGb, dataVolumeMbit));
}
 
Example 21
Source Project: flink   Source File: SessionWindowITCase.java    License: Apache License 2.0
private void runTest(
		SourceFunction<SessionEvent<Integer, TestEventPayload>> dataSource,
		WindowFunction<SessionEvent<Integer, TestEventPayload>,
				String, Tuple, TimeWindow> windowFunction) throws Exception {

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	WindowedStream<SessionEvent<Integer, TestEventPayload>, Tuple, TimeWindow> windowedStream =
			env.addSource(dataSource).keyBy("sessionKey")
			.window(EventTimeSessionWindows.withGap(Time.milliseconds(MAX_SESSION_EVENT_GAP_MS)));

	if (ALLOWED_LATENESS_MS != Long.MAX_VALUE) {
		windowedStream = windowedStream.allowedLateness(Time.milliseconds(ALLOWED_LATENESS_MS));
	}

	if (PURGE_WINDOW_ON_FIRE) {
		windowedStream = windowedStream.trigger(PurgingTrigger.of(EventTimeTrigger.create()));
	}

	windowedStream.apply(windowFunction).print();
	JobExecutionResult result = env.execute();

	// Check that the overall event counts match our expectations. Remember that each late event
	// within the allowed lateness will trigger a window!
	Assert.assertEquals(
		(LATE_EVENTS_PER_SESSION + 1) * NUMBER_OF_SESSIONS * EVENTS_PER_SESSION,
		(long) result.getAccumulatorResult(SESSION_COUNTER_ON_TIME_KEY));
	Assert.assertEquals(
		NUMBER_OF_SESSIONS * (LATE_EVENTS_PER_SESSION * (LATE_EVENTS_PER_SESSION + 1) / 2),
		(long) result.getAccumulatorResult(SESSION_COUNTER_LATE_KEY));
}
 
Example 22
Source Project: Flink-CEPplus   Source File: SimpleRecoveryITCaseBase.java    License: Apache License 2.0
private void executeAndRunAssertions(ExecutionEnvironment env) throws Exception {
	try {
		JobExecutionResult result = env.execute();
		assertTrue(result.getNetRuntime() >= 0);
		assertNotNull(result.getAllAccumulatorResults());
		assertTrue(result.getAllAccumulatorResults().isEmpty());
	}
	catch (JobExecutionException e) {
		fail("The program should have succeeded on the second run");
	}
}
 
Example 23
Source Project: flink   Source File: AccumulatorErrorITCase.java    License: Apache License 2.0
private static void assertAccumulatorsShouldFail(JobExecutionResult result) {
	try {
		result.getAllAccumulatorResults();
		fail("Should have failed");
	}
	catch (Exception ex) {
		assertTrue(findThrowable(ex, CustomException.class).isPresent());
	}
}
 
Example 24
Source Project: Flink-CEPplus   Source File: DataSetUtils.java    License: Apache License 2.0
/**
 * Convenience method to get the count (number of elements) of a DataSet
 * as well as the checksum (sum over element hashes).
 *
 * @return A ChecksumHashCode that represents the count and checksum of elements in the data set.
 * @deprecated replaced with {@code org.apache.flink.graph.asm.dataset.ChecksumHashCode} in Gelly
 */
@Deprecated
public static <T> Utils.ChecksumHashCode checksumHashCode(DataSet<T> input) throws Exception {
	final String id = new AbstractID().toString();

	input.output(new Utils.ChecksumHashCodeHelper<T>(id)).name("ChecksumHashCode");

	JobExecutionResult res = input.getExecutionEnvironment().execute();
	return res.<Utils.ChecksumHashCode> getAccumulatorResult(id);
}
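A hedged usage sketch for the helper above (the DataSet is illustrative, and the accessor names on Utils.ChecksumHashCode are an assumption):

	DataSet<String> words = env.fromElements("one", "two", "three");
	Utils.ChecksumHashCode checksum = DataSetUtils.checksumHashCode(words);
	// getCount()/getChecksum() assumed as the accessors on Utils.ChecksumHashCode.
	System.out.println("count=" + checksum.getCount() + ", checksum=" + checksum.getChecksum());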
 
Example 25
Source Project: flink   Source File: DetachedEnvironment.java    License: Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {
	Plan p = createProgramPlan(jobName);
	setDetachedPlan(ClusterClient.getOptimizedPlan(client.compiler, p, getParallelism()));
	LOG.warn("Job was executed in detached mode, the results will be available on completion.");
	this.lastJobExecutionResult = DetachedJobExecutionResult.INSTANCE;
	return this.lastJobExecutionResult;
}