org.apache.flink.api.common.accumulators.AccumulatorHelper Java Examples

The following examples show how to use org.apache.flink.api.common.accumulators.AccumulatorHelper. They are taken from open-source projects; the originating project and source file are listed above each example.
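Before the project-sourced examples, here is a minimal, self-contained sketch of the typical round trip: register an accumulator in a rich function, run the job, and format the merged results with AccumulatorHelper. This sketch is illustrative rather than taken from any project below; it assumes the Flink 1.x DataSet API that these examples use, and the class name and accumulator name are made up.

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.accumulators.AccumulatorHelper;
import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.configuration.Configuration;

public class AccumulatorRoundTripSketch {

	public static void main(String[] args) throws Exception {
		final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		env.fromElements("one", "two", "three")
			.map(new RichMapFunction<String, String>() {
				// Hypothetical accumulator; its name must be unique within the job.
				private final IntCounter numLines = new IntCounter();

				@Override
				public void open(Configuration parameters) {
					getRuntimeContext().addAccumulator("num-lines", numLines);
				}

				@Override
				public String map(String value) {
					numLines.add(1); // incremented per subtask, merged at the JobManager
					return value;
				}
			})
			.output(new DiscardingOutputFormat<>());

		JobExecutionResult result = env.execute("accumulator round trip");

		// AccumulatorHelper renders the merged result map as a readable string.
		System.out.println(AccumulatorHelper.getResultsFormatted(result.getAllAccumulatorResults()));
	}
}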
Example #1
Source File: AccumulatorITCase.java    From Flink-CEPplus with Apache License 2.0
@Override
protected void postSubmit() throws Exception {
	compareResultsByLinesInMemory(EXPECTED, resultPath);

	// Test accumulator results
	System.out.println("Accumulator results:");
	JobExecutionResult res = this.result;
	System.out.println(AccumulatorHelper.getResultsFormatted(res.getAllAccumulatorResults()));

	Assert.assertEquals(Integer.valueOf(3), res.getAccumulatorResult("num-lines"));
	Assert.assertEquals(Integer.valueOf(3), res.getIntCounterResult("num-lines"));

	Assert.assertEquals(Double.valueOf(getParallelism()), res.getAccumulatorResult("open-close-counter"));

	// Test histogram (words per line distribution)
	Map<Integer, Integer> dist = new HashMap<>();
	dist.put(1, 1); dist.put(2, 1); dist.put(3, 1);
	Assert.assertEquals(dist, res.getAccumulatorResult("words-per-line"));

	// Test distinct words (custom accumulator)
	Set<StringValue> distinctWords = new HashSet<>();
	distinctWords.add(new StringValue("one"));
	distinctWords.add(new StringValue("two"));
	distinctWords.add(new StringValue("three"));
	Assert.assertEquals(distinctWords, res.getAccumulatorResult("distinct-words"));
}
 
Example #2
Source File: CliFrontend.java    From flink with Apache License 2.0
protected void executeProgram(PackagedProgram program, ClusterClient<?> client, int parallelism) throws ProgramMissingJobException, ProgramInvocationException {
	logAndSysout("Starting execution of program");

	final JobSubmissionResult result = client.run(program, parallelism);

	if (null == result) {
		throw new ProgramMissingJobException("No JobSubmissionResult returned, please make sure you called " +
			"ExecutionEnvironment.execute()");
	}

	if (result.isJobExecutionResult()) {
		logAndSysout("Program execution finished");
		JobExecutionResult execResult = result.getJobExecutionResult();
		System.out.println("Job with JobID " + execResult.getJobID() + " has finished.");
		System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms");
		Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
		if (accumulatorsResult.size() > 0) {
			System.out.println("Accumulator Results: ");
			System.out.println(AccumulatorHelper.getResultsFormatted(accumulatorsResult));
		}
	} else {
		logAndSysout("Job has been submitted with JobID " + result.getJobID());
	}
}
 
Example #3
Source File: AccumulatorITCase.java    From flink with Apache License 2.0
@Override
protected void postSubmit() throws Exception {
	compareResultsByLinesInMemory(EXPECTED, resultPath);

	// Test accumulator results
	System.out.println("Accumulator results:");
	JobExecutionResult res = this.result;
	System.out.println(AccumulatorHelper.getResultsFormatted(res.getAllAccumulatorResults()));

	Assert.assertEquals(Integer.valueOf(3), res.getAccumulatorResult("num-lines"));
	Assert.assertEquals(Integer.valueOf(3), res.getIntCounterResult("num-lines"));

	Assert.assertEquals(Double.valueOf(getParallelism()), res.getAccumulatorResult("open-close-counter"));

	// Test histogram (words per line distribution)
	Map<Integer, Integer> dist = new HashMap<>();
	dist.put(1, 1); dist.put(2, 1); dist.put(3, 1);
	Assert.assertEquals(dist, res.getAccumulatorResult("words-per-line"));

	// Test distinct words (custom accumulator)
	Set<StringValue> distinctWords = new HashSet<>();
	distinctWords.add(new StringValue("one"));
	distinctWords.add(new StringValue("two"));
	distinctWords.add(new StringValue("three"));
	Assert.assertEquals(distinctWords, res.getAccumulatorResult("distinct-words"));
}
 
Example #4
Source File: AbstractRuntimeUDFContext.java    From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private <V, A extends Serializable> Accumulator<V, A> getAccumulator(String name,
		Class<? extends Accumulator<V, A>> accumulatorClass) {

	Accumulator<?, ?> accumulator = accumulators.get(name);

	if (accumulator != null) {
		AccumulatorHelper.compareAccumulatorTypes(name, accumulator.getClass(), accumulatorClass);
	} else {
		// Create new accumulator
		try {
			accumulator = accumulatorClass.newInstance();
		}
		catch (Exception e) {
			throw new RuntimeException("Cannot create accumulator " + accumulatorClass.getName(), e);
		}
		accumulators.put(name, accumulator);
	}
	return (Accumulator<V, A>) accumulator;
}
 
Example #5
Source File: AbstractRuntimeUDFContext.java    From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private <V, A extends Serializable> Accumulator<V, A> getAccumulator(String name,
		Class<? extends Accumulator<V, A>> accumulatorClass) {

	Accumulator<?, ?> accumulator = accumulators.get(name);

	if (accumulator != null) {
		AccumulatorHelper.compareAccumulatorTypes(name, accumulator.getClass(), accumulatorClass);
	} else {
		// Create new accumulator
		try {
			accumulator = accumulatorClass.newInstance();
		}
		catch (Exception e) {
			throw new RuntimeException("Cannot create accumulator " + accumulatorClass.getName(), e);
		}
		accumulators.put(name, accumulator);
	}
	return (Accumulator<V, A>) accumulator;
}
 
Example #6
Source File: AccumulatorITCase.java    From flink with Apache License 2.0
@Override
protected void postSubmit() throws Exception {
	compareResultsByLinesInMemory(EXPECTED, resultPath);

	// Test accumulator results
	System.out.println("Accumulator results:");
	JobExecutionResult res = this.result;
	System.out.println(AccumulatorHelper.getResultsFormatted(res.getAllAccumulatorResults()));

	Assert.assertEquals(Integer.valueOf(3), res.getAccumulatorResult("num-lines"));
	Assert.assertEquals(Integer.valueOf(3), res.getIntCounterResult("num-lines"));

	Assert.assertEquals(Double.valueOf(getParallelism()), res.getAccumulatorResult("open-close-counter"));

	// Test histogram (words per line distribution)
	Map<Integer, Integer> dist = new HashMap<>();
	dist.put(1, 1); dist.put(2, 1); dist.put(3, 1);
	Assert.assertEquals(dist, res.getAccumulatorResult("words-per-line"));

	// Test distinct words (custom accumulator)
	Set<StringValue> distinctWords = new HashSet<>();
	distinctWords.add(new StringValue("one"));
	distinctWords.add(new StringValue("two"));
	distinctWords.add(new StringValue("three"));
	Assert.assertEquals(distinctWords, res.getAccumulatorResult("distinct-words"));
}
 
Example #7
Source File: CliFrontend.java    From Flink-CEPplus with Apache License 2.0
protected void executeProgram(PackagedProgram program, ClusterClient<?> client, int parallelism) throws ProgramMissingJobException, ProgramInvocationException {
	logAndSysout("Starting execution of program");

	final JobSubmissionResult result = client.run(program, parallelism);

	if (null == result) {
		throw new ProgramMissingJobException("No JobSubmissionResult returned, please make sure you called " +
			"ExecutionEnvironment.execute()");
	}

	if (result.isJobExecutionResult()) {
		logAndSysout("Program execution finished");
		JobExecutionResult execResult = result.getJobExecutionResult();
		System.out.println("Job with JobID " + execResult.getJobID() + " has finished.");
		System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms");
		Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
		if (accumulatorsResult.size() > 0) {
			System.out.println("Accumulator Results: ");
			System.out.println(AccumulatorHelper.getResultsFormatted(accumulatorsResult));
		}
	} else {
		logAndSysout("Job has been submitted with JobID " + result.getJobID());
	}
}
 
Example #8
Source File: ClusterClient.java    From Flink-CEPplus with Apache License 2.0
/**
 * Requests and returns the accumulators for the given job identifier. Accumulators can be
 * requested while a job is running or after it has finished.
 * @param jobID The job identifier of a job.
 * @param loader The class loader for deserializing the accumulator results.
 * @return A map from accumulator name to accumulator value.
 */
public Map<String, OptionalFailure<Object>> getAccumulators(JobID jobID, ClassLoader loader) throws Exception {
	ActorGateway jobManagerGateway = getJobManagerGateway();

	Future<Object> response;
	try {
		response = jobManagerGateway.ask(new RequestAccumulatorResults(jobID), timeout);
	} catch (Exception e) {
		throw new Exception("Failed to query the job manager gateway for accumulators.", e);
	}

	Object result = Await.result(response, timeout);

	if (result instanceof AccumulatorResultsFound) {
		Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators =
				((AccumulatorResultsFound) result).result();

		return AccumulatorHelper.deserializeAccumulators(serializedAccumulators, loader);

	} else if (result instanceof AccumulatorResultsErroneous) {
		throw ((AccumulatorResultsErroneous) result).cause();
	} else {
		throw new Exception("Failed to fetch accumulators for the job " + jobID + ".");
	}
}
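
As a usage note for the legacy Akka-based fetch above, a caller might print the results like this. This is a hedged sketch: the printAccumulators helper is hypothetical, and the client, job ID, and class loader are assumed to come from elsewhere.

import java.util.Map;

import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.util.OptionalFailure;

public class FetchAccumulatorsSketch {

	// Hypothetical helper: print each accumulator, surfacing per-accumulator failures.
	static void printAccumulators(ClusterClient<?> client, JobID jobId, ClassLoader loader) throws Exception {
		Map<String, OptionalFailure<Object>> accumulators = client.getAccumulators(jobId, loader);
		for (Map.Entry<String, OptionalFailure<Object>> entry : accumulators.entrySet()) {
			if (entry.getValue().isFailure()) {
				System.err.println(entry.getKey() + " failed: " + entry.getValue().getFailureCause());
			} else {
				System.out.println(entry.getKey() + " = " + entry.getValue().getUnchecked());
			}
		}
	}
}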
 
Example #9
Source File: RestClusterClient.java    From flink with Apache License 2.0
@Override
public CompletableFuture<Map<String, Object>> getAccumulators(JobID jobID, ClassLoader loader) {
	final JobAccumulatorsHeaders accumulatorsHeaders = JobAccumulatorsHeaders.getInstance();
	final JobAccumulatorsMessageParameters accMsgParams = accumulatorsHeaders.getUnresolvedMessageParameters();
	accMsgParams.jobPathParameter.resolve(jobID);
	accMsgParams.includeSerializedAccumulatorsParameter.resolve(Collections.singletonList(true));

	CompletableFuture<JobAccumulatorsInfo> responseFuture = sendRequest(
		accumulatorsHeaders,
		accMsgParams);

	return responseFuture
		.thenApply(JobAccumulatorsInfo::getSerializedUserAccumulators)
		.thenApply(accumulators -> {
			try {
				return AccumulatorHelper.deserializeAndUnwrapAccumulators(accumulators, loader);
			} catch (Exception e) {
				throw new CompletionException("Cannot deserialize and unwrap accumulators properly.", e);
			}
		});
}
 
Example #10
Source File: AbstractRuntimeUDFContext.java    From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked")
private <V, A extends Serializable> Accumulator<V, A> getAccumulator(String name,
		Class<? extends Accumulator<V, A>> accumulatorClass) {

	Accumulator<?, ?> accumulator = accumulators.get(name);

	if (accumulator != null) {
		AccumulatorHelper.compareAccumulatorTypes(name, accumulator.getClass(), accumulatorClass);
	} else {
		// Create new accumulator
		try {
			accumulator = accumulatorClass.newInstance();
		}
		catch (Exception e) {
			throw new RuntimeException("Cannot create accumulator " + accumulatorClass.getName(), e);
		}
		accumulators.put(name, accumulator);
	}
	return (Accumulator<V, A>) accumulator;
}
 
Example #11
Source File: ExecutionJobVertex.java    From flink with Apache License 2.0
public StringifiedAccumulatorResult[] getAggregatedUserAccumulatorsStringified() {
	Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>();

	for (ExecutionVertex vertex : taskVertices) {
		Map<String, Accumulator<?, ?>> next = vertex.getCurrentExecutionAttempt().getUserAccumulators();
		if (next != null) {
			AccumulatorHelper.mergeInto(userAccumulators, next);
		}
	}

	return StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);
}
 
Example #12
Source File: EmbeddedJobClient.java    From flink with Apache License 2.0
@Override
public CompletableFuture<Map<String, Object>> getAccumulators(final ClassLoader classLoader) {
	checkNotNull(classLoader);

	return dispatcherGateway.requestJob(jobId, timeout)
			.thenApply(ArchivedExecutionGraph::getAccumulatorsSerialized)
			.thenApply(accumulators -> {
				try {
					return AccumulatorHelper.deserializeAndUnwrapAccumulators(accumulators, classLoader);
				} catch (Exception e) {
					throw new CompletionException("Cannot deserialize and unwrap accumulators properly.", e);
				}
			});
}
 
Example #13
Source File: ExecutionGraph.java    From flink with Apache License 2.0
/**
 * Merges all accumulator results from the tasks previously executed in the Executions.
 * @return The accumulator map
 */
public Map<String, OptionalFailure<Accumulator<?, ?>>> aggregateUserAccumulators() {

	Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>();

	for (ExecutionVertex vertex : getAllExecutionVertices()) {
		Map<String, Accumulator<?, ?>> next = vertex.getCurrentExecutionAttempt().getUserAccumulators();
		if (next != null) {
			AccumulatorHelper.mergeInto(userAccumulators, next);
		}
	}

	return userAccumulators;
}
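
To make the merge step concrete, the following standalone sketch (illustrative, assuming only flink-core on the classpath; the class name and counter values are made up) shows how AccumulatorHelper.mergeInto folds per-task accumulator maps into a single result map, combining same-named accumulators.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.api.common.accumulators.Accumulator;
import org.apache.flink.api.common.accumulators.AccumulatorHelper;
import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.util.OptionalFailure;

public class MergeIntoSketch {

	public static void main(String[] args) {
		Map<String, OptionalFailure<Accumulator<?, ?>>> merged = new HashMap<>();

		// Simulate two task attempts that both incremented the same named counter.
		Map<String, Accumulator<?, ?>> taskA = new HashMap<>();
		IntCounter counterA = new IntCounter();
		counterA.add(3);
		taskA.put("num-lines", counterA);

		Map<String, Accumulator<?, ?>> taskB = new HashMap<>();
		IntCounter counterB = new IntCounter();
		counterB.add(4);
		taskB.put("num-lines", counterB);

		AccumulatorHelper.mergeInto(merged, taskA);
		AccumulatorHelper.mergeInto(merged, taskB);

		// Same-named accumulators are combined: prints 7.
		System.out.println(merged.get("num-lines").getUnchecked().getLocalValue());
	}
}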
 
Example #14
Source File: MiniClusterClient.java    From flink with Apache License 2.0
@Override
public CompletableFuture<Map<String, Object>> getAccumulators(JobID jobID, ClassLoader loader) {
	return miniCluster
		.getExecutionGraph(jobID)
		.thenApply(AccessExecutionGraph::getAccumulatorsSerialized)
		.thenApply(accumulators -> {
			try {
				return AccumulatorHelper.deserializeAndUnwrapAccumulators(accumulators, loader);
			} catch (Exception e) {
				throw new CompletionException("Cannot deserialize and unwrap accumulators properly.", e);
			}
		});
}
 
Example #15
Source File: CollectionExecutor.java    From flink with Apache License 2.0
public JobExecutionResult execute(Plan program) throws Exception {
	long startTime = System.currentTimeMillis();

	initCache(program.getCachedFiles());
	Collection<? extends GenericDataSinkBase<?>> sinks = program.getDataSinks();
	for (Operator<?> sink : sinks) {
		execute(sink);
	}
	
	long endTime = System.currentTimeMillis();
	Map<String, OptionalFailure<Object>> accumulatorResults = AccumulatorHelper.toResultMap(accumulators);
	return new JobExecutionResult(null, endTime - startTime, accumulatorResults);
}
 
Example #16
Source File: JobExecutionResult.java    From flink with Apache License 2.0
@Override
public String toString() {
	final StringBuilder result = new StringBuilder();
	result.append("Program execution finished").append("\n");
	result.append("Job with JobID ").append(getJobID()).append(" has finished.").append("\n");
	result.append("Job Runtime: ").append(getNetRuntime()).append(" ms").append("\n");

	final Map<String, Object> accumulatorsResult = getAllAccumulatorResults();
	if (accumulatorsResult.size() > 0) {
		result.append("Accumulator Results: ").append("\n");
		result.append(AccumulatorHelper.getResultsFormatted(accumulatorsResult)).append("\n");
	}

	return result.toString();
}
 
Example #17
Source File: CollectionExecutor.java    From Flink-CEPplus with Apache License 2.0
public JobExecutionResult execute(Plan program) throws Exception {
	long startTime = System.currentTimeMillis();

	initCache(program.getCachedFiles());
	Collection<? extends GenericDataSinkBase<?>> sinks = program.getDataSinks();
	for (Operator<?> sink : sinks) {
		execute(sink);
	}
	
	long endTime = System.currentTimeMillis();
	Map<String, OptionalFailure<Object>> accumulatorResults = AccumulatorHelper.toResultMap(accumulators);
	return new JobExecutionResult(null, endTime - startTime, accumulatorResults);
}
 
Example #18
Source File: FlinkAbstractParDoWrapper.java    From flink-dataflow with Apache License 2.0
@Override
protected <AggInputT, AggOutputT> Aggregator<AggInputT, AggOutputT> createAggregatorInternal(String name, Combine.CombineFn<AggInputT, ?, AggOutputT> combiner) {
	Accumulator acc = getRuntimeContext().getAccumulator(name);
	if (acc != null) {
		AccumulatorHelper.compareAccumulatorTypes(name,
				SerializableFnAggregatorWrapper.class, acc.getClass());
		return (Aggregator<AggInputT, AggOutputT>) acc;
	}

	SerializableFnAggregatorWrapper<AggInputT, AggOutputT> accumulator =
			new SerializableFnAggregatorWrapper<>(combiner);
	getRuntimeContext().addAccumulator(name, accumulator);
	return accumulator;
}
 
Example #19
Source File: FlinkGroupAlsoByWindowWrapper.java    From flink-dataflow with Apache License 2.0
@Override
protected <AggInputT, AggOutputT> Aggregator<AggInputT, AggOutputT> createAggregatorInternal(String name, Combine.CombineFn<AggInputT, ?, AggOutputT> combiner) {
	Accumulator acc = getRuntimeContext().getAccumulator(name);
	if (acc != null) {
		AccumulatorHelper.compareAccumulatorTypes(name,
				SerializableFnAggregatorWrapper.class, acc.getClass());
		return (Aggregator<AggInputT, AggOutputT>) acc;
	}

	SerializableFnAggregatorWrapper<AggInputT, AggOutputT> accumulator =
			new SerializableFnAggregatorWrapper<>(combiner);
	getRuntimeContext().addAccumulator(name, accumulator);
	return accumulator;
}
 
Example #20
Source File: RestClusterClient.java    From flink with Apache License 2.0
@Override
public Map<String, OptionalFailure<Object>> getAccumulators(final JobID jobID, ClassLoader loader) throws Exception {
	final JobAccumulatorsHeaders accumulatorsHeaders = JobAccumulatorsHeaders.getInstance();
	final JobAccumulatorsMessageParameters accMsgParams = accumulatorsHeaders.getUnresolvedMessageParameters();
	accMsgParams.jobPathParameter.resolve(jobID);
	accMsgParams.includeSerializedAccumulatorsParameter.resolve(Collections.singletonList(true));

	CompletableFuture<JobAccumulatorsInfo> responseFuture = sendRequest(
		accumulatorsHeaders,
		accMsgParams);

	Map<String, OptionalFailure<Object>> result = Collections.emptyMap();

	try {
		result = responseFuture.thenApply((JobAccumulatorsInfo accumulatorsInfo) -> {
			try {
				return AccumulatorHelper.deserializeAccumulators(
					accumulatorsInfo.getSerializedUserAccumulators(),
					loader);
			} catch (Exception e) {
				throw new CompletionException(
					new FlinkException(
						String.format("Deserialization of accumulators for job %s failed.", jobID),
						e));
			}
		}).get(timeout.toMillis(), TimeUnit.MILLISECONDS);
	} catch (ExecutionException ee) {
		ExceptionUtils.rethrowException(ExceptionUtils.stripExecutionException(ee));
	}

	return result;
}
 
Example #21
Source File: ExecutionJobVertex.java    From flink with Apache License 2.0
public StringifiedAccumulatorResult[] getAggregatedUserAccumulatorsStringified() {
	Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>();

	for (ExecutionVertex vertex : taskVertices) {
		Map<String, Accumulator<?, ?>> next = vertex.getCurrentExecutionAttempt().getUserAccumulators();
		if (next != null) {
			AccumulatorHelper.mergeInto(userAccumulators, next);
		}
	}

	return StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);
}
 
Example #22
Source File: ExecutionGraph.java    From flink with Apache License 2.0
/**
 * Merges all accumulator results from the tasks previously executed in the Executions.
 * @return The accumulator map
 */
public Map<String, OptionalFailure<Accumulator<?, ?>>> aggregateUserAccumulators() {

	Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>();

	for (ExecutionVertex vertex : getAllExecutionVertices()) {
		Map<String, Accumulator<?, ?>> next = vertex.getCurrentExecutionAttempt().getUserAccumulators();
		if (next != null) {
			AccumulatorHelper.mergeInto(userAccumulators, next);
		}
	}

	return userAccumulators;
}
 
Example #23
Source File: CollectionExecutor.java    From flink with Apache License 2.0
public JobExecutionResult execute(Plan program) throws Exception {
	long startTime = System.currentTimeMillis();

	initCache(program.getCachedFiles());
	Collection<? extends GenericDataSinkBase<?>> sinks = program.getDataSinks();
	for (Operator<?> sink : sinks) {
		execute(sink);
	}
	
	long endTime = System.currentTimeMillis();
	Map<String, OptionalFailure<Object>> accumulatorResults = AccumulatorHelper.toResultMap(accumulators);
	return new JobExecutionResult(null, endTime - startTime, accumulatorResults);
}
 
Example #24
Source File: RestClusterClient.java    From Flink-CEPplus with Apache License 2.0
@Override
public Map<String, OptionalFailure<Object>> getAccumulators(final JobID jobID, ClassLoader loader) throws Exception {
	final JobAccumulatorsHeaders accumulatorsHeaders = JobAccumulatorsHeaders.getInstance();
	final JobAccumulatorsMessageParameters accMsgParams = accumulatorsHeaders.getUnresolvedMessageParameters();
	accMsgParams.jobPathParameter.resolve(jobID);
	accMsgParams.includeSerializedAccumulatorsParameter.resolve(Collections.singletonList(true));

	CompletableFuture<JobAccumulatorsInfo> responseFuture = sendRequest(
		accumulatorsHeaders,
		accMsgParams);

	Map<String, OptionalFailure<Object>> result = Collections.emptyMap();

	try {
		result = responseFuture.thenApply((JobAccumulatorsInfo accumulatorsInfo) -> {
			try {
				return AccumulatorHelper.deserializeAccumulators(
					accumulatorsInfo.getSerializedUserAccumulators(),
					loader);
			} catch (Exception e) {
				throw new CompletionException(
					new FlinkException(
						String.format("Deserialization of accumulators for job %s failed.", jobID),
						e));
			}
		}).get(timeout.toMillis(), TimeUnit.MILLISECONDS);
	} catch (ExecutionException ee) {
		ExceptionUtils.rethrowException(ExceptionUtils.stripExecutionException(ee));
	}

	return result;
}
 
Example #25
Source File: ExecutionJobVertex.java    From Flink-CEPplus with Apache License 2.0
public StringifiedAccumulatorResult[] getAggregatedUserAccumulatorsStringified() {
	Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>();

	for (ExecutionVertex vertex : taskVertices) {
		Map<String, Accumulator<?, ?>> next = vertex.getCurrentExecutionAttempt().getUserAccumulators();
		if (next != null) {
			AccumulatorHelper.mergeInto(userAccumulators, next);
		}
	}

	return StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);
}
 
Example #26
Source File: ExecutionGraph.java    From Flink-CEPplus with Apache License 2.0
/**
 * Merges all accumulator results from the tasks previously executed in the Executions.
 * @return The accumulator map
 */
public Map<String, OptionalFailure<Accumulator<?, ?>>> aggregateUserAccumulators() {

	Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>();

	for (ExecutionVertex vertex : getAllExecutionVertices()) {
		Map<String, Accumulator<?, ?>> next = vertex.getCurrentExecutionAttempt().getUserAccumulators();
		if (next != null) {
			AccumulatorHelper.mergeInto(userAccumulators, next);
		}
	}

	return userAccumulators;
}
 
Example #27
Source File: SerializedJobExecutionResult.java    From flink with Apache License 2.0
public JobExecutionResult toJobExecutionResult(ClassLoader loader) throws IOException, ClassNotFoundException {
	Map<String, OptionalFailure<Object>> accumulators =
			AccumulatorHelper.deserializeAccumulators(accumulatorResults, loader);

	return new JobExecutionResult(jobId, netRuntime, accumulators);
}
 
Example #28
Source File: SerializedJobExecutionResult.java    From flink with Apache License 2.0
public JobExecutionResult toJobExecutionResult(ClassLoader loader) throws IOException, ClassNotFoundException {
	Map<String, OptionalFailure<Object>> accumulators =
			AccumulatorHelper.deserializeAccumulators(accumulatorResults, loader);

	return new JobExecutionResult(jobId, netRuntime, accumulators);
}
 
Example #29
Source File: SerializedJobExecutionResult.java    From Flink-CEPplus with Apache License 2.0
public JobExecutionResult toJobExecutionResult(ClassLoader loader) throws IOException, ClassNotFoundException {
	Map<String, OptionalFailure<Object>> accumulators =
			AccumulatorHelper.deserializeAccumulators(accumulatorResults, loader);

	return new JobExecutionResult(jobId, netRuntime, accumulators);
}
 
Example #30
Source File: GrepJobOptimized.java    From flink-perf with Apache License 2.0
public static void main(final String[] args) throws Exception {
	// set up the execution environment
	final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	String in = args[0];
	String out = args[1];
	System.err.println("Using input=" + in);
	System.err.println("Using output=" + out);

	String[] patterns = Arrays.copyOfRange(args, 2, args.length);
	System.err.println("Using patterns: " + Arrays.toString(patterns));

	// get input data
	DataSet<StringValue> text = env.createInput(new TextValueInputFormat(new Path(in)));

	for (final String pattern : patterns) {
		DataSet<StringValue> res = text.filter(new RichFilterFunction<StringValue>() {
			private final Pattern compiledPattern = Pattern.compile(pattern);

			@Override
			public boolean filter(StringValue valueIn) {
				final String value = valueIn.getValue();
				if (value == null || value.length() == 0) {
					return false;
				}
				// keep lines where the pattern matches anywhere in the value
				return compiledPattern.matcher(value).find();
			}
		}).name("grep for " + pattern);
		res.writeAsText(out + "_" + pattern, FileSystem.WriteMode.OVERWRITE);
	}

	// execute program
	JobExecutionResult jobResult = env.execute("Flink Grep benchmark");
	System.err.println(AccumulatorHelper.getResultsFormatted(jobResult.getAllAccumulatorResults()));
}