org.apache.flink.api.common.accumulators.Accumulator Java Examples

The following examples show how to use org.apache.flink.api.common.accumulators.Accumulator. Each example is drawn from an open-source project; the source file and license are noted above the example.
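Before diving into the examples, it may help to see what a complete Accumulator implementation looks like. The sketch below is a minimal long counter written against only the methods declared by the Accumulator interface; the class name SimpleLongCounter is illustrative (Flink already ships a LongCounter with equivalent behavior).

import org.apache.flink.api.common.accumulators.Accumulator;

public class SimpleLongCounter implements Accumulator<Long, Long> {

	private long count;

	@Override
	public void add(Long value) {
		// accumulate a single value reported by user code
		count += value;
	}

	@Override
	public Long getLocalValue() {
		return count;
	}

	@Override
	public void resetLocal() {
		count = 0;
	}

	@Override
	public void merge(Accumulator<Long, Long> other) {
		// combine the partial result of another subtask into this one
		count += other.getLocalValue();
	}

	@Override
	public Accumulator<Long, Long> clone() {
		SimpleLongCounter copy = new SimpleLongCounter();
		copy.count = count;
		return copy;
	}
}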
Example #1
Source File: ExecutionGraph.java    From Flink-CEPplus with Apache License 2.0
/**
 * Deserializes accumulators from a task state update.
 *
 * <p>This method never throws an exception!
 *
 * @param state The task execution state from which to deserialize the accumulators.
 * @return The deserialized accumulators, or {@code null} if there are no accumulators or if an error occurred.
 */
private Map<String, Accumulator<?, ?>> deserializeAccumulators(TaskExecutionState state) {
	AccumulatorSnapshot serializedAccumulators = state.getAccumulators();

	if (serializedAccumulators != null) {
		try {
			return serializedAccumulators.deserializeUserAccumulators(userClassLoader);
		}
		catch (Throwable t) {
			// we catch Throwable here to include all form of linking errors that may
			// occur if user classes are missing in the classpath
			LOG.error("Failed to deserialize final accumulator results.", t);
		}
	}
	return null;
}
 
Example #2
Source File: FlatMapOperatorCollectionTest.java    From flink with Apache License 2.0
private void testExecuteOnCollection(FlatMapFunction<String, String> udf, List<String> input, boolean mutableSafe) throws Exception {
	ExecutionConfig executionConfig = new ExecutionConfig();
	if (mutableSafe) {
		executionConfig.disableObjectReuse();
	} else {
		executionConfig.enableObjectReuse();
	}
	final TaskInfo taskInfo = new TaskInfo("Test UDF", 4, 0, 4, 0);
	// run on collections
	final List<String> result = getTestFlatMapOperator(udf)
			.executeOnCollections(input,
					new RuntimeUDFContext(
						taskInfo, null, executionConfig, new HashMap<String, Future<Path>>(),
						new HashMap<String, Accumulator<?, ?>>(), new UnregisteredMetricsGroup()),
					executionConfig);

	Assert.assertEquals(input.size(), result.size());
	Assert.assertEquals(input, result);
}
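The executeOnCollections call above runs the operator directly on in-memory Java collections rather than in a cluster, which is why the test has to hand it a RuntimeUDFContext explicitly, including the (here empty) accumulator map that a real task would normally receive from the runtime.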
 
Example #3
Source File: StreamingRuntimeContextTest.java    From flink with Apache License 2.0
@Test
public void testValueStateInstantiation() throws Exception {

	final ExecutionConfig config = new ExecutionConfig();
	config.registerKryoType(Path.class);

	final AtomicReference<Object> descriptorCapture = new AtomicReference<>();

	StreamingRuntimeContext context = new StreamingRuntimeContext(
			createDescriptorCapturingMockOp(descriptorCapture, config),
			createMockEnvironment(),
			Collections.<String, Accumulator<?, ?>>emptyMap());

	ValueStateDescriptor<TaskInfo> descr = new ValueStateDescriptor<>("name", TaskInfo.class);
	context.getState(descr);

	StateDescriptor<?, ?> descrIntercepted = (StateDescriptor<?, ?>) descriptorCapture.get();
	TypeSerializer<?> serializer = descrIntercepted.getSerializer();

	// check that the Path class is really registered, i.e., the execution config was applied
	assertTrue(serializer instanceof KryoSerializer);
	assertTrue(((KryoSerializer<?>) serializer).getKryo().getRegistration(Path.class).getId() > 0);
}
 
Example #4
Source File: StringifiedAccumulatorResultTest.java    From flink with Apache License 2.0
@Test
public void stringifyingResultsShouldReportNullLocalValueAsNonnullValueString() {
	final String name = "a";
	final NullBearingAccumulator acc = new NullBearingAccumulator();
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, OptionalFailure.of(acc));

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	// Note the use of a String with a content of "null" rather than a null value
	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("NullBearingAccumulator", firstResult.getType());
	assertEquals("null", firstResult.getValue());
}
 
Example #5
Source File: StringifiedAccumulatorResultTest.java    From flink with Apache License 2.0
@Test
public void stringifyingFailureResults() {
	final String name = "a";
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	// Note the use of String values with content of "null" rather than null values
	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("null", firstResult.getType());
	assertTrue(firstResult.getValue().startsWith("org.apache.flink.util.FlinkRuntimeException: Test"));
}
 
Example #6
Source File: OuterJoinOperatorBaseTest.java    From flink with Apache License 2.0
@SuppressWarnings({"rawtypes", "unchecked"})
@Before
public void setup() {
	joiner = new MockRichFlatJoinFunction();

	baseOperator =
		new OuterJoinOperatorBase(joiner,
			new BinaryOperatorInformation(BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO,
				BasicTypeInfo.STRING_TYPE_INFO), new int[0], new int[0], "TestJoiner", null);

	executionConfig = new ExecutionConfig();

	String taskName = "Test rich outer join function";
	TaskInfo taskInfo = new TaskInfo(taskName, 1, 0, 1, 0);
	HashMap<String, Accumulator<?, ?>> accumulatorMap = new HashMap<>();
	HashMap<String, Future<Path>> cpTasks = new HashMap<>();

	runtimeContext = new RuntimeUDFContext(taskInfo, null, executionConfig, cpTasks,
		accumulatorMap, new UnregisteredMetricsGroup());
}
 
Example #7
Source File: ExecutionGraph.java    From flink with Apache License 2.0
/**
 * Updates the accumulators during the runtime of a job. Final accumulator results are transferred
 * through the UpdateTaskExecutionState message.
 * @param accumulatorSnapshot The serialized flink and user-defined accumulators
 */
public void updateAccumulators(AccumulatorSnapshot accumulatorSnapshot) {
	Map<String, Accumulator<?, ?>> userAccumulators;
	try {
		userAccumulators = accumulatorSnapshot.deserializeUserAccumulators(userClassLoader);

		ExecutionAttemptID execID = accumulatorSnapshot.getExecutionAttemptID();
		Execution execution = currentExecutions.get(execID);
		if (execution != null) {
			execution.setAccumulators(userAccumulators);
		} else {
			LOG.debug("Received accumulator result for unknown execution {}.", execID);
		}
	} catch (Exception e) {
		LOG.error("Cannot update accumulators for job {}.", getJobID(), e);
	}
}
 
Example #8
Source File: FlinkGroupAlsoByWindowWrapper.java    From flink-dataflow with Apache License 2.0
@Override
protected <AggInputT, AggOutputT> Aggregator<AggInputT, AggOutputT> createAggregatorInternal(String name, Combine.CombineFn<AggInputT, ?, AggOutputT> combiner) {
	Accumulator acc = getRuntimeContext().getAccumulator(name);
	if (acc != null) {
		AccumulatorHelper.compareAccumulatorTypes(name,
				SerializableFnAggregatorWrapper.class, acc.getClass());
		return (Aggregator<AggInputT, AggOutputT>) acc;
	}

	SerializableFnAggregatorWrapper<AggInputT, AggOutputT> accumulator =
			new SerializableFnAggregatorWrapper<>(combiner);
	getRuntimeContext().addAccumulator(name, accumulator);
	return accumulator;
}
 
Example #9
Source File: Execution.java    From Flink-CEPplus with Apache License 2.0
/**
 * Update accumulators (discarded when the Execution has already been terminated).
 * @param userAccumulators the user accumulators
 */
public void setAccumulators(Map<String, Accumulator<?, ?>> userAccumulators) {
	synchronized (accumulatorLock) {
		if (!state.isTerminal()) {
			this.userAccumulators = userAccumulators;
		}
	}
}
 
Example #10
Source File: SortedStringAccumulator.java    From timely with Apache License 2.0
@Override
public void merge(Accumulator<String, ConcurrentSkipListMap<String, MutableLong>> other) {
    other.getLocalValue().forEach((k, v) -> {
        if (values.containsKey(k)) {
            values.get(k).add(v.longValue());
        } else {
            values.put(k, v);
        }
    });
}
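The merge above folds another subtask's sorted map of counts into this map. For orientation, here is a plausible sketch of the add counterpart; it illustrates the map-of-counts pattern rather than timely's actual implementation, and it assumes MutableLong is Apache Commons Lang's mutable wrapper (consistent with the longValue()/add(long) calls above).

@Override
public void add(String value) {
    // count one occurrence, creating the counter on first sight of the key
    values.computeIfAbsent(value, k -> new MutableLong(0L)).increment();
}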
 
Example #11
Source File: CollectionExecutor.java    From flink with Apache License 2.0
public CollectionExecutor(ExecutionConfig executionConfig) {
	this.executionConfig = executionConfig;
	
	this.intermediateResults = new HashMap<Operator<?>, List<?>>();
	this.accumulators = new HashMap<String, Accumulator<?,?>>();
	this.previousAggregates = new HashMap<String, Value>();
	this.aggregators = new HashMap<String, Aggregator<?>>();
	this.cachedFiles = new HashMap<String, Future<Path>>();
	this.userCodeClassLoader = Thread.currentThread().getContextClassLoader();
}
 
Example #12
Source File: ExecutionGraph.java    From flink with Apache License 2.0
private boolean updateStateInternal(final TaskExecutionState state, final Execution attempt) {
	Map<String, Accumulator<?, ?>> accumulators;

	switch (state.getExecutionState()) {
		case RUNNING:
			return attempt.switchToRunning();

		case FINISHED:
			// this deserialization is exception-free
			accumulators = deserializeAccumulators(state);
			attempt.markFinished(accumulators, state.getIOMetrics());
			return true;

		case CANCELED:
			// this deserialization is exception-free
			accumulators = deserializeAccumulators(state);
			attempt.completeCancelling(accumulators, state.getIOMetrics(), false);
			return true;

		case FAILED:
			// this deserialization is exception-free
			accumulators = deserializeAccumulators(state);
			attempt.markFailed(state.getError(userClassLoader), accumulators, state.getIOMetrics());
			return true;

		default:
			// we mark as failed and return false, which triggers the TaskManager
			// to remove the task
			attempt.fail(new Exception("TaskManager sent illegal state update: " + state.getExecutionState()));
			return false;
	}
}
 
Example #13
Source File: StreamingRuntimeContextTest.java    From flink with Apache License 2.0
@Test
public void testReducingStateInstantiation() throws Exception {

	final ExecutionConfig config = new ExecutionConfig();
	config.registerKryoType(Path.class);

	final AtomicReference<Object> descriptorCapture = new AtomicReference<>();

	StreamingRuntimeContext context = new StreamingRuntimeContext(
			createDescriptorCapturingMockOp(descriptorCapture, config),
			createMockEnvironment(),
			Collections.<String, Accumulator<?, ?>>emptyMap());

	@SuppressWarnings("unchecked")
	ReduceFunction<TaskInfo> reducer = (ReduceFunction<TaskInfo>) mock(ReduceFunction.class);

	ReducingStateDescriptor<TaskInfo> descr =
			new ReducingStateDescriptor<>("name", reducer, TaskInfo.class);

	context.getReducingState(descr);

	StateDescriptor<?, ?> descrIntercepted = (StateDescriptor<?, ?>) descriptorCapture.get();
	TypeSerializer<?> serializer = descrIntercepted.getSerializer();

	// check that the Path class is really registered, i.e., the execution config was applied
	assertTrue(serializer instanceof KryoSerializer);
	assertTrue(((KryoSerializer<?>) serializer).getKryo().getRegistration(Path.class).getId() > 0);
}
 
Example #14
Source File: EmptyFieldsCountAccumulator.java    From flink with Apache License 2.0
@Override
public void merge(final Accumulator<Integer, ArrayList<Integer>> other) {
	// merge two vector accumulators by adding up their vector components
	final List<Integer> otherVector = other.getLocalValue();
	for (int index = 0; index < otherVector.size(); index++) {
		updateResultVector(index, otherVector.get(index));
	}
}
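merge is only half of the accumulator life cycle; user code registers the accumulator once and then calls add. The sketch below shows that registration/update pattern with a hypothetical rich function (the class name and accumulator name are illustrative, not taken from the EmptyFieldsCountAccumulator example).

import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichFlatMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.util.Collector;

public class CountEmptyFields extends RichFlatMapFunction<String, String> {

	private final IntCounter emptyFields = new IntCounter();

	@Override
	public void open(Configuration parameters) {
		// register once per subtask; the runtime later combines the
		// per-subtask partial results via Accumulator#merge
		getRuntimeContext().addAccumulator("empty-fields", this.emptyFields);
	}

	@Override
	public void flatMap(String value, Collector<String> out) {
		if (value.isEmpty()) {
			this.emptyFields.add(1);
		} else {
			out.collect(value);
		}
	}
}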
 
Example #15
Source File: ExecutionGraph.java    From flink with Apache License 2.0
/**
 * Merges all accumulator results from the tasks previously executed in the Executions.
 * @return The accumulator map
 */
public Map<String, OptionalFailure<Accumulator<?, ?>>> aggregateUserAccumulators() {

	Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>();

	for (ExecutionVertex vertex : getAllExecutionVertices()) {
		Map<String, Accumulator<?, ?>> next = vertex.getCurrentExecutionAttempt().getUserAccumulators();
		if (next != null) {
			AccumulatorHelper.mergeInto(userAccumulators, next);
		}
	}

	return userAccumulators;
}
 
Example #16
Source File: RichOutputFormatTest.java    From flink with Apache License 2.0
@Test
public void testCheckRuntimeContextAccess() {
	final SerializedOutputFormat<Value> inputFormat = new SerializedOutputFormat<Value>();
	final TaskInfo taskInfo = new TaskInfo("test name", 3, 1, 3, 0);
	
	inputFormat.setRuntimeContext(new RuntimeUDFContext(
			taskInfo, getClass().getClassLoader(), new ExecutionConfig(),
			new HashMap<String, Future<Path>>(),
			new HashMap<String, Accumulator<?, ?>>(),
			new UnregisteredMetricsGroup()));

	assertEquals(inputFormat.getRuntimeContext().getIndexOfThisSubtask(), 1);
	assertEquals(inputFormat.getRuntimeContext().getNumberOfParallelSubtasks(), 3);
}
 
Example #17
Source File: AbstractRuntimeUDFContext.java    From flink with Apache License 2.0
@Override
public <V, A extends Serializable> void addAccumulator(String name, Accumulator<V, A> accumulator) {
	if (accumulators.containsKey(name)) {
		throw new UnsupportedOperationException("The accumulator '" + name
				+ "' already exists and cannot be added.");
	}
	accumulators.put(name, accumulator);
}
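Note that registration is strict: adding a second accumulator under an existing name fails with the UnsupportedOperationException above, so rich functions typically register each accumulator exactly once, for example in open().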
 
Example #18
Source File: AbstractIterativeTask.java    From flink with Apache License 2.0
@Override
public <V, A extends Serializable> void addAccumulator(String name, Accumulator<V, A> newAccumulator) {
	// only add accumulator on first iteration
	if (inFirstIteration()) {
		super.addAccumulator(name, newAccumulator);
	}
}
 
Example #19
Source File: StringifiedAccumulatorResult.java    From Flink-CEPplus with Apache License 2.0
/**
 * Flatten a map of accumulator names to Accumulator instances into an array of StringifiedAccumulatorResult values.
 */
public static StringifiedAccumulatorResult[] stringifyAccumulatorResults(Map<String, OptionalFailure<Accumulator<?, ?>>> accs) {
	if (accs == null || accs.isEmpty()) {
		return new StringifiedAccumulatorResult[0];
	}
	else {
		StringifiedAccumulatorResult[] results = new StringifiedAccumulatorResult[accs.size()];

		int i = 0;
		for (Map.Entry<String, OptionalFailure<Accumulator<?, ?>>> entry : accs.entrySet()) {
			results[i++] = stringifyAccumulatorResult(entry.getKey(), entry.getValue());
		}
		return results;
	}
}
 
Example #20
Source File: ChainedDriver.java    From flink with Apache License 2.0
public void setup(TaskConfig config, String taskName, Collector<OT> outputCollector,
		AbstractInvokable parent, ClassLoader userCodeClassLoader, ExecutionConfig executionConfig,
		Map<String, Accumulator<?,?>> accumulatorMap)
{
	this.config = config;
	this.taskName = taskName;
	this.userCodeClassLoader = userCodeClassLoader;
	this.metrics = parent.getEnvironment().getMetricGroup().getOrAddOperator(taskName);
	this.numRecordsIn = this.metrics.getIOMetricGroup().getNumRecordsInCounter();
	this.numRecordsOut = this.metrics.getIOMetricGroup().getNumRecordsOutCounter();
	this.outputCollector = new CountingCollector<>(outputCollector, numRecordsOut);

	Environment env = parent.getEnvironment();

	if (parent instanceof BatchTask) {
		this.udfContext = ((BatchTask<?, ?>) parent).createRuntimeContext(metrics);
	} else {
		this.udfContext = new DistributedRuntimeUDFContext(env.getTaskInfo(), userCodeClassLoader,
				parent.getExecutionConfig(), env.getDistributedCacheEntries(), accumulatorMap, metrics, env.getExternalResourceInfoProvider()
		);
	}

	this.executionConfig = executionConfig;
	this.objectReuseEnabled = executionConfig.isObjectReuseEnabled();

	setup(parent);
}
 
Example #21
Source File: Execution.java    From Flink-CEPplus with Apache License 2.0
void completeCancelling(Map<String, Accumulator<?, ?>> userAccumulators, IOMetrics metrics) {

		// the taskmanagers can themselves cancel tasks without an external trigger, if they find that the
		// network stack is canceled (for example by a failing / canceling receiver or sender).
		// this is an artifact of the old network runtime, but for now we need to support task transitions
		// from running directly to canceled

		while (true) {
			ExecutionState current = this.state;

			if (current == CANCELED) {
				return;
			}
			else if (current == CANCELING || current == RUNNING || current == DEPLOYING) {

				updateAccumulatorsAndMetrics(userAccumulators, metrics);

				if (transitionState(current, CANCELED)) {
					finishCancellation();
					return;
				}

				// else fall through the loop
			}
			else {
				// failing in the meantime may happen and is no problem.
				// anything else is a serious problem !!!
				if (current != FAILED) {
					String message = String.format("Asynchronous race: Found %s in state %s after successful cancel call.", vertex.getTaskNameWithSubtaskIndex(), state);
					LOG.error(message);
					vertex.getExecutionGraph().failGlobal(new Exception(message));
				}
				return;
			}
		}
	}
 
Example #22
Source File: AbstractRuntimeUDFContext.java    From flink with Apache License 2.0
public AbstractRuntimeUDFContext(TaskInfo taskInfo,
									ClassLoader userCodeClassLoader,
									ExecutionConfig executionConfig,
									Map<String, Accumulator<?, ?>> accumulators,
									Map<String, Future<Path>> cpTasks,
									MetricGroup metrics) {
	this.taskInfo = checkNotNull(taskInfo);
	this.userCodeClassLoader = userCodeClassLoader;
	this.executionConfig = executionConfig;
	this.distributedCache = new DistributedCache(checkNotNull(cpTasks));
	this.accumulators = checkNotNull(accumulators);
	this.metrics = metrics;
}
 
Example #23
Source File: StreamingRuntimeContextTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testListStateReturnsEmptyListByDefault() throws Exception {

	StreamingRuntimeContext context = new StreamingRuntimeContext(
			createListPlainMockOp(),
			createMockEnvironment(),
			Collections.<String, Accumulator<?, ?>>emptyMap());

	ListStateDescriptor<String> descr = new ListStateDescriptor<>("name", String.class);
	ListState<String> state = context.getListState(descr);

	Iterable<String> value = state.get();
	assertNotNull(value);
	assertFalse(value.iterator().hasNext());
}
 
Example #24
Source File: StreamingRuntimeContextTest.java    From flink with Apache License 2.0
@Test
public void testMapStateReturnsEmptyMapByDefault() throws Exception {

	StreamingRuntimeContext context = new StreamingRuntimeContext(
			createMapPlainMockOp(),
			createMockEnvironment(),
			Collections.<String, Accumulator<?, ?>>emptyMap());

	MapStateDescriptor<Integer, String> descr = new MapStateDescriptor<>("name", Integer.class, String.class);
	MapState<Integer, String> state = context.getMapState(descr);

	Iterable<Map.Entry<Integer, String>> value = state.entries();
	assertNotNull(value);
	assertFalse(value.iterator().hasNext());
}
 
Example #25
Source File: RichAsyncFunction.java    From Flink-CEPplus with Apache License 2.0
@Override
public Map<String, Accumulator<?, ?>> getAllAccumulators() {
	throw new UnsupportedOperationException("Accumulators are not supported in rich async functions.");
}
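 
Finally, accumulator results are read back on the client once a job has finished. Below is a hedged end-to-end sketch using the DataSet API of the Flink versions shown above; it reuses the hypothetical CountEmptyFields function sketched earlier, and the accumulator name "empty-fields" is illustrative.

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;

public class AccumulatorResultExample {

	public static void main(String[] args) throws Exception {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		env.fromElements("a", "", "b", "")
			.flatMap(new CountEmptyFields())   // hypothetical rich function from the earlier sketch
			.output(new DiscardingOutputFormat<String>());

		JobExecutionResult result = env.execute("Accumulator example");

		// the result type matches the accumulator's result type (IntCounter -> Integer)
		Integer emptyFields = result.getAccumulatorResult("empty-fields");
		System.out.println("empty fields: " + emptyFields);
	}
}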