org.apache.flink.api.common.accumulators.IntCounter Java Examples

The following examples show how to use org.apache.flink.api.common.accumulators.IntCounter. Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
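Before the project snippets, here is a minimal, self-contained sketch of the usual IntCounter workflow: register the counter in a rich function, increment it per record, and read the merged value from the JobExecutionResult once the job has finished. The accumulator name "num-records", the class name IntCounterExample, and the surrounding DataSet job are illustrative assumptions, not taken from any of the indexed projects.

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.accumulators.IntCounter;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;
import org.apache.flink.configuration.Configuration;

public class IntCounterExample {

	public static void main(String[] args) throws Exception {
		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		env.fromElements("a", "b", "c")
			.map(new RichMapFunction<String, String>() {

				// Local counter instance; Flink merges the per-task values at job completion.
				private final IntCounter numRecords = new IntCounter();

				@Override
				public void open(Configuration parameters) {
					// Register the accumulator under a name that is unique within the job.
					getRuntimeContext().addAccumulator("num-records", numRecords);
				}

				@Override
				public String map(String value) {
					numRecords.add(1);
					return value;
				}
			})
			.output(new DiscardingOutputFormat<>());

		JobExecutionResult result = env.execute("IntCounter example");
		// The merged accumulator value is available on the client after execution.
		Integer total = result.getAccumulatorResult("num-records");
		System.out.println("num-records = " + total);
	}
}

Note that getRuntimeContext().getIntCounter("num-records") is an equivalent convenience method that registers and returns the counter in one call, as several of the examples below demonstrate.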
Example #1
Source File: StringifiedAccumulatorResultTest.java    From flink with Apache License 2.0
@Test
public void stringifyingResultsShouldIncorporateAccumulatorLocalValueDirectly() {
	final String name = "a";
	final int targetValue = 314159;
	final IntCounter acc = new IntCounter();
	acc.add(targetValue);
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, OptionalFailure.of(acc));

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("IntCounter", firstResult.getType());
	assertEquals(Integer.toString(targetValue), firstResult.getValue());
}
 
Example #2
Source File: MigrationTestUtils.java    From flink with Apache License 2.0
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
	ListState<String> unionListState = context.getOperatorStateStore().getListState(
			CheckpointingNonParallelSourceWithListState.STATE_DESCRIPTOR);

	if (context.isRestored()) {
		assertThat(unionListState.get(),
				containsInAnyOrder(
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING,
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING_1,
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING_2,
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING_3));

		getRuntimeContext().addAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, new IntCounter());
		getRuntimeContext().getAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR).add(1);
	} else {
		throw new RuntimeException(
				"This source should always be restored because it's only used when restoring from a savepoint.");
	}
}
 
Example #3
Source File: MigrationTestUtils.java    From flink with Apache License 2.0
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
	ListState<String> unionListState = context.getOperatorStateStore().getUnionListState(
			CheckpointingParallelSourceWithUnionListState.STATE_DESCRIPTOR);

	if (context.isRestored()) {
		assertThat(unionListState.get(),
				containsInAnyOrder(CheckpointingParallelSourceWithUnionListState.CHECKPOINTED_STRINGS));

		getRuntimeContext().addAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, new IntCounter());
		getRuntimeContext().getAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR).add(1);
	} else {
		throw new RuntimeException(
				"This source should always be restored because it's only used when restoring from a savepoint.");
	}
}
 
Example #4
Source File: MigrationTestUtils.java    From Flink-CEPplus with Apache License 2.0
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
	ListState<String> unionListState = context.getOperatorStateStore().getUnionListState(
			CheckpointingParallelSourceWithUnionListState.STATE_DESCRIPTOR);

	if (context.isRestored()) {
		assertThat(unionListState.get(),
				containsInAnyOrder(CheckpointingParallelSourceWithUnionListState.CHECKPOINTED_STRINGS));

		getRuntimeContext().addAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, new IntCounter());
		getRuntimeContext().getAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR).add(1);
	} else {
		throw new RuntimeException(
				"This source should always be restored because it's only used when restoring from a savepoint.");
	}
}
 
Example #5
Source File: MigrationTestUtils.java    From Flink-CEPplus with Apache License 2.0
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
	ListState<String> unionListState = context.getOperatorStateStore().getListState(
			CheckpointingNonParallelSourceWithListState.STATE_DESCRIPTOR);

	if (context.isRestored()) {
		assertThat(unionListState.get(),
				containsInAnyOrder(
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING,
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING_1,
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING_2,
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING_3));

		getRuntimeContext().addAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, new IntCounter());
		getRuntimeContext().getAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR).add(1);
	} else {
		throw new RuntimeException(
				"This source should always be restored because it's only used when restoring from a savepoint.");
	}
}
 
Example #6
Source File: StringifiedAccumulatorResultTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void stringifyingResultsShouldIncorporateAccumulatorLocalValueDirectly() {
	final String name = "a";
	final int targetValue = 314159;
	final IntCounter acc = new IntCounter();
	acc.add(targetValue);
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, OptionalFailure.of(acc));

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("IntCounter", firstResult.getType());
	assertEquals(Integer.toString(targetValue), firstResult.getValue());
}
 
Example #7
Source File: StringifiedAccumulatorResultTest.java    From flink with Apache License 2.0
@Test
public void stringifyingResultsShouldIncorporateAccumulatorLocalValueDirectly() {
	final String name = "a";
	final int targetValue = 314159;
	final IntCounter acc = new IntCounter();
	acc.add(targetValue);
	final Map<String, OptionalFailure<Accumulator<?, ?>>> accumulatorMap = new HashMap<>();
	accumulatorMap.put(name, OptionalFailure.of(acc));

	final StringifiedAccumulatorResult[] results = StringifiedAccumulatorResult.stringifyAccumulatorResults(accumulatorMap);

	assertEquals(1, results.length);

	final StringifiedAccumulatorResult firstResult = results[0];
	assertEquals(name, firstResult.getName());
	assertEquals("IntCounter", firstResult.getType());
	assertEquals(Integer.toString(targetValue), firstResult.getValue());
}
 
Example #8
Source File: MigrationTestUtils.java    From flink with Apache License 2.0
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
	ListState<String> unionListState = context.getOperatorStateStore().getListState(
			CheckpointingNonParallelSourceWithListState.STATE_DESCRIPTOR);

	if (context.isRestored()) {
		assertThat(unionListState.get(),
				containsInAnyOrder(
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING,
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING_1,
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING_2,
						CheckpointingNonParallelSourceWithListState.CHECKPOINTED_STRING_3));

		getRuntimeContext().addAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, new IntCounter());
		getRuntimeContext().getAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR).add(1);
	} else {
		throw new RuntimeException(
				"This source should always be restored because it's only used when restoring from a savepoint.");
	}
}
 
Example #9
Source File: MigrationTestUtils.java    From flink with Apache License 2.0
@Override
public void initializeState(FunctionInitializationContext context) throws Exception {
	ListState<String> unionListState = context.getOperatorStateStore().getUnionListState(
			CheckpointingParallelSourceWithUnionListState.STATE_DESCRIPTOR);

	if (context.isRestored()) {
		assertThat(unionListState.get(),
				containsInAnyOrder(CheckpointingParallelSourceWithUnionListState.CHECKPOINTED_STRINGS));

		getRuntimeContext().addAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, new IntCounter());
		getRuntimeContext().getAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR).add(1);
	} else {
		throw new RuntimeException(
				"This source should always be restored because it's only used when restoring from a savepoint.");
	}
}
 
Example #10
Source File: AccumulatorITCase.java    From flink with Apache License 2.0
@Override
public void open(Configuration parameters) {

	// Add counters using convenience functions
	this.cntNumLines = getRuntimeContext().getIntCounter("num-lines");
	this.wordsPerLineDistribution = getRuntimeContext().getHistogram("words-per-line");

	// Add built-in accumulator without convenience function
	getRuntimeContext().addAccumulator("open-close-counter", this.openCloseCounter);

	// Add custom counter
	this.distinctWords = new SetAccumulator<>();
	this.getRuntimeContext().addAccumulator("distinct-words", distinctWords);

	// Create counter and test increment
	IntCounter simpleCounter = getRuntimeContext().getIntCounter("simple-counter");
	simpleCounter.add(1);
	Assert.assertEquals(simpleCounter.getLocalValue().intValue(), 1);

	// Test if we get the same counter
	IntCounter simpleCounter2 = getRuntimeContext().getIntCounter("simple-counter");
	Assert.assertEquals(simpleCounter.getLocalValue(), simpleCounter2.getLocalValue());

	// Should fail if we request it with different type
	try {
		@SuppressWarnings("unused")
		DoubleCounter simpleCounter3 = getRuntimeContext().getDoubleCounter("simple-counter");
		// DoubleSumAggregator longAggregator3 = (DoubleSumAggregator)
		// getRuntimeContext().getAggregator("custom",
		// DoubleSumAggregator.class);
		Assert.fail("Should not be able to obtain previously created counter with different type");
	}
	catch (UnsupportedOperationException ex) {
		// expected!
	}

	// Test counter used in open() and close()
	this.openCloseCounter.add(0.5);
}
 
Example #11
Source File: AccumulatorITCase.java    From flink with Apache License 2.0
@Override
public void open(Configuration parameters) {

	// Add counters using convenience functions
	this.cntNumLines = getRuntimeContext().getIntCounter("num-lines");
	this.wordsPerLineDistribution = getRuntimeContext().getHistogram("words-per-line");

	// Add built-in accumulator without convenience function
	getRuntimeContext().addAccumulator("open-close-counter", this.openCloseCounter);

	// Add custom counter
	this.distinctWords = new SetAccumulator<>();
	this.getRuntimeContext().addAccumulator("distinct-words", distinctWords);

	// Create counter and test increment
	IntCounter simpleCounter = getRuntimeContext().getIntCounter("simple-counter");
	simpleCounter.add(1);
	Assert.assertEquals(simpleCounter.getLocalValue().intValue(), 1);

	// Test if we get the same counter
	IntCounter simpleCounter2 = getRuntimeContext().getIntCounter("simple-counter");
	Assert.assertEquals(simpleCounter.getLocalValue(), simpleCounter2.getLocalValue());

	// Should fail if we request it with different type
	try {
		@SuppressWarnings("unused")
		DoubleCounter simpleCounter3 = getRuntimeContext().getDoubleCounter("simple-counter");
		// DoubleSumAggregator longAggregator3 = (DoubleSumAggregator)
		// getRuntimeContext().getAggregator("custom",
		// DoubleSumAggregator.class);
		Assert.fail("Should not be able to obtain previously created counter with different type");
	}
	catch (UnsupportedOperationException ex) {
		// expected!
	}

	// Test counter used in open() and close()
	this.openCloseCounter.add(0.5);
}
 
Example #12
Source File: StatefulJobSavepointMigrationITCase.java    From flink with Apache License 2.0
@Override
public void open() throws Exception {
	super.open();

	// have to re-register to ensure that our onEventTime() is called
	getInternalTimerService(
		"timer",
		LongSerializer.INSTANCE,
		this);

	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, new IntCounter());
}
 
Example #13
Source File: StatefulJobSavepointMigrationITCase.java    From flink with Apache License 2.0
@Override
public void open() throws Exception {
	super.open();

	// have to re-register to ensure that our onEventTime() is called
	getInternalTimerService(
		"timer",
		LongSerializer.INSTANCE,
		this);

	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, new IntCounter());
}
 
Example #14
Source File: LegacyStatefulJobSavepointMigrationITCase.java    From flink with Apache License 2.0
@Override
public void open() throws Exception {
	super.open();

	timerService = getInternalTimerService(
		"timer",
		LongSerializer.INSTANCE,
		this);

	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, new IntCounter());
}
 
Example #15
Source File: LegacyStatefulJobSavepointMigrationITCase.java    From flink with Apache License 2.0
@Override
public void open() throws Exception {
	super.open();

	timerService = getInternalTimerService(
		"timer",
		LongSerializer.INSTANCE,
		this);

	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, new IntCounter());
}
 
Example #16
Source File: AccumulatorITCase.java    From Flink-CEPplus with Apache License 2.0
@Override
public void open(Configuration parameters) {

	// Add counters using convenience functions
	this.cntNumLines = getRuntimeContext().getIntCounter("num-lines");
	this.wordsPerLineDistribution = getRuntimeContext().getHistogram("words-per-line");

	// Add built-in accumulator without convenience function
	getRuntimeContext().addAccumulator("open-close-counter", this.openCloseCounter);

	// Add custom counter
	this.distinctWords = new SetAccumulator<>();
	this.getRuntimeContext().addAccumulator("distinct-words", distinctWords);

	// Create counter and test increment
	IntCounter simpleCounter = getRuntimeContext().getIntCounter("simple-counter");
	simpleCounter.add(1);
	Assert.assertEquals(simpleCounter.getLocalValue().intValue(), 1);

	// Test if we get the same counter
	IntCounter simpleCounter2 = getRuntimeContext().getIntCounter("simple-counter");
	Assert.assertEquals(simpleCounter.getLocalValue(), simpleCounter2.getLocalValue());

	// Should fail if we request it with different type
	try {
		@SuppressWarnings("unused")
		DoubleCounter simpleCounter3 = getRuntimeContext().getDoubleCounter("simple-counter");
		// DoubleSumAggregator longAggregator3 = (DoubleSumAggregator)
		// getRuntimeContext().getAggregator("custom",
		// DoubleSumAggregator.class);
		Assert.fail("Should not be able to obtain previously created counter with different type");
	}
	catch (UnsupportedOperationException ex) {
		// expected!
	}

	// Test counter used in open() and close()
	this.openCloseCounter.add(0.5);
}
 
Example #17
Source File: LegacyStatefulJobSavepointMigrationITCase.java    From Flink-CEPplus with Apache License 2.0
@Override
public void open() throws Exception {
	super.open();

	timerService = getInternalTimerService(
		"timer",
		LongSerializer.INSTANCE,
		this);

	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, new IntCounter());
}
 
Example #18
Source File: StatefulJobSavepointMigrationITCase.java    From Flink-CEPplus with Apache License 2.0
@Override
public void open() throws Exception {
	super.open();

	// have to re-register to ensure that our onEventTime() is called
	getInternalTimerService(
		"timer",
		LongSerializer.INSTANCE,
		this);

	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESS_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_EVENT_TIME_CHECK_ACCUMULATOR, new IntCounter());
	getRuntimeContext().addAccumulator(SUCCESSFUL_PROCESSING_TIME_CHECK_ACCUMULATOR, new IntCounter());
}
 
Example #19
Source File: ReductionsTest.java    From stateful-functions with Apache License 2.0
@Override
public IntCounter getIntCounter(String name) {
  throw new UnsupportedOperationException();
}
 
Example #20
Source File: ReductionsTest.java    From flink-statefun with Apache License 2.0
@Override
public IntCounter getIntCounter(String name) {
  throw new UnsupportedOperationException();
}
 
Example #21
Source File: CepRuntimeContext.java    From flink with Apache License 2.0
@Override
public IntCounter getIntCounter(final String name) {
	throw new UnsupportedOperationException("Int counters are not supported.");
}
 
Example #22
Source File: MigrationTestUtils.java    From flink with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);

	getRuntimeContext().addAccumulator(NUM_ELEMENTS_ACCUMULATOR, new IntCounter());
}
 
Example #23
Source File: RichAsyncFunction.java    From flink with Apache License 2.0
@Override
public IntCounter getIntCounter(String name) {
	throw new UnsupportedOperationException("Int counters are not supported in rich async functions.");
}
 
Example #24
Source File: SavepointRuntimeContext.java    From flink with Apache License 2.0
@Override
public IntCounter getIntCounter(String name) {
	return ctx.getIntCounter(name);
}
 
Example #25
Source File: SubtaskExecutionAttemptAccumulatorsHandlerTest.java    From flink with Apache License 2.0
@Test
public void testHandleRequest() throws Exception {

	// Instantiate the handler.
	final RestHandlerConfiguration restHandlerConfiguration = RestHandlerConfiguration.fromConfiguration(new Configuration());

	final SubtaskExecutionAttemptAccumulatorsHandler handler = new SubtaskExecutionAttemptAccumulatorsHandler(
		() -> null,
		Time.milliseconds(100L),
		Collections.emptyMap(),
		SubtaskExecutionAttemptAccumulatorsHeaders.getInstance(),
		new DefaultExecutionGraphCache(
			restHandlerConfiguration.getTimeout(),
			Time.milliseconds(restHandlerConfiguration.getRefreshInterval())),
		TestingUtils.defaultExecutor());

	// Instantiate an empty request.
	final HandlerRequest<EmptyRequestBody, SubtaskAttemptMessageParameters> request = new HandlerRequest<>(
		EmptyRequestBody.getInstance(),
		new SubtaskAttemptMessageParameters()
	);

	final Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>(3);
	userAccumulators.put("IntCounter", OptionalFailure.of(new IntCounter(10)));
	userAccumulators.put("LongCounter", OptionalFailure.of(new LongCounter(100L)));
	userAccumulators.put("Failure", OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));

	// Instantiate the expected result.
	final StringifiedAccumulatorResult[] accumulatorResults =
		StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);

	final int attemptNum = 1;
	final int subtaskIndex = 2;

	// Instantiate the tested execution.
	final ArchivedExecution execution = new ArchivedExecution(
		accumulatorResults,
		null,
		new ExecutionAttemptID(),
		attemptNum,
		ExecutionState.FINISHED,
		null,
		null,
		null,
		subtaskIndex,
		new long[ExecutionState.values().length]);

	// Invoke tested method.
	final SubtaskExecutionAttemptAccumulatorsInfo accumulatorsInfo = handler.handleRequest(request, execution);

	final ArrayList<UserAccumulator> userAccumulatorList = new ArrayList<>(userAccumulators.size());
	for (StringifiedAccumulatorResult accumulatorResult : accumulatorResults) {
		userAccumulatorList.add(
			new UserAccumulator(
				accumulatorResult.getName(),
				accumulatorResult.getType(),
				accumulatorResult.getValue()));
	}

	final SubtaskExecutionAttemptAccumulatorsInfo expected = new SubtaskExecutionAttemptAccumulatorsInfo(
		subtaskIndex,
		attemptNum,
		execution.getAttemptId().toString(),
		userAccumulatorList);

	// Verify.
	assertEquals(expected, accumulatorsInfo);
}
 
Example #26
Source File: ExecutionGraphDeploymentTest.java    From flink with Apache License 2.0
/**
 * Verifies that {@link ExecutionGraph#updateState(TaskExecutionState)} updates the accumulators and metrics for an
 * execution that failed or was canceled.
 */
@Test
public void testAccumulatorsAndMetricsForwarding() throws Exception {
	final JobVertexID jid1 = new JobVertexID();
	final JobVertexID jid2 = new JobVertexID();

	JobVertex v1 = new JobVertex("v1", jid1);
	JobVertex v2 = new JobVertex("v2", jid2);

	Tuple2<ExecutionGraph, Map<ExecutionAttemptID, Execution>> graphAndExecutions = setupExecution(v1, 1, v2, 1);
	ExecutionGraph graph = graphAndExecutions.f0;

	// verify behavior for canceled executions
	Execution execution1 = graphAndExecutions.f1.values().iterator().next();

	IOMetrics ioMetrics = new IOMetrics(0, 0, 0, 0);
	Map<String, Accumulator<?, ?>> accumulators = new HashMap<>();
	accumulators.put("acc", new IntCounter(4));
	AccumulatorSnapshot accumulatorSnapshot = new AccumulatorSnapshot(graph.getJobID(), execution1.getAttemptId(), accumulators);

	TaskExecutionState state = new TaskExecutionState(graph.getJobID(), execution1.getAttemptId(), ExecutionState.CANCELED, null, accumulatorSnapshot, ioMetrics);

	graph.updateState(state);

	assertEquals(ioMetrics, execution1.getIOMetrics());
	assertNotNull(execution1.getUserAccumulators());
	assertEquals(4, execution1.getUserAccumulators().get("acc").getLocalValue());

	// verify behavior for failed executions
	Execution execution2 = graphAndExecutions.f1.values().iterator().next();

	IOMetrics ioMetrics2 = new IOMetrics(0, 0, 0, 0);
	Map<String, Accumulator<?, ?>> accumulators2 = new HashMap<>();
	accumulators2.put("acc", new IntCounter(8));
	AccumulatorSnapshot accumulatorSnapshot2 = new AccumulatorSnapshot(graph.getJobID(), execution2.getAttemptId(), accumulators2);

	TaskExecutionState state2 = new TaskExecutionState(graph.getJobID(), execution2.getAttemptId(), ExecutionState.FAILED, null, accumulatorSnapshot2, ioMetrics2);

	graph.updateState(state2);

	assertEquals(ioMetrics2, execution2.getIOMetrics());
	assertNotNull(execution2.getUserAccumulators());
	assertEquals(8, execution2.getUserAccumulators().get("acc").getLocalValue());
}
 
Example #27
Source File: RichAsyncFunction.java    From flink with Apache License 2.0
@Override
public IntCounter getIntCounter(String name) {
	throw new UnsupportedOperationException("Int counters are not supported in rich async functions.");
}
 
Example #28
Source File: ExecutionGraphDeploymentTest.java    From flink with Apache License 2.0
/**
 * Verifies that {@link ExecutionGraph#updateState(TaskExecutionState)} updates the accumulators and metrics for an
 * execution that failed or was canceled.
 */
@Test
public void testAccumulatorsAndMetricsForwarding() throws Exception {
	final JobVertexID jid1 = new JobVertexID();
	final JobVertexID jid2 = new JobVertexID();

	JobVertex v1 = new JobVertex("v1", jid1);
	JobVertex v2 = new JobVertex("v2", jid2);

	Tuple2<ExecutionGraph, Map<ExecutionAttemptID, Execution>> graphAndExecutions = setupExecution(v1, 1, v2, 1);
	ExecutionGraph graph = graphAndExecutions.f0;

	// verify behavior for canceled executions
	Execution execution1 = graphAndExecutions.f1.values().iterator().next();

	IOMetrics ioMetrics = new IOMetrics(0, 0, 0, 0);
	Map<String, Accumulator<?, ?>> accumulators = new HashMap<>();
	accumulators.put("acc", new IntCounter(4));
	AccumulatorSnapshot accumulatorSnapshot = new AccumulatorSnapshot(graph.getJobID(), execution1.getAttemptId(), accumulators);

	TaskExecutionState state = new TaskExecutionState(graph.getJobID(), execution1.getAttemptId(), ExecutionState.CANCELED, null, accumulatorSnapshot, ioMetrics);

	graph.updateState(state);

	assertEquals(ioMetrics, execution1.getIOMetrics());
	assertNotNull(execution1.getUserAccumulators());
	assertEquals(4, execution1.getUserAccumulators().get("acc").getLocalValue());

	// verify behavior for failed executions
	Execution execution2 = graphAndExecutions.f1.values().iterator().next();

	IOMetrics ioMetrics2 = new IOMetrics(0, 0, 0, 0);
	Map<String, Accumulator<?, ?>> accumulators2 = new HashMap<>();
	accumulators2.put("acc", new IntCounter(8));
	AccumulatorSnapshot accumulatorSnapshot2 = new AccumulatorSnapshot(graph.getJobID(), execution2.getAttemptId(), accumulators2);

	TaskExecutionState state2 = new TaskExecutionState(graph.getJobID(), execution2.getAttemptId(), ExecutionState.FAILED, null, accumulatorSnapshot2, ioMetrics2);

	graph.updateState(state2);

	assertEquals(ioMetrics2, execution2.getIOMetrics());
	assertNotNull(execution2.getUserAccumulators());
	assertEquals(8, execution2.getUserAccumulators().get("acc").getLocalValue());
}
 
Example #29
Source File: SubtaskExecutionAttemptAccumulatorsHandlerTest.java    From flink with Apache License 2.0
@Test
public void testHandleRequest() throws Exception {

	// Instantiate the handler.
	final RestHandlerConfiguration restHandlerConfiguration = RestHandlerConfiguration.fromConfiguration(new Configuration());

	final SubtaskExecutionAttemptAccumulatorsHandler handler = new SubtaskExecutionAttemptAccumulatorsHandler(
		() -> null,
		Time.milliseconds(100L),
		Collections.emptyMap(),
		SubtaskExecutionAttemptAccumulatorsHeaders.getInstance(),
		new ExecutionGraphCache(
			restHandlerConfiguration.getTimeout(),
			Time.milliseconds(restHandlerConfiguration.getRefreshInterval())),
		TestingUtils.defaultExecutor());

	// Instantiate an empty request.
	final HandlerRequest<EmptyRequestBody, SubtaskAttemptMessageParameters> request = new HandlerRequest<>(
		EmptyRequestBody.getInstance(),
		new SubtaskAttemptMessageParameters()
	);

	final Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>(3);
	userAccumulators.put("IntCounter", OptionalFailure.of(new IntCounter(10)));
	userAccumulators.put("LongCounter", OptionalFailure.of(new LongCounter(100L)));
	userAccumulators.put("Failure", OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));

	// Instantiate the expected result.
	final StringifiedAccumulatorResult[] accumulatorResults =
		StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);

	final int attemptNum = 1;
	final int subtaskIndex = 2;

	// Instantiate the tested execution.
	final ArchivedExecution execution = new ArchivedExecution(
		accumulatorResults,
		null,
		new ExecutionAttemptID(),
		attemptNum,
		ExecutionState.FINISHED,
		null,
		null,
		null,
		subtaskIndex,
		new long[ExecutionState.values().length]);

	// Invoke tested method.
	final SubtaskExecutionAttemptAccumulatorsInfo accumulatorsInfo = handler.handleRequest(request, execution);

	final ArrayList<UserAccumulator> userAccumulatorList = new ArrayList<>(userAccumulators.size());
	for (StringifiedAccumulatorResult accumulatorResult : accumulatorResults) {
		userAccumulatorList.add(
			new UserAccumulator(
				accumulatorResult.getName(),
				accumulatorResult.getType(),
				accumulatorResult.getValue()));
	}

	final SubtaskExecutionAttemptAccumulatorsInfo expected = new SubtaskExecutionAttemptAccumulatorsInfo(
		subtaskIndex,
		attemptNum,
		execution.getAttemptId().toString(),
		userAccumulatorList);

	// Verify.
	assertEquals(expected, accumulatorsInfo);
}
 
Example #30
Source File: LegacyStatefulJobSavepointMigrationITCase.java    From flink with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);

	getRuntimeContext().addAccumulator(SUCCESSFUL_RESTORE_CHECK_ACCUMULATOR, new IntCounter());
}