Java Code Examples for org.apache.flink.runtime.jobgraph.OperatorID

The following examples show how to use org.apache.flink.runtime.jobgraph.OperatorID. They are extracted from open source projects; the source project, file, and license are noted above each example where available.
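Most examples below obtain an OperatorID in one of three ways. As a quick orientation, here is a minimal sketch of those constructors and factory methods (an illustration based on the calls used in the examples, not an exhaustive API reference):

import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.jobgraph.OperatorID;

class OperatorIdSketch {
	static void createIds() {
		// a fresh, random operator ID (what most tests below use)
		OperatorID randomId = new OperatorID();

		// a deterministic ID from two longs, handy for reproducible tests
		OperatorID fixedId = new OperatorID(42L, 42L);

		// an ID derived from a JobVertexID, so checkpoint state can be matched
		// back to the single operator of that job vertex
		OperatorID derivedId = OperatorID.fromJobVertexID(new JobVertexID());
	}
}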
Example 1
Source Project: flink   Source File: CheckpointMetadataLoadingTest.java    License: Apache License 2.0
/**
 * Tests that savepoint loading fails when there is a max-parallelism mismatch.
 */
@Test
public void testMaxParallelismMismatch() throws Exception {
	final OperatorID operatorId = new OperatorID();
	final int parallelism = 128128;

	final CompletedCheckpointStorageLocation testSavepoint = createSavepointWithOperatorSubtaskState(242L, operatorId, parallelism);
	final Map<JobVertexID, ExecutionJobVertex> tasks = createTasks(operatorId, parallelism, parallelism + 1);

	try {
		Checkpoints.loadAndValidateCheckpoint(new JobID(), tasks, testSavepoint, cl, false);
		fail("Did not throw expected Exception");
	} catch (IllegalStateException expected) {
		assertTrue(expected.getMessage().contains("Max parallelism mismatch"));
	}
}
 
Example 2
Source Project: flink   Source File: TestJobClient.java    License: Apache License 2.0
@Override
public CompletableFuture<CoordinationResponse> sendCoordinationRequest(OperatorID operatorId, CoordinationRequest request) {
	if (jobStatus.isGloballyTerminalState()) {
		throw new RuntimeException("Job terminated");
	}

	Assert.assertEquals(this.operatorId, operatorId);
	CoordinationResponse response;
	try {
		response = handler.handleCoordinationRequest(request).get();
	} catch (Exception e) {
		throw new RuntimeException(e);
	}

	if (infoProvider.isJobFinished()) {
		jobStatus = JobStatus.FINISHED;
		jobExecutionResult = new JobExecutionResult(jobId, 0, infoProvider.getAccumulatorResults());
	}

	return CompletableFuture.completedFuture(response);
}
 
Example 3
@Test
public void testMultipleStatefulOperatorChainedSnapshotAndRestore() throws Exception {

	OperatorID headOperatorID = new OperatorID(42L, 42L);
	OperatorID tailOperatorID = new OperatorID(44L, 44L);

	JobManagerTaskRestore restore = createRunAndCheckpointOperatorChain(
		headOperatorID,
		new CounterOperator("head"),
		tailOperatorID,
		new CounterOperator("tail"),
		Optional.empty());

	TaskStateSnapshot stateHandles = restore.getTaskStateSnapshot();

	assertEquals(2, stateHandles.getSubtaskStateMappings().size());

	createRunAndCheckpointOperatorChain(
		headOperatorID,
		new CounterOperator("head"),
		tailOperatorID,
		new CounterOperator("tail"),
		Optional.of(restore));

	assertEquals(new HashSet<>(Arrays.asList(headOperatorID, tailOperatorID)), RESTORED_OPERATORS);
}
 
Example 4
@Test
public void testLifeCycleFull() throws Exception {
	ACTUAL_ORDER_TRACKING.clear();

	Configuration taskManagerConfig = new Configuration();
	StreamConfig cfg = new StreamConfig(new Configuration());
	MockSourceFunction srcFun = new MockSourceFunction();

	cfg.setStreamOperator(new LifecycleTrackingStreamSource<>(srcFun, true));
	cfg.setOperatorID(new OperatorID());
	cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	Task task = StreamTaskTest.createTask(SourceStreamTask.class, cfg, taskManagerConfig);

	task.startTaskThread();

	LifecycleTrackingStreamSource.runStarted.await();

	// wait for clean termination
	task.getExecutingThread().join();
	assertEquals(ExecutionState.FINISHED, task.getExecutionState());
	assertEquals(EXPECTED_CALL_ORDER_FULL, ACTUAL_ORDER_TRACKING);
}
 
Example 5
Source Project: flink   Source File: TaskStateSnapshotTest.java    License: Apache License 2.0
@Test
public void discardState() throws Exception {
	TaskStateSnapshot taskStateSnapshot = new TaskStateSnapshot();
	OperatorID operatorID_1 = new OperatorID();
	OperatorID operatorID_2 = new OperatorID();

	OperatorSubtaskState operatorSubtaskState_1 = mock(OperatorSubtaskState.class);
	OperatorSubtaskState operatorSubtaskState_2 = mock(OperatorSubtaskState.class);

	taskStateSnapshot.putSubtaskStateByOperatorID(operatorID_1, operatorSubtaskState_1);
	taskStateSnapshot.putSubtaskStateByOperatorID(operatorID_2, operatorSubtaskState_2);

	taskStateSnapshot.discardState();
	verify(operatorSubtaskState_1).discardState();
	verify(operatorSubtaskState_2).discardState();
}
 
Example 6
Source Project: Flink-CEPplus   Source File: OperatorScopeFormat.java    License: Apache License 2.0
public String[] formatScope(TaskMetricGroup parent, OperatorID operatorID, String operatorName) {

	final String[] template = copyTemplate();
	final String[] values = {
			parent.parent().parent().hostname(),
			parent.parent().parent().taskManagerId(),
			valueOrNull(parent.parent().jobId()),
			valueOrNull(parent.parent().jobName()),
			valueOrNull(parent.vertexId()),
			valueOrNull(parent.executionId()),
			valueOrNull(parent.taskName()),
			String.valueOf(parent.subtaskIndex()),
			String.valueOf(parent.attemptNumber()),
			valueOrNull(operatorID),
			valueOrNull(operatorName)
	};
	return bindVariables(template, values);
}
 
Example 7
Source Project: flink   Source File: FlinkKafkaProducerTest.java    License: Apache License 2.0
@Test
public void testOpenKafkaSerializationSchemaProducer() throws Exception {
	OpenTestingKafkaSerializationSchema schema = new OpenTestingKafkaSerializationSchema();
	Properties properties = new Properties();
	properties.put("bootstrap.servers", "localhost:9092");
	FlinkKafkaProducer<Integer> kafkaProducer = new FlinkKafkaProducer<>(
		"test-topic",
		schema,
		properties,
		FlinkKafkaProducer.Semantic.AT_LEAST_ONCE
	);

	OneInputStreamOperatorTestHarness<Integer, Object> testHarness = new OneInputStreamOperatorTestHarness<>(
		new StreamSink<>(kafkaProducer),
		1,
		1,
		0,
		IntSerializer.INSTANCE,
		new OperatorID(1, 1));

	testHarness.open();

	assertThat(schema.openCalled, equalTo(true));
}
 
Example 8
Source Project: flink   Source File: OperatorGroupTest.java    License: Apache License 2.0
@Test
public void testGenerateScopeDefault() throws Exception {
	TaskManagerMetricGroup tmGroup = new TaskManagerMetricGroup(registry, "theHostName", "test-tm-id");
	TaskManagerJobMetricGroup jmGroup = new TaskManagerJobMetricGroup(registry, tmGroup, new JobID(), "myJobName");
	TaskMetricGroup taskGroup = new TaskMetricGroup(
			registry, jmGroup,  new JobVertexID(),  new AbstractID(), "aTaskName", 11, 0);
	OperatorMetricGroup opGroup = new OperatorMetricGroup(registry, taskGroup, new OperatorID(), "myOpName");

	assertArrayEquals(
			new String[] { "theHostName", "taskmanager", "test-tm-id", "myJobName", "myOpName", "11" },
			opGroup.getScopeComponents());

	assertEquals(
			"theHostName.taskmanager.test-tm-id.myJobName.myOpName.11.name",
			opGroup.getMetricIdentifier("name"));
}
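The assertions above exercise the default operator scope format, <host>.taskmanager.<tm_id>.<job_name>.<operator_name>.<subtask_index>. As a sketch, the format can be overridden through the cluster configuration (using org.apache.flink.configuration.Configuration; the key name follows Flink's metrics scope options, so verify it against your version):

Configuration config = new Configuration();
// for example, drop the job name from the operator scope
config.setString("metrics.scope.operator", "<host>.taskmanager.<tm_id>.<operator_name>.<subtask_index>");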
 
Example 9
Source Project: flink   Source File: StreamSource.java    License: Apache License 2.0
public LatencyMarksEmitter(
		final ProcessingTimeService processingTimeService,
		final Output<StreamRecord<OUT>> output,
		long latencyTrackingInterval,
		final OperatorID operatorId,
		final int subtaskIndex) {

	latencyMarkTimer = processingTimeService.scheduleWithFixedDelay(
		new ProcessingTimeCallback() {
			@Override
			public void onProcessingTime(long timestamp) throws Exception {
				try {
					// ProcessingTimeService callbacks are executed under the checkpointing lock
					output.emitLatencyMarker(new LatencyMarker(processingTimeService.getCurrentProcessingTime(), operatorId, subtaskIndex));
				} catch (Throwable t) {
					// we catch the Throwables here so that we don't trigger the processing
					// timer service's async exception handler
					LOG.warn("Error while emitting latency marker.", t);
				}
			}
		},
		0L,
		latencyTrackingInterval);
}
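The latencyTrackingInterval passed to this constructor ultimately comes from the job configuration. A minimal sketch of enabling latency tracking on the user-facing side (assuming a StreamExecutionEnvironment named env; non-positive values typically disable tracking):

// emit a LatencyMarker roughly once per second
env.getConfig().setLatencyTrackingInterval(1000L);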
 
Example 10
Source Project: flink   Source File: KeyedStateInputFormatTest.java    License: Apache License 2.0
@Test(expected = IOException.class)
public void testInvalidProcessReaderFunctionFails() throws Exception {
	OperatorID operatorID = OperatorIDGenerator.fromUid("uid");

	OperatorSubtaskState state = createOperatorSubtaskState(new StreamFlatMap<>(new StatefulFunction()));
	OperatorState operatorState = new OperatorState(operatorID, 1, 128);
	operatorState.putState(0, state);

	KeyedStateInputFormat<?, ?> format = new KeyedStateInputFormat<>(operatorState, new MemoryStateBackend(), Types.INT, new ReaderFunction());
	KeyGroupRangeInputSplit split = format.createInputSplits(1)[0];

	KeyedStateReaderFunction<Integer, Integer> userFunction = new InvalidReaderFunction();

	readInputSplit(split, userFunction);

	Assert.fail("KeyedStateReaderFunction did not fail on invalid RuntimeContext use");
}
 
Example 11
Source Project: flink   Source File: CompletedCheckpointStoreTest.java    License: Apache License 2.0
public static TestCompletedCheckpoint createCheckpoint(
	int id,
	SharedStateRegistry sharedStateRegistry) throws IOException {

	int numberOfStates = 4;
	CheckpointProperties props = CheckpointProperties.forCheckpoint(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION);

	OperatorID operatorID = new OperatorID();

	Map<OperatorID, OperatorState> operatorGroupState = new HashMap<>();
	OperatorState operatorState = new OperatorState(operatorID, numberOfStates, numberOfStates);
	operatorGroupState.put(operatorID, operatorState);

	for (int i = 0; i < numberOfStates; i++) {
		OperatorSubtaskState subtaskState =
			new TestOperatorSubtaskState();

		operatorState.putState(i, subtaskState);
	}

	operatorState.registerSharedStates(sharedStateRegistry);

	return new TestCompletedCheckpoint(new JobID(), id, 0, operatorGroupState, props);
}
 
Example 12
Source Project: flink   Source File: JobMasterTriggerSavepointITCase.java    License: Apache License 2.0
@Override
public boolean triggerCheckpoint(final CheckpointMetaData checkpointMetaData, final CheckpointOptions checkpointOptions, final boolean advanceToEndOfEventTime) {
	final TaskStateSnapshot checkpointStateHandles = new TaskStateSnapshot();
	checkpointStateHandles.putSubtaskStateByOperatorID(
		OperatorID.fromJobVertexID(getEnvironment().getJobVertexId()),
		new OperatorSubtaskState());

	getEnvironment().acknowledgeCheckpoint(
		checkpointMetaData.getCheckpointId(),
		new CheckpointMetrics(),
		checkpointStateHandles);

	triggerCheckpointLatch.countDown();

	return true;
}
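OperatorID.fromJobVertexID builds the operator ID from the vertex ID's bytes, so the mapping is deterministic: the same vertex always yields the same operator ID, which is what lets the acknowledged state be matched back to the operator. A quick sketch (assuming JUnit's assertEquals is in scope):

JobVertexID vertexId = new JobVertexID();
// the same job vertex always maps to the same operator ID
assertEquals(OperatorID.fromJobVertexID(vertexId), OperatorID.fromJobVertexID(vertexId));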
 
Example 13
@Test
public void testLifeCycleFull() throws Exception {
	ACTUAL_ORDER_TRACKING.clear();

	Configuration taskManagerConfig = new Configuration();
	StreamConfig cfg = new StreamConfig(new Configuration());
	MockSourceFunction srcFun = new MockSourceFunction();

	cfg.setStreamOperator(new LifecycleTrackingStreamSource<>(srcFun, true));
	cfg.setOperatorID(new OperatorID());
	cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	try (ShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build()) {
		Task task = StreamTaskTest.createTask(SourceStreamTask.class, shuffleEnvironment, cfg, taskManagerConfig);

		task.startTaskThread();

		LifecycleTrackingStreamSource.runStarted.await();

		// wait for clean termination
		task.getExecutingThread().join();
		assertEquals(ExecutionState.FINISHED, task.getExecutionState());
		assertEquals(EXPECTED_CALL_ORDER_FULL, ACTUAL_ORDER_TRACKING);
	}
}
 
Example 14
Source Project: flink   Source File: CheckpointCoordinatorTest.java    License: Apache License 2.0
static TaskStateSnapshot mockSubtaskState(
	JobVertexID jobVertexID,
	int index,
	KeyGroupRange keyGroupRange) throws IOException {

	OperatorStateHandle partitionableState = generatePartitionableStateHandle(jobVertexID, index, 2, 8, false);
	KeyGroupsStateHandle partitionedKeyGroupState = generateKeyGroupState(jobVertexID, keyGroupRange, false);

	TaskStateSnapshot subtaskStates = spy(new TaskStateSnapshot());
	OperatorSubtaskState subtaskState = spy(new OperatorSubtaskState(
		partitionableState, null, partitionedKeyGroupState, null)
	);

	subtaskStates.putSubtaskStateByOperatorID(OperatorID.fromJobVertexID(jobVertexID), subtaskState);

	return subtaskStates;
}
 
Example 15
Source Project: flink   Source File: RestoreStreamTaskTest.java    License: Apache License 2.0
@Test
public void testRestoreWithoutState() throws Exception {
	OperatorID headOperatorID = new OperatorID(42L, 42L);
	OperatorID tailOperatorID = new OperatorID(44L, 44L);

	JobManagerTaskRestore restore = createRunAndCheckpointOperatorChain(
		headOperatorID,
		new StatelessOperator(),
		tailOperatorID,
		new CounterOperator(),
		Optional.empty());

	TaskStateSnapshot stateHandles = restore.getTaskStateSnapshot();
	assertEquals(2, stateHandles.getSubtaskStateMappings().size());

	createRunAndCheckpointOperatorChain(
		headOperatorID,
		new StatelessOperator(),
		tailOperatorID,
		new CounterOperator(),
		Optional.of(restore));

	assertEquals(new HashSet<>(Arrays.asList(headOperatorID, tailOperatorID)), RESTORED_OPERATORS);
}
 
Example 16
Source Project: flink   Source File: DataSinkTask.java    License: Apache License 2.0
/**
 * Initializes the OutputFormat implementation and configuration.
 * 
 * @throws RuntimeException
 *         Thrown if an instance of the OutputFormat implementation cannot be
 *         obtained.
 */
private void initOutputFormat() {
	ClassLoader userCodeClassLoader = getUserCodeClassLoader();
	// obtain task configuration (including stub parameters)
	Configuration taskConf = getTaskConfiguration();
	this.config = new TaskConfig(taskConf);

	final Pair<OperatorID, OutputFormat<IT>> operatorIDAndOutputFormat;
	InputOutputFormatContainer formatContainer = new InputOutputFormatContainer(config, userCodeClassLoader);
	try {
		operatorIDAndOutputFormat = formatContainer.getUniqueOutputFormat();
		this.format = operatorIDAndOutputFormat.getValue();

		// check if the class is a subclass, if the check is required
		if (!OutputFormat.class.isAssignableFrom(this.format.getClass())) {
			throw new RuntimeException("The class '" + this.format.getClass().getName() + "' is not a subclass of '" + 
					OutputFormat.class.getName() + "' as is required.");
		}
	}
	catch (ClassCastException ccex) {
		throw new RuntimeException("The stub class is not a proper subclass of " + OutputFormat.class.getName(), ccex);
	}

	Thread thread = Thread.currentThread();
	ClassLoader original = thread.getContextClassLoader();
	// configure the stub. catch exceptions here extra, to report them as originating from the user code 
	try {
		thread.setContextClassLoader(userCodeClassLoader);
		this.format.configure(formatContainer.getParameters(operatorIDAndOutputFormat.getKey()));
	}
	catch (Throwable t) {
		throw new RuntimeException("The user defined 'configure()' method in the Output Format caused an error: " 
			+ t.getMessage(), t);
	}
	finally {
		thread.setContextClassLoader(original);
	}
}
 
Example 17
Source Project: flink   Source File: CompletedCheckpointStoreTest.java    License: Apache License 2.0
public TestCompletedCheckpoint(
		JobID jobId,
		long checkpointId,
		long timestamp,
		Map<OperatorID, OperatorState> operatorGroupState,
		CheckpointProperties props) {

	super(jobId, checkpointId, timestamp, Long.MAX_VALUE, operatorGroupState, null, props,
			new TestCompletedCheckpointStorageLocation());
}
 
Example 18
Source Project: flink   Source File: LatencyStats.java    License: Apache License 2.0
public LatencyStats(
		MetricGroup metricGroup,
		int historySize,
		int subtaskIndex,
		OperatorID operatorID,
		Granularity granularity) {
	this.metricGroup = metricGroup;
	this.historySize = historySize;
	this.subtaskIndex = subtaskIndex;
	this.operatorId = operatorID;
	this.granularity = granularity;
}
 
Example 19
Source Project: flink   Source File: StreamTaskTest.java    License: Apache License 2.0
/**
 * This test checks that cancel calls that are issued before the operator is
 * instantiated still lead to proper canceling.
 */
@Test
public void testEarlyCanceling() throws Exception {
	final StreamConfig cfg = new StreamConfig(new Configuration());
	cfg.setOperatorID(new OperatorID(4711L, 42L));
	cfg.setStreamOperator(new SlowlyDeserializingOperator());
	cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	final TaskManagerActions taskManagerActions = spy(new NoOpTaskManagerActions());
	try (NettyShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build()) {
		final Task task =  new TestTaskBuilder(shuffleEnvironment)
			.setInvokable(SourceStreamTask.class)
			.setTaskConfig(cfg.getConfiguration())
			.setTaskManagerActions(taskManagerActions)
			.build();

		final TaskExecutionState state = new TaskExecutionState(
			task.getJobID(), task.getExecutionId(), ExecutionState.RUNNING);

		task.startTaskThread();

		verify(taskManagerActions, timeout(2000L)).updateTaskExecutionState(eq(state));

		// send a cancel. because the operator takes a long time to deserialize, this should
		// hit the task before the operator is deserialized
		task.cancelExecution();

		task.getExecutingThread().join();

		assertFalse("Task did not cancel", task.getExecutingThread().isAlive());
		assertEquals(ExecutionState.CANCELED, task.getExecutionState());
	}
}
 
Example 20
Source Project: flink   Source File: TestingClusterClient.java    License: Apache License 2.0
@Override
public CompletableFuture<CoordinationResponse> sendCoordinationRequest(
		JobID jobId,
		OperatorID operatorId,
		CoordinationRequest request) {
	throw new UnsupportedOperationException();
}
 
Example 21
Source Project: flink   Source File: PendingCheckpoint.java    License: Apache License 2.0
public TaskAcknowledgeResult acknowledgeCoordinatorState(
		OperatorInfo coordinatorInfo,
		@Nullable ByteStreamStateHandle stateHandle) {

	synchronized (lock) {
		if (discarded) {
			return TaskAcknowledgeResult.DISCARDED;
		}

		final OperatorID operatorId = coordinatorInfo.operatorId();
		OperatorState operatorState = operatorStates.get(operatorId);

		// sanity check for better error reporting
		if (!notYetAcknowledgedOperatorCoordinators.remove(operatorId)) {
			return operatorState != null && operatorState.getCoordinatorState() != null
					? TaskAcknowledgeResult.DUPLICATE
					: TaskAcknowledgeResult.UNKNOWN;
		}

		if (stateHandle != null) {
			if (operatorState == null) {
				operatorState = new OperatorState(
					operatorId, coordinatorInfo.currentParallelism(), coordinatorInfo.maxParallelism());
				operatorStates.put(operatorId, operatorState);
			}
			operatorState.setCoordinatorState(stateHandle);
		}

		return TaskAcknowledgeResult.SUCCESS;
	}
}
 
Example 22
Source Project: flink   Source File: ChannelPersistenceITCase.java    License: Apache License 2.0
private TaskStateSnapshot toTaskStateSnapshot(ChannelStateWriteResult t) throws Exception {
	return new TaskStateSnapshot(singletonMap(new OperatorID(),
		new OperatorSubtaskState(
			StateObjectCollection.empty(),
			StateObjectCollection.empty(),
			StateObjectCollection.empty(),
			StateObjectCollection.empty(),
			new StateObjectCollection<>(t.getInputChannelStateHandles().get()),
			new StateObjectCollection<>(t.getResultSubpartitionStateHandles().get())
		)
	));
}
 
Example 23
Source Project: Flink-CEPplus   Source File: StateAssignmentOperation.java    License: Apache License 2.0
private void reDistributeKeyedStates(
		List<OperatorState> oldOperatorStates,
		int newParallelism,
		List<OperatorID> newOperatorIDs,
		List<KeyGroupRange> newKeyGroupPartitions,
		Map<OperatorInstanceID, List<KeyedStateHandle>> newManagedKeyedState,
		Map<OperatorInstanceID, List<KeyedStateHandle>> newRawKeyedState) {
	//TODO: rewrite this method to only use OperatorID
	checkState(newOperatorIDs.size() == oldOperatorStates.size(),
		"This method still depends on the order of the new and old operators");

	for (int operatorIndex = 0; operatorIndex < newOperatorIDs.size(); operatorIndex++) {
		OperatorState operatorState = oldOperatorStates.get(operatorIndex);
		int oldParallelism = operatorState.getParallelism();
		for (int subTaskIndex = 0; subTaskIndex < newParallelism; subTaskIndex++) {
			OperatorInstanceID instanceID = OperatorInstanceID.of(subTaskIndex, newOperatorIDs.get(operatorIndex));
			Tuple2<List<KeyedStateHandle>, List<KeyedStateHandle>> subKeyedStates = reAssignSubKeyedStates(
				operatorState,
				newKeyGroupPartitions,
				subTaskIndex,
				newParallelism,
				oldParallelism);
			newManagedKeyedState.put(instanceID, subKeyedStates.f0);
			newRawKeyedState.put(instanceID, subKeyedStates.f1);
		}
	}
}
 
Example 24
Source Project: flink   Source File: OperatorCoordinatorHolderTest.java    License: Apache License 2.0
private OperatorCoordinatorHolder createCoordinatorHolder(
		final BiFunction<SerializedValue<OperatorEvent>, Integer, CompletableFuture<Acknowledge>> eventSender,
		final Function<OperatorCoordinator.Context, OperatorCoordinator> coordinatorCtor,
		final ComponentMainThreadExecutor mainThreadExecutor) throws Exception {

	final OperatorID opId = new OperatorID();
	final OperatorCoordinator.Provider provider = new OperatorCoordinator.Provider() {
		@Override
		public OperatorID getOperatorId() {
			return opId;
		}

		@Override
		public OperatorCoordinator create(OperatorCoordinator.Context context) {
			return coordinatorCtor.apply(context);
		}
	};

	final OperatorCoordinatorHolder holder = OperatorCoordinatorHolder.create(
			opId,
			provider,
			eventSender,
			"test-coordinator-name",
			3,
			1775);

	holder.lazyInitialize(globalFailureHandler, mainThreadExecutor);
	holder.start();

	return holder;
}
 
Example 25
/**
 * This test verifies (for two-input tasks) that stream tasks react in the
 * following way to receiving a checkpoint cancellation barrier:
 *   - send a "decline checkpoint" notification out (to the JobManager)
 *   - emit a cancellation barrier downstream.
 */
@Test
public void testDeclineCallOnCancelBarrierTwoInputs() throws Exception {

	TwoInputStreamTaskTestHarness<String, String, String> testHarness = new TwoInputStreamTaskTestHarness<>(
			TwoInputStreamTask::new,
			BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
	testHarness.setupOutputForSingletonOperatorChain();

	StreamConfig streamConfig = testHarness.getStreamConfig();
	CoStreamMap<String, String, String> op = new CoStreamMap<>(new UnionCoMap());
	streamConfig.setStreamOperator(op);
	streamConfig.setOperatorID(new OperatorID());

	StreamMockEnvironment environment = spy(testHarness.createEnvironment());

	// start the task
	testHarness.invoke(environment);
	testHarness.waitForTaskRunning();

	// emit cancellation barriers
	testHarness.processEvent(new CancelCheckpointMarker(2L), 0, 0);
	testHarness.processEvent(new CancelCheckpointMarker(2L), 1, 0);
	testHarness.waitForInputProcessing();

	// the decline call should go to the coordinator
	verify(environment, times(1)).declineCheckpoint(eq(2L), any(CheckpointDeclineOnCancellationBarrierException.class));

	// a cancellation barrier should be downstream
	Object result = testHarness.getOutput().poll();
	assertNotNull("nothing emitted", result);
	assertTrue("wrong type emitted", result instanceof CancelCheckpointMarker);
	assertEquals("wrong checkpoint id", 2L, ((CancelCheckpointMarker) result).getCheckpointId());

	// cancel and shutdown
	testHarness.endInput();
	testHarness.waitForTaskCompletion();
}
 
Example 26
Source Project: Flink-CEPplus   Source File: OneInputStreamTaskTest.java    License: Apache License 2.0
/**
 * This test verifies that open() and close() are correctly called. This test also verifies
 * that timestamps of emitted elements are correct. {@link StreamMap} assigns the input
 * timestamp to emitted elements.
 */
@Test
public void testOpenCloseAndTimestamps() throws Exception {
	final OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<>(
			OneInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);

	testHarness.setupOutputForSingletonOperatorChain();

	StreamConfig streamConfig = testHarness.getStreamConfig();
	StreamMap<String, String> mapOperator = new StreamMap<String, String>(new TestOpenCloseMapFunction());
	streamConfig.setStreamOperator(mapOperator);
	streamConfig.setOperatorID(new OperatorID());

	long initialTime = 0L;
	ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<Object>();

	testHarness.invoke();
	testHarness.waitForTaskRunning();

	testHarness.processElement(new StreamRecord<String>("Hello", initialTime + 1));
	testHarness.processElement(new StreamRecord<String>("Ciao", initialTime + 2));
	expectedOutput.add(new StreamRecord<String>("Hello", initialTime + 1));
	expectedOutput.add(new StreamRecord<String>("Ciao", initialTime + 2));

	testHarness.waitForInputProcessing();

	testHarness.endInput();

	testHarness.waitForTaskCompletion();

	assertTrue("RichFunction methods where not called.", TestOpenCloseMapFunction.closeCalled);

	TestHarnessUtil.assertOutputEquals("Output was not correct.",
		expectedOutput,
		testHarness.getOutput());
}
 
Example 27
Source Project: flink   Source File: OperatorIDGeneratorTest.java    License: Apache License 2.0
@Test
public void testOperatorIdMatchesUid() {
	OperatorID expectedId = getOperatorID();

	OperatorID generatedId = OperatorIDGenerator.fromUid(UID);

	Assert.assertEquals(expectedId, generatedId);
}
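OperatorIDGenerator.fromUid (from the State Processor API) hashes a uid string the same way the streaming runtime does when an operator is assigned that uid, which is what lets savepoint readers locate an operator's state. A minimal sketch with a hypothetical uid:

// "my-uid" is a placeholder; use the uid set via DataStream#uid(...)
OperatorID idForMyOperator = OperatorIDGenerator.fromUid("my-uid");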
 
Example 28
Source Project: flink   Source File: StreamTaskTest.java    License: Apache License 2.0
@Test
public void testStateBackendLoadingAndClosing() throws Exception {
	Configuration taskManagerConfig = new Configuration();
	taskManagerConfig.setString(CheckpointingOptions.STATE_BACKEND, TestMemoryStateBackendFactory.class.getName());

	StreamConfig cfg = new StreamConfig(new Configuration());
	cfg.setStateKeySerializer(mock(TypeSerializer.class));
	cfg.setOperatorID(new OperatorID(4711L, 42L));
	TestStreamSource<Long, MockSourceFunction> streamSource = new TestStreamSource<>(new MockSourceFunction());
	cfg.setStreamOperator(streamSource);
	cfg.setTimeCharacteristic(TimeCharacteristic.ProcessingTime);

	try (ShuffleEnvironment shuffleEnvironment = new NettyShuffleEnvironmentBuilder().build()) {
		Task task = createTask(StateBackendTestSource.class, shuffleEnvironment, cfg, taskManagerConfig);

		StateBackendTestSource.fail = false;
		task.startTaskThread();

		// wait for clean termination
		task.getExecutingThread().join();

		// ensure that the state backends and stream iterables are closed ...
		verify(TestStreamSource.operatorStateBackend).close();
		verify(TestStreamSource.keyedStateBackend).close();
		verify(TestStreamSource.rawOperatorStateInputs).close();
		verify(TestStreamSource.rawKeyedStateInputs).close();
		// ... and disposed
		verify(TestStreamSource.operatorStateBackend).dispose();
		verify(TestStreamSource.keyedStateBackend).dispose();

		assertEquals(ExecutionState.FINISHED, task.getExecutionState());
	}
}
 
Example 29
Source Project: flink   Source File: BootstrapTransformation.java    License: Apache License 2.0
/**
 * @param operatorID The operator id for the stream operator.
 * @param stateBackend The state backend for the job.
 * @param globalMaxParallelism Global max parallelism set for the savepoint.
 * @param savepointPath The path where the savepoint will be written.
 * @return The operator subtask states for this bootstrap transformation.
 */
DataSet<OperatorState> writeOperatorState(
	OperatorID operatorID,
	StateBackend stateBackend,
	int globalMaxParallelism,
	Path savepointPath) {
	int localMaxParallelism = getMaxParallelism(globalMaxParallelism);

	return writeOperatorSubtaskStates(operatorID, stateBackend, savepointPath, localMaxParallelism)
		.reduceGroup(new OperatorSubtaskStateReducer(operatorID, localMaxParallelism))
		.name("reduce(OperatorSubtaskState)");
}