Java Code Examples for org.apache.flink.util.SerializedValue

The following examples show how to use org.apache.flink.util.SerializedValue. They are extracted from open source projects; the source project, file, and license are noted above each example.
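
As a quick orientation, here is a minimal sketch of the basic round trip, using only the API surface that appears in the examples below (the eagerly serializing constructor, getByteArray(), and deserializeValue(ClassLoader)). The roundTrip helper and its String payload are illustrative only, and the usual imports (java.io.IOException, org.apache.flink.util.SerializedValue) are assumed.

static String roundTrip(String payload, ClassLoader loader)
		throws IOException, ClassNotFoundException {
	// The constructor serializes the payload eagerly, which is why it declares IOException.
	SerializedValue<String> serialized = new SerializedValue<>(payload);

	// The raw bytes are exposed, e.g. for size checks or offloading
	// (see the BlobWriter and AkkaRpcActor examples below).
	byte[] bytes = serialized.getByteArray();

	// Deserialization takes an explicit class loader, so the payload's classes only
	// need to be resolvable here, not wherever the serialized holder was shipped.
	return serialized.deserializeValue(loader);
}
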
Example 1
Source Project: flink   Source File: AbstractFetcher.java    License: Apache License 2.0
/**
 * Shortcut variant of {@link #createPartitionStateHolders(Map, int, SerializedValue, SerializedValue, ClassLoader)}
 * that uses the same offset for all partitions when creating their state holders.
 */
private List<KafkaTopicPartitionState<KPH>> createPartitionStateHolders(
	List<KafkaTopicPartition> partitions,
	long initialOffset,
	int timestampWatermarkMode,
	SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
	SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
	ClassLoader userCodeClassLoader) throws IOException, ClassNotFoundException {

	Map<KafkaTopicPartition, Long> partitionsToInitialOffset = new HashMap<>(partitions.size());
	for (KafkaTopicPartition partition : partitions) {
		partitionsToInitialOffset.put(partition, initialOffset);
	}

	return createPartitionStateHolders(
			partitionsToInitialOffset,
			timestampWatermarkMode,
			watermarksPeriodic,
			watermarksPunctuated,
			userCodeClassLoader);
}
 
Example 2
Source Project: flink   Source File: CoordinatorEventsExactlyOnceITCase.java    License: Apache License 2.0
private static JobVertex buildJobVertex(String name, int numEvents, int delay, String accName) throws IOException {
	final JobVertex vertex = new JobVertex(name);
	final OperatorID opId = OperatorID.fromJobVertexID(vertex.getID());

	vertex.setParallelism(1);
	vertex.setInvokableClass(EventCollectingTask.class);
	vertex.getConfiguration().setString(ACC_NAME, accName);

	final OperatorCoordinator.Provider provider = new OperatorCoordinator.Provider() {

		@Override
		public OperatorID getOperatorId() {
			return opId;
		}

		@Override
		public OperatorCoordinator create(OperatorCoordinator.Context context) {
			return new EventSendingCoordinator(context, numEvents, delay);
		}
	};

	vertex.addOperatorCoordinator(new SerializedValue<>(provider));

	return vertex;
}
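
The provider is attached in serialized form because addOperatorCoordinator takes a SerializedValue; presumably this keeps the JobGraph itself serializable regardless of which classes the provider captures.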
 
Example 3
Source Project: flink   Source File: OperatorEventDispatcherImpl.java    License: Apache License 2.0
void dispatchEventToHandlers(OperatorID operatorID, SerializedValue<OperatorEvent> serializedEvent) throws FlinkException {
	final OperatorEvent evt;
	try {
		evt = serializedEvent.deserializeValue(classLoader);
	}
	catch (IOException | ClassNotFoundException e) {
		throw new FlinkException("Could not deserialize operator event", e);
	}

	final OperatorEventHandler handler = handlers.get(operatorID);
	if (handler != null) {
		handler.handleOperatorEvent(evt);
	}
	else {
		throw new FlinkException("Operator not registered for operator events");
	}
}
 
Example 4
Source Project: flink   Source File: AbstractFetcher.java    License: Apache License 2.0
/**
 * Shortcut variant of {@link #createPartitionStateHolders(Map, int, SerializedValue, ClassLoader)}
 * that uses the same offset for all partitions when creating their state holders.
 */
private List<KafkaTopicPartitionState<T, KPH>> createPartitionStateHolders(
	List<KafkaTopicPartition> partitions,
	long initialOffset,
	int timestampWatermarkMode,
	SerializedValue<WatermarkStrategy<T>> watermarkStrategy,
	ClassLoader userCodeClassLoader) throws IOException, ClassNotFoundException {

	Map<KafkaTopicPartition, Long> partitionsToInitialOffset = new HashMap<>(partitions.size());
	for (KafkaTopicPartition partition : partitions) {
		partitionsToInitialOffset.put(partition, initialOffset);
	}

	return createPartitionStateHolders(
			partitionsToInitialOffset,
			timestampWatermarkMode,
			watermarkStrategy,
			userCodeClassLoader);
}
 
Example 5
Source Project: flink   Source File: TaskDeploymentDescriptorTest.java    License: Apache License 2.0
@Test
public void testOffLoadedAndNonOffLoadedPayload() {
	final TaskDeploymentDescriptor taskDeploymentDescriptor = createTaskDeploymentDescriptor(
		new TaskDeploymentDescriptor.NonOffloaded<>(serializedJobInformation),
		new TaskDeploymentDescriptor.Offloaded<>(new PermanentBlobKey()));

	SerializedValue<JobInformation> actualSerializedJobInformation = taskDeploymentDescriptor.getSerializedJobInformation();
	assertThat(actualSerializedJobInformation, is(serializedJobInformation));

	try {
		taskDeploymentDescriptor.getSerializedTaskInformation();
		fail("Expected to fail since the task information should be offloaded.");
	} catch (IllegalStateException expected) {
		// expected
	}
}
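
The test exercises both payload variants of the descriptor: the NonOffloaded job information is kept inline and returned as-is, while the Offloaded task information is represented only by a PermanentBlobKey, so requesting its serialized form throws an IllegalStateException.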
 
Example 6
Source Project: flink-connectors   Source File: FlinkPravegaReaderTest.java    License: Apache License 2.0
/**
 * Creates a {@link TestableFlinkPravegaReader} with event time and watermarking.
 */
private static TestableFlinkPravegaReader<Integer> createReaderWithWatermark(AssignerWithTimeWindows<Integer> assignerWithTimeWindows) {
    ClientConfig clientConfig = ClientConfig.builder().build();
    ReaderGroupConfig rgConfig = ReaderGroupConfig.builder().stream(SAMPLE_STREAM).build();
    boolean enableMetrics = true;

    try {
        ClosureCleaner.clean(assignerWithTimeWindows, ExecutionConfig.ClosureCleanerLevel.RECURSIVE, true);
        SerializedValue<AssignerWithTimeWindows<Integer>> serializedAssigner =
                new SerializedValue<>(assignerWithTimeWindows);
        return new TestableFlinkPravegaReader<>(
                "hookUid", clientConfig, rgConfig, SAMPLE_SCOPE, GROUP_NAME, DESERIALIZATION_SCHEMA,
                serializedAssigner, READER_TIMEOUT, CHKPT_TIMEOUT, enableMetrics);
    } catch (IOException e) {
        throw new IllegalArgumentException("The given assigner is not serializable", e);
    }
}
 
Example 7
Source Project: Flink-CEPplus   Source File: AkkaRpcActor.java    License: Apache License 2.0
private void sendAsyncResponse(CompletableFuture<?> asyncResponse, String methodName) {
	final ActorRef sender = getSender();
	Promise.DefaultPromise<Object> promise = new Promise.DefaultPromise<>();

	asyncResponse.whenComplete(
		(value, throwable) -> {
			if (throwable != null) {
				promise.failure(throwable);
			} else {
				if (isRemoteSender(sender)) {
					Either<SerializedValue<?>, AkkaRpcException> serializedResult = serializeRemoteResultAndVerifySize(value, methodName);

					if (serializedResult.isLeft()) {
						promise.success(serializedResult.left());
					} else {
						promise.failure(serializedResult.right());
					}
				} else {
					promise.success(value);
				}
			}
		});

	Patterns.pipe(promise.future(), getContext().dispatcher()).to(sender);
}
 
Example 8
Source Project: flink   Source File: AccumulatorHelper.java    License: Apache License 2.0
/**
 * Takes the serialized accumulator results and tries to deserialize them using the provided
 * class loader.
 * @param serializedAccumulators The serialized accumulator results.
 * @param loader The class loader to use.
 * @return The deserialized accumulator results.
 * @throws IOException if a serialized accumulator value cannot be read
 * @throws ClassNotFoundException if a class of a serialized accumulator value cannot be resolved
 */
public static Map<String, OptionalFailure<Object>> deserializeAccumulators(
		Map<String, SerializedValue<OptionalFailure<Object>>> serializedAccumulators,
		ClassLoader loader) throws IOException, ClassNotFoundException {

	if (serializedAccumulators == null || serializedAccumulators.isEmpty()) {
		return Collections.emptyMap();
	}

	Map<String, OptionalFailure<Object>> accumulators = new HashMap<>(serializedAccumulators.size());

	for (Map.Entry<String, SerializedValue<OptionalFailure<Object>>> entry : serializedAccumulators.entrySet()) {

		OptionalFailure<Object> value = null;
		if (entry.getValue() != null) {
			value = entry.getValue().deserializeValue(loader);
		}

		accumulators.put(entry.getKey(), value);
	}

	return accumulators;
}
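
A minimal sketch of invoking this helper, with a hypothetical accumulator name and value; OptionalFailure.of(...) (also from org.apache.flink.util) wraps a successfully produced result, and the enclosing method is assumed to declare IOException and ClassNotFoundException:

Map<String, SerializedValue<OptionalFailure<Object>>> serialized = new HashMap<>();
// new SerializedValue<>(...) serializes eagerly and can itself throw IOException.
serialized.put("recordCount", new SerializedValue<>(OptionalFailure.of((Object) 42L)));

Map<String, OptionalFailure<Object>> accumulators =
		AccumulatorHelper.deserializeAccumulators(serialized, getClass().getClassLoader());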
 
Example 9
Source Project: Flink-CEPplus   Source File: JobCheckpointingSettings.java    License: Apache License 2.0
public JobCheckpointingSettings(
		List<JobVertexID> verticesToTrigger,
		List<JobVertexID> verticesToAcknowledge,
		List<JobVertexID> verticesToConfirm,
		CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration,
		@Nullable SerializedValue<StateBackend> defaultStateBackend,
		@Nullable SerializedValue<MasterTriggerRestoreHook.Factory[]> masterHooks) {

	this.verticesToTrigger = requireNonNull(verticesToTrigger);
	this.verticesToAcknowledge = requireNonNull(verticesToAcknowledge);
	this.verticesToConfirm = requireNonNull(verticesToConfirm);
	this.checkpointCoordinatorConfiguration = Preconditions.checkNotNull(checkpointCoordinatorConfiguration);
	this.defaultStateBackend = defaultStateBackend;
	this.masterHooks = masterHooks;
}
 
Example 10
Source Project: flink   Source File: AbstractFetcherWatermarksTest.java    License: Apache License 2.0
TestFetcher(
		SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> assignedPartitionsWithStartOffsets,
		SerializedValue<WatermarkStrategy<T>> watermarkStrategy,
		ProcessingTimeService processingTimeProvider,
		long autoWatermarkInterval) throws Exception {
	super(
			sourceContext,
			assignedPartitionsWithStartOffsets,
			watermarkStrategy,
			processingTimeProvider,
			autoWatermarkInterval,
			TestFetcher.class.getClassLoader(),
			new UnregisteredMetricsGroup(),
			false);
}
 
Example 11
Source Project: flink   Source File: RestClusterClientTest.java    License: Apache License 2.0
@Override
@SuppressWarnings("unchecked")
protected CompletableFuture<ClientCoordinationResponseBody> handleRequest(@Nonnull HandlerRequest<ClientCoordinationRequestBody, ClientCoordinationMessageParameters> request, @Nonnull DispatcherGateway gateway) throws RestHandlerException {
	try {
		TestCoordinationRequest req =
			(TestCoordinationRequest) request
				.getRequestBody()
				.getSerializedCoordinationRequest()
				.deserializeValue(getClass().getClassLoader());
		TestCoordinationResponse resp = new TestCoordinationResponse(req.payload);
		return CompletableFuture.completedFuture(
			new ClientCoordinationResponseBody(
				new SerializedValue<>(resp)));
	} catch (Exception e) {
		return FutureUtils.completedExceptionally(e);
	}
}
 
Example 12
Source Project: flink   Source File: AkkaRpcActor.java    License: Apache License 2.0
private Either<SerializedValue<?>, AkkaRpcException> serializeRemoteResultAndVerifySize(Object result, String methodName) {
	try {
		SerializedValue<?> serializedResult = new SerializedValue<>(result);

		long resultSize = serializedResult.getByteArray().length;
		if (resultSize > maximumFramesize) {
			return Either.Right(new AkkaRpcException(
				"The method " + methodName + "'s result size " + resultSize
					+ " exceeds the maximum size " + maximumFramesize + " ."));
		} else {
			return Either.Left(serializedResult);
		}
	} catch (IOException e) {
		return Either.Right(new AkkaRpcException(
			"Failed to serialize the result for RPC call : " + methodName + '.', e));
	}
}
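
This is the helper invoked from sendAsyncResponse in Example 7: a result destined for a remote sender is serialized eagerly and checked against the configured maximum framesize, so an oversized response surfaces as a descriptive AkkaRpcException instead of an opaque transport failure.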
 
Example 13
Source Project: Flink-CEPplus   Source File: ExecutionJobVertex.java    License: Apache License 2.0
public Either<SerializedValue<TaskInformation>, PermanentBlobKey> getTaskInformationOrBlobKey() throws IOException {
	// only one thread should offload the task information, so let's also let only one thread
	// serialize the task information!
	synchronized (stateMonitor) {
		if (taskInformationOrBlobKey == null) {
			final BlobWriter blobWriter = graph.getBlobWriter();

			final TaskInformation taskInformation = new TaskInformation(
				jobVertex.getID(),
				jobVertex.getName(),
				parallelism,
				maxParallelism,
				jobVertex.getInvokableClassName(),
				jobVertex.getConfiguration());

			taskInformationOrBlobKey = BlobWriter.serializeAndTryOffload(
				taskInformation,
				getJobId(),
				blobWriter);
		}

		return taskInformationOrBlobKey;
	}
}
 
Example 14
Source Project: flink   Source File: BlobWriter.java    License: Apache License 2.0
/**
 * Serializes the given value and offloads it to the BlobServer if its size exceeds the minimum
 * offloading size of the BlobServer.
 *
 * @param value to serialize
 * @param jobId to which the value belongs.
 * @param blobWriter to use to offload the serialized value
 * @param <T> type of the value to serialize
 * @return Either the serialized value or the stored blob key
 * @throws IOException if the data cannot be serialized
 */
static <T> Either<SerializedValue<T>, PermanentBlobKey> serializeAndTryOffload(
		T value,
		JobID jobId,
		BlobWriter blobWriter) throws IOException {
	Preconditions.checkNotNull(value);
	Preconditions.checkNotNull(jobId);
	Preconditions.checkNotNull(blobWriter);

	final SerializedValue<T> serializedValue = new SerializedValue<>(value);

	if (serializedValue.getByteArray().length < blobWriter.getMinOffloadingSize()) {
		return Either.Left(new SerializedValue<>(value));
	} else {
		try {
			final PermanentBlobKey permanentBlobKey = blobWriter.putPermanent(jobId, serializedValue.getByteArray());

			return Either.Right(permanentBlobKey);
		} catch (IOException e) {
			LOG.warn("Failed to offload value {} for job {} to BLOB store.", value, jobId, e);

			return Either.Left(serializedValue);
		}
	}
}
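
Note the offloading strategy: a value whose serialized form is below the BlobServer's minimum offloading size stays inline as Either.Left, a larger one is uploaded and represented by a PermanentBlobKey as Either.Right, and a failed upload is only logged, degrading gracefully to the inline serialized value instead of failing the caller.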
 
Example 15
Source Project: flink   Source File: TestEventSender.java    License: Apache License 2.0
@Override
public CompletableFuture<Acknowledge> apply(SerializedValue<OperatorEvent> event, Integer subtask) {
	final OperatorEvent deserializedEvent;
	try {
		deserializedEvent = event.deserializeValue(getClass().getClassLoader());
	} catch (IOException | ClassNotFoundException e) {
		throw new AssertionError(e);
	}
	events.add(new EventWithSubtask(deserializedEvent, subtask));

	return failureCause == null
			? CompletableFuture.completedFuture(Acknowledge.get())
			: FutureUtils.completedExceptionally(failureCause);
}
 
Example 16
Source Project: flink   Source File: TestingTaskExecutorGateway.java    License: Apache License 2.0
TestingTaskExecutorGateway(
		String address,
		String hostname,
		BiConsumer<ResourceID, AllocatedSlotReport> heartbeatJobManagerConsumer,
		BiConsumer<JobID, Throwable> disconnectJobManagerConsumer,
		BiFunction<TaskDeploymentDescriptor, JobMasterId, CompletableFuture<Acknowledge>> submitTaskConsumer,
		Function<Tuple6<SlotID, JobID, AllocationID, ResourceProfile, String, ResourceManagerId>, CompletableFuture<Acknowledge>> requestSlotFunction,
		BiFunction<AllocationID, Throwable, CompletableFuture<Acknowledge>> freeSlotFunction,
		Consumer<ResourceID> heartbeatResourceManagerConsumer,
		Consumer<Exception> disconnectResourceManagerConsumer,
		Function<ExecutionAttemptID, CompletableFuture<Acknowledge>> cancelTaskFunction,
		Supplier<CompletableFuture<Boolean>> canBeReleasedSupplier,
		TriConsumer<JobID, Set<ResultPartitionID>, Set<ResultPartitionID>> releaseOrPromotePartitionsConsumer,
		Consumer<Collection<IntermediateDataSetID>> releaseClusterPartitionsConsumer,
		TriFunction<ExecutionAttemptID, OperatorID, SerializedValue<OperatorEvent>, CompletableFuture<Acknowledge>> operatorEventHandler,
		Supplier<CompletableFuture<ThreadDumpInfo>> requestThreadDumpSupplier) {

	this.address = Preconditions.checkNotNull(address);
	this.hostname = Preconditions.checkNotNull(hostname);
	this.heartbeatJobManagerConsumer = Preconditions.checkNotNull(heartbeatJobManagerConsumer);
	this.disconnectJobManagerConsumer = Preconditions.checkNotNull(disconnectJobManagerConsumer);
	this.submitTaskConsumer = Preconditions.checkNotNull(submitTaskConsumer);
	this.requestSlotFunction = Preconditions.checkNotNull(requestSlotFunction);
	this.freeSlotFunction = Preconditions.checkNotNull(freeSlotFunction);
	this.heartbeatResourceManagerConsumer = heartbeatResourceManagerConsumer;
	this.disconnectResourceManagerConsumer = disconnectResourceManagerConsumer;
	this.cancelTaskFunction = cancelTaskFunction;
	this.canBeReleasedSupplier = canBeReleasedSupplier;
	this.releaseOrPromotePartitionsConsumer = releaseOrPromotePartitionsConsumer;
	this.releaseClusterPartitionsConsumer = releaseClusterPartitionsConsumer;
	this.operatorEventHandler = operatorEventHandler;
	this.requestThreadDumpSupplier = requestThreadDumpSupplier;
}
 
Example 17
Source Project: flink   Source File: JobResultSerializer.java    License: Apache License 2.0
public JobResultSerializer() {
	super(JobResult.class);

	final JavaType objectSerializedValueType = TypeFactory.defaultInstance()
		.constructType(new TypeReference<SerializedValue<Object>>() {
		});
	serializedValueSerializer = new SerializedValueSerializer(objectSerializedValueType);
}
 
Example 18
Source Project: flink   Source File: JobCheckpointingSettings.java    License: Apache License 2.0
public JobCheckpointingSettings(
		List<JobVertexID> verticesToTrigger,
		List<JobVertexID> verticesToAcknowledge,
		List<JobVertexID> verticesToConfirm,
		CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration,
		@Nullable SerializedValue<StateBackend> defaultStateBackend) {

	this(
		verticesToTrigger,
		verticesToAcknowledge,
		verticesToConfirm,
		checkpointCoordinatorConfiguration,
		defaultStateBackend,
		null);
}
 
Example 19
Source Project: flink   Source File: MiniCluster.java    License: Apache License 2.0
public CompletableFuture<CoordinationResponse> deliverCoordinationRequestToCoordinator(
		JobID jobId,
		OperatorID operatorId,
		SerializedValue<CoordinationRequest> serializedRequest) {
	return runDispatcherCommand(
		dispatcherGateway ->
			dispatcherGateway.deliverCoordinationRequestToCoordinator(
				jobId, operatorId, serializedRequest, rpcTimeout));
}
 
Example 20
@Override
protected void checkTaskOffloaded(ExecutionGraph eg, JobVertexID jobVertexId) throws Exception {
	Either<SerializedValue<TaskInformation>, PermanentBlobKey> taskInformationOrBlobKey = eg.getJobVertex(jobVertexId).getTaskInformationOrBlobKey();

	assertTrue(taskInformationOrBlobKey.isRight());

	// must not throw:
	blobServer.getFile(eg.getJobID(), taskInformationOrBlobKey.right());
}
 
Example 21
Source Project: Flink-CEPplus   Source File: FlinkKafkaConsumerBaseTest.java    License: Apache License 2.0
@Override
@SuppressWarnings("unchecked")
protected AbstractFetcher<T, ?> createFetcher(
		SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> thisSubtaskPartitionsWithStartOffsets,
		SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
		SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
		StreamingRuntimeContext runtimeContext,
		OffsetCommitMode offsetCommitMode,
		MetricGroup consumerMetricGroup,
		boolean useMetrics) throws Exception {
	return testFetcherSupplier.get();
}
 
Example 22
Source Project: flink   Source File: Kafka010Fetcher.java    License: Apache License 2.0
public Kafka010Fetcher(
		SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
		SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
		SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
		ProcessingTimeService processingTimeProvider,
		long autoWatermarkInterval,
		ClassLoader userCodeClassLoader,
		String taskNameWithSubtasks,
		KafkaDeserializationSchema<T> deserializer,
		Properties kafkaProperties,
		long pollTimeout,
		MetricGroup subtaskMetricGroup,
		MetricGroup consumerMetricGroup,
		boolean useMetrics,
		FlinkConnectorRateLimiter rateLimiter) throws Exception {
	super(
			sourceContext,
			assignedPartitionsWithInitialOffsets,
			watermarksPeriodic,
			watermarksPunctuated,
			processingTimeProvider,
			autoWatermarkInterval,
			userCodeClassLoader,
			taskNameWithSubtasks,
			deserializer,
			kafkaProperties,
			pollTimeout,
			subtaskMetricGroup,
			consumerMetricGroup,
			useMetrics, rateLimiter);
}
 
Example 23
Source Project: flink   Source File: AbstractFetcherWatermarksTest.java    License: Apache License 2.0
@Test
public void testPeriodicWatermarksWithNoSubscribedPartitionsShouldYieldNoWatermarks() throws Exception {
	final String testTopic = "test topic name";
	Map<KafkaTopicPartition, Long> originalPartitions = new HashMap<>();

	TestSourceContext<Long> sourceContext = new TestSourceContext<>();

	TestProcessingTimeService processingTimeProvider = new TestProcessingTimeService();

	TestFetcher<Long> fetcher = new TestFetcher<>(
			sourceContext,
			originalPartitions,
			new SerializedValue<>(testWmStrategy),
			processingTimeProvider,
			10);

	processingTimeProvider.setCurrentTime(10);
	// no partitions; when the periodic watermark emitter fires, no watermark should be emitted
	assertFalse(sourceContext.hasWatermark());

	// counter-test that when the fetcher does actually have partitions,
	// when the periodic watermark emitter fires again, a watermark really is emitted
	fetcher.addDiscoveredPartitions(Collections.singletonList(
			new KafkaTopicPartition(testTopic, 0)));
	emitRecord(fetcher, 100L, fetcher.subscribedPartitionStates().get(0), 3L);
	processingTimeProvider.setCurrentTime(20);
	assertEquals(100, sourceContext.getLatestWatermark().getTimestamp());
}
 
Example 24
Source Project: flink   Source File: SerializedValueSerializerTest.java    License: Apache License 2.0
@Before
public void setUp() {
	objectMapper = new ObjectMapper();
	final SimpleModule simpleModule = new SimpleModule();
	final JavaType serializedValueWildcardType = objectMapper
		.getTypeFactory()
		.constructType(new TypeReference<SerializedValue<?>>() {
		});
	simpleModule.addSerializer(new SerializedValueSerializer(serializedValueWildcardType));
	simpleModule.addDeserializer(
		SerializedValue.class,
		new SerializedValueDeserializer(serializedValueWildcardType));
	objectMapper.registerModule(simpleModule);
}
 
Example 25
Source Project: flink   Source File: JobMaster.java    License: Apache License 2.0
@Override
public CompletableFuture<Acknowledge> sendOperatorEventToCoordinator(
		final ExecutionAttemptID task,
		final OperatorID operatorID,
		final SerializedValue<OperatorEvent> serializedEvent) {

	try {
		final OperatorEvent evt = serializedEvent.deserializeValue(userCodeLoader);
		schedulerNG.deliverOperatorEventToCoordinator(task, operatorID, evt);
		return CompletableFuture.completedFuture(Acknowledge.get());
	} catch (Exception e) {
		return FutureUtils.completedExceptionally(e);
	}
}
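
Note that the event is deserialized with userCodeLoader before being handed to the scheduler: operator events may reference user-defined classes that are not visible to the system class loader.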
 
Example 26
Source Project: flink   Source File: OperatorEventDispatcherImpl.java    License: Apache License 2.0
@Override
public void sendEventToCoordinator(OperatorEvent event) {
	final SerializedValue<OperatorEvent> serializedEvent;
	try {
		serializedEvent = new SerializedValue<>(event);
	}
	catch (IOException e) {
		// this is not a recoverable situation, so we wrap this in an
		// unchecked exception and let it bubble up
		throw new FlinkRuntimeException("Cannot serialize operator event", e);
	}

	toCoordinator.sendOperatorEventToCoordinator(operatorId, serializedEvent);
}
 
Example 27
Source Project: flink   Source File: OperatorEventValveTest.java    License: Apache License 2.0
@Test
public void eventsBlockedByClosedValve() throws Exception {
	final TestEventSender sender = new TestEventSender();
	final OperatorEventValve valve = new OperatorEventValve(sender);

	valve.markForCheckpoint(1L);
	valve.shutValve(1L);

	final CompletableFuture<Acknowledge> future =
			valve.sendEvent(new SerializedValue<>(new TestOperatorEvent()), 1);

	assertTrue(sender.events.isEmpty());
	assertFalse(future.isDone());
}
 
Example 28
Source Project: Flink-CEPplus   Source File: JobAccumulatorsInfo.java    License: Apache License 2.0
@JsonCreator
public JobAccumulatorsInfo(
		@JsonProperty(FIELD_NAME_JOB_ACCUMULATORS) List<JobAccumulator> jobAccumulators,
		@JsonProperty(FIELD_NAME_USER_TASK_ACCUMULATORS) List<UserTaskAccumulator> userAccumulators,
		@JsonDeserialize(contentUsing = SerializedValueDeserializer.class) @JsonProperty(FIELD_NAME_SERIALIZED_USER_TASK_ACCUMULATORS) Map<String, SerializedValue<OptionalFailure<Object>>> serializedUserAccumulators) {
	this.jobAccumulators = Preconditions.checkNotNull(jobAccumulators);
	this.userAccumulators = Preconditions.checkNotNull(userAccumulators);
	this.serializedUserAccumulators = Preconditions.checkNotNull(serializedUserAccumulators);
}