Java Code Examples for org.apache.flink.util.function.FunctionWithException

The following examples show how to use org.apache.flink.util.function.FunctionWithException, a functional interface equivalent to java.util.function.Function except that its apply method may throw a checked exception (declared as the interface's third type parameter). The examples are extracted from open-source projects.
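Before the project examples, here is a minimal standalone sketch (not taken from any project below; the class name, the fileSize function, and the temp-file handling are illustrative only) showing how a FunctionWithException can be written as a lambda that throws a checked exception and then applied:

import org.apache.flink.util.function.FunctionWithException;

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

public class FunctionWithExceptionSketch {

	public static void main(String[] args) throws IOException {
		// Unlike java.util.function.Function, apply(...) may throw the checked
		// exception given as the third type parameter (here IOException).
		FunctionWithException<File, Long, IOException> fileSize =
				file -> Files.size(file.toPath());

		File tmp = File.createTempFile("function-with-exception", ".tmp");
		try {
			System.out.println(fileSize.apply(tmp)); // the caller declares or handles IOException
		} finally {
			tmp.delete();
		}
	}
}

When such a function has to be handed to an API that expects an unchecked java.util.function.Function, it can be wrapped with FunctionUtils.uncheckedFunction, as Example 28 below does.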
Example 1
/**
 * Single constructor to initialize all. Actual setup of the parts happens in the
 * factory methods.
 */
S3RecoverableFsDataOutputStream(
		RecoverableMultiPartUpload upload,
		FunctionWithException<File, RefCountedFile, IOException> tempFileCreator,
		RefCountedFSOutputStream initialTmpFile,
		long userDefinedMinPartSize,
		long bytesBeforeCurrentPart) {

	checkArgument(bytesBeforeCurrentPart >= 0L);

	this.upload = checkNotNull(upload);
	this.tmpFileProvider = checkNotNull(tempFileCreator);
	this.userDefinedMinPartSize = userDefinedMinPartSize;

	this.fileStream = initialTmpFile;
	this.bytesBeforeCurrentPart = bytesBeforeCurrentPart;
}
 
Example 2
public static S3RecoverableFsDataOutputStream newStream(
		final RecoverableMultiPartUpload upload,
		final FunctionWithException<File, RefCountedFile, IOException> tmpFileCreator,
		final long userDefinedMinPartSize) throws IOException {

	checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

	final RefCountedBufferingFileStream fileStream = boundedBufferingFileStream(tmpFileCreator, Optional.empty());

	return new S3RecoverableFsDataOutputStream(
			upload,
			tmpFileCreator,
			fileStream,
			userDefinedMinPartSize,
			0L);
}
 
Example 3
public static S3RecoverableFsDataOutputStream recoverStream(
		final RecoverableMultiPartUpload upload,
		final FunctionWithException<File, RefCountedFile, IOException> tmpFileCreator,
		final long userDefinedMinPartSize,
		final long bytesBeforeCurrentPart) throws IOException {

	checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

	final RefCountedBufferingFileStream fileStream = boundedBufferingFileStream(
			tmpFileCreator,
			upload.getIncompletePart());

	return new S3RecoverableFsDataOutputStream(
			upload,
			tmpFileCreator,
			fileStream,
			userDefinedMinPartSize,
			bytesBeforeCurrentPart);
}
 
Example 4
Source Project: Flink-CEPplus   Source File: S3RecoverableWriter.java    License: Apache License 2.0
public static S3RecoverableWriter writer(
		final FileSystem fs,
		final FunctionWithException<File, RefCountedFile, IOException> tempFileCreator,
		final S3AccessHelper s3AccessHelper,
		final Executor uploadThreadPool,
		final long userDefinedMinPartSize,
		final int maxConcurrentUploadsPerStream) {

	checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

	final S3RecoverableMultipartUploadFactory uploadFactory =
			new S3RecoverableMultipartUploadFactory(
					fs,
					s3AccessHelper,
					maxConcurrentUploadsPerStream,
					uploadThreadPool,
					tempFileCreator);

	return new S3RecoverableWriter(s3AccessHelper, uploadFactory, tempFileCreator, userDefinedMinPartSize);
}
 
Example 5
Source Project: flink   Source File: S3RecoverableFsDataOutputStream.java    License: Apache License 2.0
/**
 * Single constructor to initialize all. Actual setup of the parts happens in the
 * factory methods.
 */
S3RecoverableFsDataOutputStream(
		RecoverableMultiPartUpload upload,
		FunctionWithException<File, RefCountedFile, IOException> tempFileCreator,
		RefCountedFSOutputStream initialTmpFile,
		long userDefinedMinPartSize,
		long bytesBeforeCurrentPart) {

	checkArgument(bytesBeforeCurrentPart >= 0L);

	this.upload = checkNotNull(upload);
	this.tmpFileProvider = checkNotNull(tempFileCreator);
	this.userDefinedMinPartSize = userDefinedMinPartSize;

	this.fileStream = initialTmpFile;
	this.bytesBeforeCurrentPart = bytesBeforeCurrentPart;
}
 
Example 6
Source Project: flink   Source File: S3RecoverableFsDataOutputStream.java    License: Apache License 2.0
public static S3RecoverableFsDataOutputStream newStream(
		final RecoverableMultiPartUpload upload,
		final FunctionWithException<File, RefCountedFile, IOException> tmpFileCreator,
		final long userDefinedMinPartSize) throws IOException {

	checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

	final RefCountedBufferingFileStream fileStream = boundedBufferingFileStream(tmpFileCreator, Optional.empty());

	return new S3RecoverableFsDataOutputStream(
			upload,
			tmpFileCreator,
			fileStream,
			userDefinedMinPartSize,
			0L);
}
 
Example 7
Source Project: flink   Source File: S3RecoverableFsDataOutputStream.java    License: Apache License 2.0
public static S3RecoverableFsDataOutputStream recoverStream(
		final RecoverableMultiPartUpload upload,
		final FunctionWithException<File, RefCountedFile, IOException> tmpFileCreator,
		final long userDefinedMinPartSize,
		final long bytesBeforeCurrentPart) throws IOException {

	checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

	final RefCountedBufferingFileStream fileStream = boundedBufferingFileStream(
			tmpFileCreator,
			upload.getIncompletePart());

	return new S3RecoverableFsDataOutputStream(
			upload,
			tmpFileCreator,
			fileStream,
			userDefinedMinPartSize,
			bytesBeforeCurrentPart);
}
 
Example 8
Source Project: flink   Source File: OneInputStreamTaskTestHarness.java    License: Apache License 2.0
public OneInputStreamTaskTestHarness(
	FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
	int numInputGates,
	int numInputChannelsPerGate,
	TypeInformation<IN> inputType,
	TypeInformation<OUT> outputType,
	File localRootDir) {
	super(taskFactory, outputType, localRootDir);

	this.inputType = inputType;
	inputSerializer = inputType.createSerializer(executionConfig);

	this.numInputGates = numInputGates;
	this.numInputChannelsPerGate = numInputChannelsPerGate;

	streamConfig.setStateKeySerializer(inputSerializer);
}
 
Example 9
Source Project: flink   Source File: ResultPartition.java    License: Apache License 2.0
public ResultPartition(
	String owningTaskName,
	ResultPartitionID partitionId,
	ResultPartitionType partitionType,
	ResultSubpartition[] subpartitions,
	int numTargetKeyGroups,
	ResultPartitionManager partitionManager,
	FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {

	this.owningTaskName = checkNotNull(owningTaskName);
	this.partitionId = checkNotNull(partitionId);
	this.partitionType = checkNotNull(partitionType);
	this.subpartitions = checkNotNull(subpartitions);
	this.numTargetKeyGroups = numTargetKeyGroups;
	this.partitionManager = checkNotNull(partitionManager);
	this.bufferPoolFactory = bufferPoolFactory;
}
 
Example 10
Source Project: flink   Source File: TwoInputStreamTaskTestHarness.java    License: Apache License 2.0
/**
 * Creates a test harness with the specified number of input gates and specified number
 * of channels per input gate. Parameter inputGateAssignment specifies for each gate whether
 * it should be assigned to the first (1), or second (2) input of the task.
 */
public TwoInputStreamTaskTestHarness(
		FunctionWithException<Environment, ? extends AbstractTwoInputStreamTask<IN1, IN2, OUT>, Exception> taskFactory,
		int numInputGates,
		int numInputChannelsPerGate,
		int[] inputGateAssignment,
		TypeInformation<IN1> inputType1,
		TypeInformation<IN2> inputType2,
		TypeInformation<OUT> outputType) {

	super(taskFactory, outputType);

	inputSerializer1 = inputType1.createSerializer(executionConfig);

	inputSerializer2 = inputType2.createSerializer(executionConfig);

	this.numInputGates = numInputGates;
	this.numInputChannelsPerGate = numInputChannelsPerGate;
	this.inputGateAssignment = inputGateAssignment;
}
 
Example 11
Source Project: flink   Source File: TestingExecutor.java    License: Apache License 2.0
TestingExecutor(
		List<SupplierWithException<TypedResult<List<Tuple2<Boolean, Row>>>, SqlExecutionException>> resultChanges,
		List<SupplierWithException<TypedResult<Integer>, SqlExecutionException>> snapshotResults,
		List<SupplierWithException<List<Row>, SqlExecutionException>> resultPages,
		BiConsumerWithException<String, String, SqlExecutionException> useCatalogConsumer,
		BiConsumerWithException<String, String, SqlExecutionException> useDatabaseConsumer,
		BiFunctionWithException<String, String, TableResult, SqlExecutionException> executeSqlConsumer,
		TriFunctionWithException<String, String, String, Void, SqlExecutionException> setSessionPropertyFunction,
		FunctionWithException<String, Void, SqlExecutionException> resetSessionPropertiesFunction) {
	this.resultChanges = resultChanges;
	this.snapshotResults = snapshotResults;
	this.resultPages = resultPages;
	this.useCatalogConsumer = useCatalogConsumer;
	this.useDatabaseConsumer = useDatabaseConsumer;
	this.executeSqlConsumer = executeSqlConsumer;
	this.setSessionPropertyFunction = setSessionPropertyFunction;
	this.resetSessionPropertiesFunction = resetSessionPropertiesFunction;
	helper = new SqlParserHelper();
	helper.registerTables();
}
 
Example 12
Source Project: flink   Source File: ConfigUtils.java    License: Apache License 2.0
/**
 * Gets a {@link List} of values of type {@code IN} from a {@link ReadableConfig}
 * and transforms it to a {@link List} of type {@code OUT} based on the provided {@code mapper} function.
 *
 * @param configuration the configuration object to get the value out of
 * @param key the {@link ConfigOption option} to serve as the key for the list in the configuration
 * @param mapper the transformation function from {@code IN} to {@code OUT}.
 * @return the transformed values in a list of type {@code OUT}.
 */
public static <IN, OUT, E extends Throwable> List<OUT> decodeListFromConfig(
		final ReadableConfig configuration,
		final ConfigOption<List<IN>> key,
		final FunctionWithException<IN, OUT, E> mapper) throws E {

	checkNotNull(configuration);
	checkNotNull(key);
	checkNotNull(mapper);

	final List<IN> encodedString = configuration.get(key);
	if (encodedString == null || encodedString.isEmpty()) {
		return new ArrayList<>();
	}

	final List<OUT> result = new ArrayList<>(encodedString.size());
	for (IN input : encodedString) {
		result.add(mapper.apply(input));
	}
	return result;
}
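A hedged usage sketch for decodeListFromConfig, assuming a hypothetical list-typed option built with the ConfigOptions builder (the key "example.paths", the readPaths helper, and the mapping to canonical files are illustrative only); note how the checked IOException thrown by the mapper propagates through the E type parameter:

static List<File> readPaths() throws IOException {
	// Hypothetical option holding a list of strings, for illustration only.
	final ConfigOption<List<String>> paths = ConfigOptions
			.key("example.paths")
			.stringType()
			.asList()
			.noDefaultValue();

	final Configuration conf = new Configuration();
	conf.set(paths, Arrays.asList("/tmp/a", "/tmp/b"));

	// The mapper may throw IOException; decodeListFromConfig re-throws it unchanged,
	// which is why this method declares it instead of wrapping it.
	return ConfigUtils.decodeListFromConfig(
			conf,
			paths,
			path -> new File(path).getCanonicalFile());
}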
 
Example 13
Source Project: flink   Source File: DelimitedInputFormatTest.java    License: Apache License 2.0
private void testDelimiterOnBufferBoundary(FunctionWithException<String, FileInputSplit, IOException> splitCreator) throws IOException {
	String[] records = new String[]{"1234567890<DEL?NO!>1234567890", "1234567890<DEL?NO!>1234567890", "<DEL?NO!>"};
	String delimiter = "<DELIM>";
	String fileContent = StringUtils.join(records, delimiter);

	final FileInputSplit split = splitCreator.apply(fileContent);
	final Configuration parameters = new Configuration();

	format.setBufferSize(12);
	format.setDelimiter(delimiter);
	format.configure(parameters);
	format.open(split);

	for (String record : records) {
		String value = format.nextRecord(null);
		assertEquals(record, value);
	}

	assertNull(format.nextRecord(null));
	assertTrue(format.reachedEnd());

	format.close();
}
 
Example 14
Source Project: flink   Source File: S3RecoverableFsDataOutputStream.java    License: Apache License 2.0
/**
 * Single constructor to initialize all. Actual setup of the parts happens in the
 * factory methods.
 */
S3RecoverableFsDataOutputStream(
		RecoverableMultiPartUpload upload,
		FunctionWithException<File, RefCountedFileWithStream, IOException> tempFileCreator,
		RefCountedFSOutputStream initialTmpFile,
		long userDefinedMinPartSize,
		long bytesBeforeCurrentPart) {

	checkArgument(bytesBeforeCurrentPart >= 0L);

	this.upload = checkNotNull(upload);
	this.tmpFileProvider = checkNotNull(tempFileCreator);
	this.userDefinedMinPartSize = userDefinedMinPartSize;

	this.fileStream = initialTmpFile;
	this.bytesBeforeCurrentPart = bytesBeforeCurrentPart;
}
 
Example 15
Source Project: flink   Source File: S3RecoverableFsDataOutputStream.java    License: Apache License 2.0
public static S3RecoverableFsDataOutputStream recoverStream(
		final RecoverableMultiPartUpload upload,
		final FunctionWithException<File, RefCountedFileWithStream, IOException> tmpFileCreator,
		final long userDefinedMinPartSize,
		final long bytesBeforeCurrentPart) throws IOException {

	checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

	final RefCountedBufferingFileStream fileStream = boundedBufferingFileStream(
			tmpFileCreator,
			upload.getIncompletePart());

	return new S3RecoverableFsDataOutputStream(
			upload,
			tmpFileCreator,
			fileStream,
			userDefinedMinPartSize,
			bytesBeforeCurrentPart);
}
 
Example 16
Source Project: flink   Source File: S3RecoverableWriter.java    License: Apache License 2.0
public static S3RecoverableWriter writer(
		final FileSystem fs,
		final FunctionWithException<File, RefCountedFileWithStream, IOException> tempFileCreator,
		final S3AccessHelper s3AccessHelper,
		final Executor uploadThreadPool,
		final long userDefinedMinPartSize,
		final int maxConcurrentUploadsPerStream) {

	checkArgument(userDefinedMinPartSize >= S3_MULTIPART_MIN_PART_SIZE);

	final S3RecoverableMultipartUploadFactory uploadFactory =
			new S3RecoverableMultipartUploadFactory(
					fs,
					s3AccessHelper,
					maxConcurrentUploadsPerStream,
					uploadThreadPool,
					tempFileCreator);

	return new S3RecoverableWriter(s3AccessHelper, uploadFactory, tempFileCreator, userDefinedMinPartSize);
}
 
Example 17
Source Project: flink   Source File: ResultPartition.java    License: Apache License 2.0
public ResultPartition(
	String owningTaskName,
	int partitionIndex,
	ResultPartitionID partitionId,
	ResultPartitionType partitionType,
	ResultSubpartition[] subpartitions,
	int numTargetKeyGroups,
	ResultPartitionManager partitionManager,
	@Nullable BufferCompressor bufferCompressor,
	FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {

	this.owningTaskName = checkNotNull(owningTaskName);
	Preconditions.checkArgument(0 <= partitionIndex, "The partition index must be non-negative.");
	this.partitionIndex = partitionIndex;
	this.partitionId = checkNotNull(partitionId);
	this.partitionType = checkNotNull(partitionType);
	this.subpartitions = checkNotNull(subpartitions);
	this.numTargetKeyGroups = numTargetKeyGroups;
	this.partitionManager = checkNotNull(partitionManager);
	this.bufferCompressor = bufferCompressor;
	this.bufferPoolFactory = bufferPoolFactory;
}
 
Example 18
Source Project: flink   Source File: ResultPartitionFactory.java    License: Apache License 2.0
/**
 * The minimum pool size should be <code>numberOfSubpartitions + 1</code> for two considerations:
 *
 * <p>1. A StreamTask can only process input if there is at least one available buffer on the output side, so the task
 * might get stuck if the minimum pool size were exactly equal to the number of subpartitions, because every
 * subpartition might hold a partially filled buffer.
 *
 * <p>2. Adding one more buffer to every output LocalBufferPool avoids a performance regression when processing input
 * depends on at least one buffer being available on the output side.
 */
@VisibleForTesting
FunctionWithException<BufferPoolOwner, BufferPool, IOException> createBufferPoolFactory(
		int numberOfSubpartitions,
		ResultPartitionType type) {
	return bufferPoolOwner -> {
		int maxNumberOfMemorySegments = type.isBounded() ?
			numberOfSubpartitions * networkBuffersPerChannel + floatingNetworkBuffersPerGate : Integer.MAX_VALUE;
		// If the partition type is back pressure-free, we register with the buffer pool for
		// callbacks to release memory.
		return bufferPoolFactory.createBufferPool(
			numberOfSubpartitions + 1,
			maxNumberOfMemorySegments,
			type.hasBackPressure() ? null : bufferPoolOwner,
			numberOfSubpartitions,
			maxBuffersPerChannel);
	};
}
 
Example 19
Source Project: flink   Source File: ReleaseOnConsumptionResultPartition.java    License: Apache License 2.0
ReleaseOnConsumptionResultPartition(
		String owningTaskName,
		int partitionIndex,
		ResultPartitionID partitionId,
		ResultPartitionType partitionType,
		ResultSubpartition[] subpartitions,
		int numTargetKeyGroups,
		ResultPartitionManager partitionManager,
		@Nullable BufferCompressor bufferCompressor,
		FunctionWithException<BufferPoolOwner, BufferPool, IOException> bufferPoolFactory) {
	super(
		owningTaskName,
		partitionIndex,
		partitionId,
		partitionType,
		subpartitions,
		numTargetKeyGroups,
		partitionManager,
		bufferCompressor,
		bufferPoolFactory);

	this.consumedSubpartitions = new boolean[subpartitions.length];
	this.numUnconsumedSubpartitions = subpartitions.length;
}
 
Example 20
Source Project: flink   Source File: OneInputStreamTaskTestHarness.java    License: Apache License 2.0
/**
 * Creates a test harness with the specified number of input gates and specified number
 * of channels per input gate and specified localRecoveryConfig.
 */
public OneInputStreamTaskTestHarness(
		FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
		int numInputGates,
		int numInputChannelsPerGate,
		TypeInformation<IN> inputType,
		TypeInformation<OUT> outputType,
		LocalRecoveryConfig localRecoveryConfig) {

	super(taskFactory, outputType, localRecoveryConfig);

	this.inputType = inputType;
	inputSerializer = inputType.createSerializer(executionConfig);

	this.numInputGates = numInputGates;
	this.numInputChannelsPerGate = numInputChannelsPerGate;
}
 
Example 21
Source Project: flink   Source File: StreamTaskTestHarness.java    License: Apache License 2.0
public StreamTaskTestHarness(
	FunctionWithException<Environment, ? extends StreamTask<OUT, ?>, Exception> taskFactory,
	TypeInformation<OUT> outputType,
	LocalRecoveryConfig localRecoveryConfig) {
	this.taskFactory = checkNotNull(taskFactory);
	this.memorySize = DEFAULT_MEMORY_MANAGER_SIZE;
	this.bufferSize = DEFAULT_NETWORK_BUFFER_SIZE;

	this.jobConfig = new Configuration();
	this.taskConfig = new Configuration();
	this.executionConfig = new ExecutionConfig();

	streamConfig = new StreamConfig(taskConfig);
	streamConfig.setBufferTimeout(0);

	outputSerializer = outputType.createSerializer(executionConfig);
	outputStreamRecordSerializer = new StreamElementSerializer<>(outputSerializer);

	this.taskStateManager = new TestTaskStateManager(localRecoveryConfig);
}
 
Example 22
Source Project: flink   Source File: ResultPartitionBuilder.java    License: Apache License 2.0
public ResultPartition build() {
	ResultPartitionFactory resultPartitionFactory = new ResultPartitionFactory(
		partitionManager,
		channelManager,
		networkBufferPool,
		blockingSubpartitionType,
		networkBuffersPerChannel,
		floatingNetworkBuffersPerGate,
		networkBufferSize,
		releasedOnConsumption,
		blockingShuffleCompressionEnabled,
		compressionCodec,
		maxBuffersPerChannel);

	FunctionWithException<BufferPoolOwner, BufferPool, IOException> factory = bufferPoolFactory.orElseGet(() ->
		resultPartitionFactory.createBufferPoolFactory(numberOfSubpartitions, partitionType));

	return resultPartitionFactory.create(
		"Result Partition task",
		partitionIndex,
		partitionId,
		partitionType,
		numberOfSubpartitions,
		numTargetKeyGroups,
		factory);
}
 
Example 23
private static RefCountedBufferingFileStream boundedBufferingFileStream(
		final FunctionWithException<File, RefCountedFile, IOException> tmpFileCreator,
		final Optional<File> incompletePart) throws IOException {

	if (!incompletePart.isPresent()) {
		return RefCountedBufferingFileStream.openNew(tmpFileCreator);
	}

	final File file = incompletePart.get();
	return RefCountedBufferingFileStream.restore(tmpFileCreator, file);
}
 
Example 24
Source Project: Flink-CEPplus   Source File: S3RecoverableWriter.java    License: Apache License 2.0
@VisibleForTesting
S3RecoverableWriter(
		final S3AccessHelper s3AccessHelper,
		final S3RecoverableMultipartUploadFactory uploadFactory,
		final FunctionWithException<File, RefCountedFile, IOException> tempFileCreator,
		final long userDefinedMinPartSize) {

	this.s3AccessHelper = checkNotNull(s3AccessHelper);
	this.uploadFactory = checkNotNull(uploadFactory);
	this.tempFileCreator = checkNotNull(tempFileCreator);
	this.userDefinedMinPartSize = userDefinedMinPartSize;
}
 
Example 25
S3RecoverableMultipartUploadFactory(
		final FileSystem fs,
		final S3AccessHelper s3AccessHelper,
		final int maxConcurrentUploadsPerStream,
		final Executor executor,
		final FunctionWithException<File, RefCountedFile, IOException> tmpFileSupplier) {

	this.fs = Preconditions.checkNotNull(fs);
	this.maxConcurrentUploadsPerStream = maxConcurrentUploadsPerStream;
	this.executor = executor;
	this.s3AccessHelper = s3AccessHelper;
	this.tmpFileSupplier = tmpFileSupplier;
}
 
Example 26
public static RefCountedBufferingFileStream openNew(
		final FunctionWithException<File, RefCountedFile, IOException> tmpFileProvider) throws IOException {

	return new RefCountedBufferingFileStream(
			tmpFileProvider.apply(null),
			BUFFER_SIZE);
}
 
Example 27
public static RefCountedBufferingFileStream restore(
		final FunctionWithException<File, RefCountedFile, IOException> tmpFileProvider,
		final File initialTmpFile) throws IOException {

	return new RefCountedBufferingFileStream(
			tmpFileProvider.apply(initialTmpFile),
			BUFFER_SIZE);
}
 
Example 28
Source Project: Flink-CEPplus   Source File: Dispatcher.java    License: Apache License 2.0
private CompletableFuture<Void> waitForTerminatingJobManager(JobID jobId, JobGraph jobGraph, FunctionWithException<JobGraph, CompletableFuture<Void>, ?> action) {
	final CompletableFuture<Void> jobManagerTerminationFuture = getJobTerminationFuture(jobId)
		.exceptionally((Throwable throwable) -> {
			throw new CompletionException(
				new DispatcherException(
					String.format("Termination of previous JobManager for job %s failed. Cannot submit job under the same job id.", jobId),
					throwable)); });

	return jobManagerTerminationFuture.thenComposeAsync(
		FunctionUtils.uncheckedFunction((ignored) -> {
			jobManagerTerminationFutures.remove(jobId);
			return action.apply(jobGraph);
		}),
		getMainThreadExecutor());
}
 
Example 29
private <T> T runRemoteMessageResponseTest(String payload, FunctionWithException<MessageRpcGateway, T, Exception> rpcCall) throws Exception {
	final MessageRpcEndpoint rpcEndpoint = new MessageRpcEndpoint(rpcService1, payload);

	try {
		rpcEndpoint.start();

		MessageRpcGateway rpcGateway = rpcService2.connect(rpcEndpoint.getAddress(), MessageRpcGateway.class).get();

		return rpcCall.apply(rpcGateway);
	} finally {
		RpcUtils.terminateRpcEndpoint(rpcEndpoint, TIMEOUT);
	}
}
 
Example 30
private <T> T runLocalMessageResponseTest(String payload, FunctionWithException<MessageRpcGateway, T, Exception> rpcCall) throws Exception {
	final MessageRpcEndpoint rpcEndpoint = new MessageRpcEndpoint(rpcService1, payload);

	try {
		rpcEndpoint.start();

		MessageRpcGateway rpcGateway = rpcService1.connect(rpcEndpoint.getAddress(), MessageRpcGateway.class).get();

		return rpcCall.apply(rpcGateway);
	} finally {
		RpcUtils.terminateRpcEndpoint(rpcEndpoint, TIMEOUT);
	}
}