Java Code Examples for org.apache.flink.core.testutils.OneShotLatch#await()

The following examples show how to use org.apache.flink.core.testutils.OneShotLatch#await(). Each example comes from an open-source project; the source file and license are noted above each snippet.
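Before turning to the examples, here is a minimal, self-contained sketch of the basic pattern: a worker thread calls trigger() to release the latch, and the test thread blocks in await() until that happens. The class and thread below are illustrative only and are not taken from any of the projects listed here.

import java.util.concurrent.TimeUnit;

import org.apache.flink.core.testutils.OneShotLatch;

public class OneShotLatchUsageSketch {

	public static void main(String[] args) throws Exception {
		final OneShotLatch latch = new OneShotLatch();

		// a background thread does some work and then releases the latch exactly once
		new Thread(() -> {
			// ... work under test would happen here ...
			latch.trigger();
		}).start();

		// blocks until trigger() has been called; returns immediately if already triggered
		latch.await();

		// a timed variant also exists (see Example 5 below); it is expected to throw
		// a TimeoutException if the latch is not triggered within the given time
		latch.await(30L, TimeUnit.SECONDS);
	}
}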
Example 1
Source File: JobManagerRunnerImplTest.java    From flink with Apache License 2.0
@Test
public void testLibraryCacheManagerRegistration() throws Exception {
	final OneShotLatch registerClassLoaderLatch = new OneShotLatch();
	final OneShotLatch closeClassLoaderLeaseLatch = new OneShotLatch();
	final TestingClassLoaderLease classLoaderLease = TestingClassLoaderLease.newBuilder()
		.setGetOrResolveClassLoaderFunction((permanentBlobKeys, urls) -> {
			registerClassLoaderLatch.trigger();
			return JobManagerRunnerImplTest.class.getClassLoader();
		})
		.setCloseRunnable(closeClassLoaderLeaseLatch::trigger)
		.build();
	final JobManagerRunner jobManagerRunner = createJobManagerRunner(classLoaderLease);

	try {
		jobManagerRunner.start();

		registerClassLoaderLatch.await();

		jobManagerRunner.close();

		closeClassLoaderLeaseLatch.await();
	} finally {
		jobManagerRunner.close();
	}
}
 
Example 2
Source File: TaskExecutorTest.java    From flink with Apache License 2.0
@Test
public void taskExecutorJobServicesCloseClassLoaderLeaseUponClosing() throws InterruptedException {
	final OneShotLatch leaseReleaseLatch = new OneShotLatch();
	final OneShotLatch closeHookLatch = new OneShotLatch();
	final TestingClassLoaderLease classLoaderLease = TestingClassLoaderLease.newBuilder()
		.setCloseRunnable(leaseReleaseLatch::trigger)
		.build();

	final TaskExecutor.TaskExecutorJobServices taskExecutorJobServices = TaskExecutor.TaskExecutorJobServices.create(
		classLoaderLease,
		closeHookLatch::trigger);

	taskExecutorJobServices.close();
	leaseReleaseLatch.await();
	closeHookLatch.await();
}
 
Example 3
Source File: CheckpointStateOutputStreamTest.java    From flink with Apache License 2.0
/**
 * This test validates that a close operation can happen even while a 'closeAndGetHandle()'
 * call is in progress.
 * <p>That behavior is essential for fast cancellation (concurrent cleanup).
 */
@Test
public void testCloseDoesNotLock() throws Exception {
	final Path folder = new Path(tmp.newFolder().toURI());
	final String fileName = "this-is-ignored-anyways.file";

	final FileSystem fileSystem = spy(new TestFs((path) -> new BlockerStream()));

	final FSDataOutputStream checkpointStream =
		createTestStream(fileSystem, folder, fileName);

	final OneShotLatch sync = new OneShotLatch();

	final CheckedThread thread = new CheckedThread() {

		@Override
		public void go() throws Exception {
			sync.trigger();
			// that call should now block, because it accesses the position
			closeAndGetResult(checkpointStream);
		}
	};
	thread.start();

	sync.await();
	checkpointStream.close();

	// the thread may or may not fail, depending on the thread race;
	// that is not important for this test - what matters is that the thread does not freeze or lock up
	try {
		thread.sync();
	} catch (IOException ignored) {}
}
 
Example 4
Source File: NetworkBufferPoolTest.java    From flink with Apache License 2.0
/**
 * Tests {@link NetworkBufferPool#requestMemorySegments()}, verifying it may be aborted in
 * case of a concurrent {@link NetworkBufferPool#destroy()} call.
 */
@Test
public void testRequestMemorySegmentsInterruptable() throws Exception {
	final int numBuffers = 10;

	NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128, 10);
	MemorySegment segment = globalPool.requestMemorySegment();
	assertNotNull(segment);

	final OneShotLatch isRunning = new OneShotLatch();
	CheckedThread asyncRequest = new CheckedThread() {
		@Override
		public void go() throws Exception {
			isRunning.trigger();
			globalPool.requestMemorySegments();
		}
	};
	asyncRequest.start();

	// We want the destroy call to happen inside the blocking part of the globalPool.requestMemorySegments()
	// call above. We cannot guarantee this, but we can make it highly probable:
	isRunning.await();
	Thread.sleep(10);
	globalPool.destroy();

	segment.free();

	expectedException.expect(IllegalStateException.class);
	expectedException.expectMessage("destroyed");
	try {
		asyncRequest.sync();
	} finally {
		globalPool.destroy();
	}
}
 
Example 5
Source File: AkkaRpcServiceTest.java    From flink with Apache License 2.0
/**
 * Tests that the {@link AkkaRpcService} can execute runnables.
 */
@Test
public void testExecuteRunnable() throws Exception {
	final OneShotLatch latch = new OneShotLatch();

	akkaRpcService.execute(latch::trigger);

	latch.await(30L, TimeUnit.SECONDS);
}
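The timed overload used above is expected to throw a TimeoutException when the latch is not triggered in time, so a runnable that never executes fails the test instead of hanging it. A minimal sketch of that failure mode, with an illustrative test and latch name not taken from the project above:

@Test
public void timedAwaitTimesOutWhenNeverTriggered() throws Exception {
	final OneShotLatch neverTriggered = new OneShotLatch();
	try {
		// nothing ever calls trigger(), so the timed await should give up
		neverTriggered.await(10L, TimeUnit.MILLISECONDS);
		fail("Expected the await to time out.");
	} catch (TimeoutException expected) {
		// expected: the latch was never triggered within the timeout
	}
}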
 
Example 6
Source File: StreamTaskTest.java    From flink with Apache License 2.0
@Override
protected void processInput(MailboxDefaultAction.Controller controller) throws Exception {
	final OneShotLatch latch = new OneShotLatch();
	final Object lock = new Object();

	LockHolder holder = new LockHolder(lock, latch);
	holder.start();
	try {
		// cancellation should try to cancel this
		getCancelables().registerCloseable(holder);

		// wait till the lock holder has the lock
		latch.await();

		// we are at the point where cancelling can happen
		syncLatch.trigger();

		// try to acquire the lock - this is not possible as long as the lock holder
		// thread lives
		//noinspection SynchronizationOnLocalVariableOrMethodParameter
		synchronized (lock) {
			// nothing
		}
	}
	finally {
		holder.close();
	}
	controller.allActionsCompleted();
}
 
Example 7
Source File: CheckpointCoordinatorTriggeringTest.java    From flink with Apache License 2.0
/**
 * This test is timing-dependent and, if the behavior under test regresses, only fails eventually.
 */
@Test
public void discardingTriggeringCheckpointWillExecuteNextCheckpointRequest() throws Exception {
	final ExecutionVertex executionVertex = mockExecutionVertex(new ExecutionAttemptID());

	final ScheduledExecutorService scheduledExecutorService = Executors.newSingleThreadScheduledExecutor();
	final CheckpointCoordinator checkpointCoordinator = new CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder()
		.setTasks(new ExecutionVertex[]{executionVertex})
		.setTimer(new ScheduledExecutorServiceAdapter(scheduledExecutorService))
		.setCheckpointCoordinatorConfiguration(CheckpointCoordinatorConfiguration.builder()
			.build())
		.build();

	final CompletableFuture<String> masterHookCheckpointFuture = new CompletableFuture<>();
	final OneShotLatch triggerCheckpointLatch = new OneShotLatch();
	checkpointCoordinator.addMasterHook(new TestingMasterHook(masterHookCheckpointFuture, triggerCheckpointLatch));

	try {
		checkpointCoordinator.triggerCheckpoint(false);
		final CompletableFuture<CompletedCheckpoint> secondCheckpoint = checkpointCoordinator.triggerCheckpoint(false);

		triggerCheckpointLatch.await();
		masterHookCheckpointFuture.complete("Completed");

		// discard triggering checkpoint
		checkpointCoordinator.abortPendingCheckpoints(new CheckpointException(CheckpointFailureReason.CHECKPOINT_DECLINED));

		try {
			// verify that the second checkpoint request will be executed and eventually times out
			secondCheckpoint.get();
			fail("Expected the second checkpoint to fail.");
		} catch (ExecutionException ee) {
			assertThat(ExceptionUtils.stripExecutionException(ee), instanceOf(CheckpointException.class));
		}
	} finally {
		checkpointCoordinator.shutdown(JobStatus.FINISHED);
		ExecutorUtils.gracefulShutdown(10L, TimeUnit.SECONDS, scheduledExecutorService);
	}
}
 
Example 8
Source File: CheckpointStateOutputStreamTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * This test validates that a close operation can happen even while a 'closeAndGetHandle()'
 * call is in progress.
 * <p>That behavior is essential for fast cancellation (concurrent cleanup).
 */
@Test
public void testCloseDoesNotLock() throws Exception {
	final Path folder = new Path(tmp.newFolder().toURI());
	final String fileName = "this-is-ignored-anyways.file";

	final FileSystem fileSystem = spy(new TestFs((path) -> new BlockerStream()));

	final FSDataOutputStream checkpointStream =
		createTestStream(fileSystem, folder, fileName);

	final OneShotLatch sync = new OneShotLatch();

	final CheckedThread thread = new CheckedThread() {

		@Override
		public void go() throws Exception {
			sync.trigger();
			// that call should now block, because it accesses the position
			closeAndGetResult(checkpointStream);
		}
	};
	thread.start();

	sync.await();
	checkpointStream.close();

	// the thread may or may not fail, depending on the thread race;
	// that is not important for this test - what matters is that the thread does not freeze or lock up
	try {
		thread.sync();
	} catch (IOException ignored) {}
}
 
Example 9
Source File: JobMasterTest.java    From flink with Apache License 2.0
/**
 * Tests that we can close an unestablished ResourceManager connection.
 */
@Test
public void testCloseUnestablishedResourceManagerConnection() throws Exception {
	final JobMaster jobMaster = createJobMaster(
		configuration,
		jobGraph,
		haServices,
		new TestingJobManagerSharedServicesBuilder().build());

	try {
		jobMaster.start(JobMasterId.generate()).get();

		final TestingResourceManagerGateway firstResourceManagerGateway = createAndRegisterTestingResourceManagerGateway();
		final TestingResourceManagerGateway secondResourceManagerGateway = createAndRegisterTestingResourceManagerGateway();

		final OneShotLatch firstJobManagerRegistration = new OneShotLatch();
		final OneShotLatch secondJobManagerRegistration = new OneShotLatch();

		firstResourceManagerGateway.setRegisterJobManagerConsumer(
			jobMasterIdResourceIDStringJobIDTuple4 -> firstJobManagerRegistration.trigger());

		secondResourceManagerGateway.setRegisterJobManagerConsumer(
			jobMasterIdResourceIDStringJobIDTuple4 -> secondJobManagerRegistration.trigger());

		notifyResourceManagerLeaderListeners(firstResourceManagerGateway);

		// wait until we have seen the first registration attempt
		firstJobManagerRegistration.await();

		// this should stop the connection attempts towards the first RM
		notifyResourceManagerLeaderListeners(secondResourceManagerGateway);

		// check that we start registering at the second RM
		secondJobManagerRegistration.await();
	} finally {
		RpcUtils.terminateRpcEndpoint(jobMaster, testingTimeout);
	}
}
 
Example 10
Source File: DispatcherHATest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that interleaved granting and revoking of the leadership won't interfere
 * with the job recovery and the resulting internal state of the Dispatcher.
 */
@Test
public void testGrantingRevokingLeadership() throws Exception {
	final TestingHighAvailabilityServices highAvailabilityServices = new TestingHighAvailabilityServices();
	final JobGraph nonEmptyJobGraph = createNonEmptyJobGraph();
	final SubmittedJobGraph submittedJobGraph = new SubmittedJobGraph(nonEmptyJobGraph);

	final OneShotLatch enterGetJobIdsLatch = new OneShotLatch();
	final OneShotLatch proceedGetJobIdsLatch = new OneShotLatch();
	highAvailabilityServices.setSubmittedJobGraphStore(new BlockingSubmittedJobGraphStore(submittedJobGraph, enterGetJobIdsLatch, proceedGetJobIdsLatch));
	final TestingLeaderElectionService dispatcherLeaderElectionService = new TestingLeaderElectionService();
	highAvailabilityServices.setDispatcherLeaderElectionService(dispatcherLeaderElectionService);

	final BlockingQueue<DispatcherId> fencingTokens = new ArrayBlockingQueue<>(2);

	final HATestingDispatcher dispatcher = createDispatcherWithObservableFencingTokens(highAvailabilityServices, fencingTokens);

	dispatcher.start();

	try {
		// wait until the election service has been started
		dispatcherLeaderElectionService.getStartFuture().get();

		final UUID leaderId = UUID.randomUUID();
		dispatcherLeaderElectionService.isLeader(leaderId);

		dispatcherLeaderElectionService.notLeader();

		final DispatcherId firstFencingToken = fencingTokens.take();

		assertThat(firstFencingToken, equalTo(NULL_FENCING_TOKEN));

		enterGetJobIdsLatch.await();
		proceedGetJobIdsLatch.trigger();

		assertThat(dispatcher.getNumberJobs(timeout).get(), is(0));

	} finally {
		RpcUtils.terminateRpcEndpoint(dispatcher, timeout);
	}
}
 
Example 11
Source File: StateBackendTestBase.java    From flink with Apache License 2.0
@Test
public void testAsyncSnapshotCancellation() throws Exception {
	OneShotLatch blocker = new OneShotLatch();
	OneShotLatch waiter = new OneShotLatch();
	BlockerCheckpointStreamFactory streamFactory = new BlockerCheckpointStreamFactory(1024 * 1024);
	streamFactory.setWaiterLatch(waiter);
	streamFactory.setBlockerLatch(blocker);
	streamFactory.setAfterNumberInvocations(10);

	final AbstractKeyedStateBackend<Integer> backend = createKeyedBackend(IntSerializer.INSTANCE);

	try {

		if (!backend.supportsAsynchronousSnapshots()) {
			return;
		}

		InternalValueState<Integer, VoidNamespace, Integer> valueState = backend.createInternalState(
				VoidNamespaceSerializer.INSTANCE,
				new ValueStateDescriptor<>("test", IntSerializer.INSTANCE));

		valueState.setCurrentNamespace(VoidNamespace.INSTANCE);

		for (int i = 0; i < 10; ++i) {
			backend.setCurrentKey(i);
			valueState.update(i);
		}

		RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshot =
				backend.snapshot(0L, 0L, streamFactory, CheckpointOptions.forCheckpointWithDefaultLocation());

		Thread runner = new Thread(snapshot);
		runner.start();

		// wait until the code reached some stream read
		waiter.await();

		// close the backend to see if the close is propagated to the stream
		IOUtils.closeQuietly(backend);

		// unblock the stream so that it can run into the IOException
		blocker.trigger();

		runner.join();

		try {
			snapshot.get();
			fail("Close was not propagated.");
		} catch (CancellationException ex) {
			// ignore
		}

	} finally {
		backend.dispose();
	}
}
 
Example 12
Source File: AbstractFetcherTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testConcurrentPartitionsDiscoveryAndLoopFetching() throws Exception {
	// test data
	final KafkaTopicPartition testPartition = new KafkaTopicPartition("test", 42);

	// ----- create the test fetcher -----

	@SuppressWarnings("unchecked")
	SourceContext<String> sourceContext = new TestSourceContext<>();
	Map<KafkaTopicPartition, Long> partitionsWithInitialOffsets =
		Collections.singletonMap(testPartition, KafkaTopicPartitionStateSentinel.GROUP_OFFSET);

	final OneShotLatch fetchLoopWaitLatch = new OneShotLatch();
	final OneShotLatch stateIterationBlockLatch = new OneShotLatch();

	final TestFetcher<String> fetcher = new TestFetcher<>(
		sourceContext,
		partitionsWithInitialOffsets,
		null, /* periodic assigner */
		null, /* punctuated assigner */
		new TestProcessingTimeService(),
		10,
		fetchLoopWaitLatch,
		stateIterationBlockLatch);

	// ----- run the fetcher -----

	final CheckedThread checkedThread = new CheckedThread() {
		@Override
		public void go() throws Exception {
			fetcher.runFetchLoop();
		}
	};
	checkedThread.start();

	// wait until state iteration begins before adding discovered partitions
	fetchLoopWaitLatch.await();
	fetcher.addDiscoveredPartitions(Collections.singletonList(testPartition));

	stateIterationBlockLatch.trigger();
	checkedThread.sync();
}
 
Example 13
Source File: SlotPoolImplTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that idle slots which cannot be released will be discarded. See FLINK-11059.
 */
@Test
public void testDiscardIdleSlotIfReleasingFailed() throws Exception {
	final ManualClock clock = new ManualClock();

	try (SlotPoolImpl slotPool = new SlotPoolImpl(
		jobId,
		clock,
		TestingUtils.infiniteTime(),
		timeout)) {

		setupSlotPool(slotPool, resourceManagerGateway, mainThreadExecutor);
		Scheduler scheduler = setupScheduler(slotPool, mainThreadExecutor);

		final AllocationID expiredAllocationId = new AllocationID();
		final SlotOffer slotToExpire = new SlotOffer(expiredAllocationId, 0, ResourceProfile.UNKNOWN);

		OneShotLatch freeSlotLatch = new OneShotLatch();
		taskManagerGateway.setFreeSlotFunction((AllocationID allocationId, Throwable cause) -> {
			freeSlotLatch.trigger();
			return FutureUtils.completedExceptionally(new TimeoutException("Test failure"));
		});

		assertThat(slotPool.registerTaskManager(taskManagerLocation.getResourceID()), Matchers.is(true));

		assertThat(slotPool.offerSlot(taskManagerLocation, taskManagerGateway, slotToExpire), Matchers.is(true));

		clock.advanceTime(timeout.toMilliseconds() + 1, TimeUnit.MILLISECONDS);

		slotPool.triggerCheckIdleSlot();

		freeSlotLatch.await();

		CompletableFuture<LogicalSlot> allocatedSlotFuture = allocateSlot(scheduler, new SlotRequestId());

		try {
			// since the slot must have been discarded, we cannot fulfill the slot request
			allocatedSlotFuture.get(10L, TimeUnit.MILLISECONDS);
			fail("Expected to fail with a timeout.");
		} catch (TimeoutException ignored) {
			// expected
			assertEquals(0, slotPool.getAvailableSlots().size());
		}
	}
}
 
Example 14
Source File: StateBackendTestBase.java    From flink with Apache License 2.0
/**
 * The purpose of this test is to check that parallel snapshots are possible, and work even if a previous snapshot
 * is still running and blocking.
 */
@Test
public void testParallelAsyncSnapshots() throws Exception {
	OneShotLatch blocker = new OneShotLatch();
	OneShotLatch waiter = new OneShotLatch();
	BlockerCheckpointStreamFactory streamFactory = new BlockerCheckpointStreamFactory(1024 * 1024);
	streamFactory.setWaiterLatch(waiter);
	streamFactory.setBlockerLatch(blocker);
	streamFactory.setAfterNumberInvocations(10);

	final AbstractKeyedStateBackend<Integer> backend = createKeyedBackend(IntSerializer.INSTANCE);

	try {

		if (!backend.supportsAsynchronousSnapshots()) {
			return;
		}

		// insert some data to the backend.
		InternalValueState<Integer, VoidNamespace, Integer> valueState = backend.createInternalState(
			VoidNamespaceSerializer.INSTANCE,
			new ValueStateDescriptor<>("test", IntSerializer.INSTANCE));

		valueState.setCurrentNamespace(VoidNamespace.INSTANCE);

		for (int i = 0; i < 10; ++i) {
			backend.setCurrentKey(i);
			valueState.update(i);
		}

		RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshot1 =
			backend.snapshot(0L, 0L, streamFactory, CheckpointOptions.forCheckpointWithDefaultLocation());

		Thread runner1 = new Thread(snapshot1, "snapshot-1-runner");
		runner1.start();
		// after this call returns, we have a running snapshot-1 that is blocked in IO.
		waiter.await();

		// do some updates in between the snapshots.
		for (int i = 5; i < 15; ++i) {
			backend.setCurrentKey(i);
			valueState.update(i + 1);
		}

		// we don't want to block the second snapshot.
		streamFactory.setWaiterLatch(null);
		streamFactory.setBlockerLatch(null);

		RunnableFuture<SnapshotResult<KeyedStateHandle>> snapshot2 =
			backend.snapshot(1L, 1L, streamFactory, CheckpointOptions.forCheckpointWithDefaultLocation());

		Thread runner2 = new Thread(snapshot2, "snapshot-2-runner");
		runner2.start();
		// snapshot-2 should run and succeed, while snapshot-1 is still running and blocked in IO.
		snapshot2.get();

		// we release the blocking IO so that snapshot-1 can also finish and succeed.
		blocker.trigger();
		snapshot1.get();

	} finally {
		backend.dispose();
	}
}
 
Example 15
Source File: TaskExecutorTest.java    From Flink-CEPplus with Apache License 2.0 4 votes vote down vote up
@Test
public void testMaximumRegistrationDurationAfterConnectionLoss() throws Exception {
	configuration.setString(TaskManagerOptions.REGISTRATION_TIMEOUT, "100 ms");
	final TaskSlotTable taskSlotTable = new TaskSlotTable(Collections.singleton(ResourceProfile.UNKNOWN), timerService);

	final long heartbeatInterval = 10L;
	final TaskManagerServices taskManagerServices = new TaskManagerServicesBuilder().setTaskSlotTable(taskSlotTable).build();
	final TaskExecutor taskExecutor = new TaskExecutor(
		rpc,
		TaskManagerConfiguration.fromConfiguration(configuration),
		haServices,
		taskManagerServices,
		new HeartbeatServices(heartbeatInterval, 10L),
		UnregisteredMetricGroups.createUnregisteredTaskManagerMetricGroup(),
		null,
		dummyBlobCacheService,
		testingFatalErrorHandler);

	taskExecutor.start();

	final CompletableFuture<ResourceID> registrationFuture = new CompletableFuture<>();
	final OneShotLatch secondRegistration = new OneShotLatch();
	try {
		final TestingResourceManagerGateway testingResourceManagerGateway = new TestingResourceManagerGateway();
		testingResourceManagerGateway.setRegisterTaskExecutorFunction(
			tuple -> {
				if (registrationFuture.complete(tuple.f1)) {
					return CompletableFuture.completedFuture(new TaskExecutorRegistrationSuccess(
						new InstanceID(),
						testingResourceManagerGateway.getOwnResourceId(),
						new ClusterInformation("localhost", 1234)));
				} else {
					secondRegistration.trigger();
					return CompletableFuture.completedFuture(new RegistrationResponse.Decline("Only the first registration should succeed."));
				}
			}
		);
		rpc.registerGateway(testingResourceManagerGateway.getAddress(), testingResourceManagerGateway);

		resourceManagerLeaderRetriever.notifyListener(testingResourceManagerGateway.getAddress(), UUID.randomUUID());

		final ResourceID registrationResourceId = registrationFuture.get();

		assertThat(registrationResourceId, equalTo(taskManagerServices.getTaskManagerLocation().getResourceID()));

		secondRegistration.await();

		final Throwable error = testingFatalErrorHandler.getErrorFuture().get();
		assertThat(error, is(notNullValue()));
		assertThat(ExceptionUtils.stripExecutionException(error), instanceOf(RegistrationTimeoutException.class));

		testingFatalErrorHandler.clearError();
	} finally {
		RpcUtils.terminateRpcEndpoint(taskExecutor, timeout);
	}
}
 
Example 16
Source File: ContinuousFileProcessingTest.java    From flink with Apache License 2.0
@Test
public void testProcessOnce() throws Exception {
	String testBasePath = hdfsURI + "/" + UUID.randomUUID() + "/";

	final OneShotLatch latch = new OneShotLatch();

	// create a single file in the directory
	Tuple2<org.apache.hadoop.fs.Path, String> bootstrap =
		createFileAndFillWithData(testBasePath, "file", NO_OF_FILES + 1, "This is test line.");
	Assert.assertTrue(hdfs.exists(bootstrap.f0));

	// the source is supposed to read only this file.
	final Set<String> filesToBeRead = new TreeSet<>();
	filesToBeRead.add(bootstrap.f0.getName());

	TextInputFormat format = new TextInputFormat(new Path(testBasePath));
	format.setFilesFilter(FilePathFilter.createDefaultFilter());

	final ContinuousFileMonitoringFunction<String> monitoringFunction =
		createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_ONCE);

	final FileVerifyingSourceContext context = new FileVerifyingSourceContext(latch, monitoringFunction);

	final Thread t = new Thread() {
		@Override
		public void run() {
			try {
				monitoringFunction.open(new Configuration());
				monitoringFunction.run(context);

				// we would never arrive here if we were in
				// PROCESS_CONTINUOUSLY mode.

				// this will trigger the latch
				context.close();

			} catch (Exception e) {
				Assert.fail(e.getMessage());
			}
		}
	};
	t.start();

	// await() returns immediately if the latch has already been triggered,
	// so this guard is merely a shortcut
	if (!latch.isTriggered()) {
		latch.await();
	}

	// create some additional files that should be processed in the case of PROCESS_CONTINUOUSLY
	final org.apache.hadoop.fs.Path[] filesCreated = new org.apache.hadoop.fs.Path[NO_OF_FILES];
	for (int i = 0; i < NO_OF_FILES; i++) {
		Tuple2<org.apache.hadoop.fs.Path, String> ignoredFile =
			createFileAndFillWithData(testBasePath, "file", i, "This is test line.");
		filesCreated[i] = ignoredFile.f0;
	}

	// wait until the monitoring thread exits
	t.join();

	Assert.assertArrayEquals(filesToBeRead.toArray(), context.getSeenFiles().toArray());

	// finally delete the files created for the test.
	hdfs.delete(bootstrap.f0, false);
	for (org.apache.hadoop.fs.Path path: filesCreated) {
		hdfs.delete(path, false);
	}
}
 
Example 17
Source File: FutureUtilsTest.java    From flink with Apache License 2.0
/**
 * Tests that we can cancel a retry future.
 */
@Test
public void testRetryCancellation() throws Exception {
	final int retries = 10;
	final AtomicInteger atomicInteger = new AtomicInteger(0);
	final OneShotLatch notificationLatch = new OneShotLatch();
	final OneShotLatch waitLatch = new OneShotLatch();
	final AtomicReference<Throwable> atomicThrowable = new AtomicReference<>(null);

	CompletableFuture<?> retryFuture = FutureUtils.retry(
		() ->
			CompletableFuture.supplyAsync(
				() -> {
					if (atomicInteger.incrementAndGet() == 2) {
						notificationLatch.trigger();
						try {
							waitLatch.await();
						} catch (InterruptedException e) {
							atomicThrowable.compareAndSet(null, e);
						}
					}

					throw new CompletionException(new FlinkException("Test exception"));
				},
				TestingUtils.defaultExecutor()),
		retries,
		TestingUtils.defaultExecutor());

	// await that we have failed once
	notificationLatch.await();

	assertFalse(retryFuture.isDone());

	// cancel the retry future
	retryFuture.cancel(false);

	// let the retry operation continue
	waitLatch.trigger();

	assertTrue(retryFuture.isCancelled());
	assertEquals(2, atomicInteger.get());

	if (atomicThrowable.get() != null) {
		throw new FlinkException("Exception occurred in the retry operation.", atomicThrowable.get());
	}
}
 
Example 18
Source File: InitOutputPathTest.java    From Flink-CEPplus with Apache License 2.0
private void runTest(final boolean useAwaits) throws Exception {
	final File tempFile = tempDir.newFile();
	final Path path1 = new Path(tempFile.getAbsolutePath(), "1");
	final Path path2 = new Path(tempFile.getAbsolutePath(), "2");

	final OneShotLatch deleteAwaitLatch1 = new OneShotLatch();
	final OneShotLatch deleteAwaitLatch2 = new OneShotLatch();
	final OneShotLatch mkdirsAwaitLatch1 = new OneShotLatch();
	final OneShotLatch mkdirsAwaitLatch2 = new OneShotLatch();

	final OneShotLatch deleteTriggerLatch1 = new OneShotLatch();
	final OneShotLatch deleteTriggerLatch2 = new OneShotLatch();
	final OneShotLatch mkdirsTriggerLatch1 = new OneShotLatch();
	final OneShotLatch mkdirsTriggerLatch2 = new OneShotLatch();

	final OneShotLatch createAwaitLatch = new OneShotLatch();
	final OneShotLatch createTriggerLatch = new OneShotLatch();

	// this "new LocalDataOutputStream()" is in the end called by the async threads
	whenNew(LocalDataOutputStream.class).withAnyArguments().thenAnswer(new Answer<LocalDataOutputStream>() {

		@Override
		public LocalDataOutputStream answer(InvocationOnMock invocation) throws Throwable {
			createAwaitLatch.trigger();
			createTriggerLatch.await();

			final File file = (File) invocation.getArguments()[0];
			return new LocalDataOutputStream(file);
		}
	});

	final LocalFileSystem fs1 = new SyncedFileSystem(
			deleteAwaitLatch1, mkdirsAwaitLatch1,
			deleteTriggerLatch1, mkdirsTriggerLatch1);

	final LocalFileSystem fs2 = new SyncedFileSystem(
			deleteAwaitLatch2, mkdirsAwaitLatch2,
			deleteTriggerLatch2, mkdirsTriggerLatch2);

	// start the concurrent file creators
	FileCreator thread1 = new FileCreator(fs1, path1);
	FileCreator thread2 = new FileCreator(fs2, path2);
	thread1.start();
	thread2.start();

	// wait until they both decide to delete the directory
	if (useAwaits) {
		deleteAwaitLatch1.await();
		deleteAwaitLatch2.await();
	} else {
		Thread.sleep(5);
	}

	// now send off #1 to delete the directory (it passes 'mkdirs' quickly) and then waits to create the file
	mkdirsTriggerLatch1.trigger();
	deleteTriggerLatch1.trigger();

	if (useAwaits) {
		createAwaitLatch.await();
	} else {
		// this needs a bit more sleep time, because here mockito is working
		Thread.sleep(100);
	}

	// now send off #2 to delete the directory - it waits at 'mkdirs'
	deleteTriggerLatch2.trigger();
	if (useAwaits) {
		mkdirsAwaitLatch2.await();
	} else {
		Thread.sleep(5);
	}

	// let #1 try to create the file and see if it succeeded
	createTriggerLatch.trigger();
	if (useAwaits) {
		thread1.sync();
	} else {
		Thread.sleep(5);
	}

	// now let #2 finish up
	mkdirsTriggerLatch2.trigger();

	thread1.sync();
	thread2.sync();
}
 
Example 19
Source File: ResourceManagerTaskExecutorTest.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Tests delayed registration of a task executor, where the delay is introduced while the resource manager
 * connects to the registering task executor.
 */
@Test
public void testDelayedRegisterTaskExecutor() throws Exception {
	final Time fastTimeout = Time.milliseconds(1L);
	try {
		final OneShotLatch startConnection = new OneShotLatch();
		final OneShotLatch finishConnection = new OneShotLatch();

		// first registration is with blocking connection
		rpcService.setRpcGatewayFutureFunction(rpcGateway ->
			CompletableFuture.supplyAsync(
				() -> {
					startConnection.trigger();
					try {
						finishConnection.await();
					} catch (InterruptedException ignored) {}
					return rpcGateway;
				},
				TestingUtils.defaultExecutor()));

		CompletableFuture<RegistrationResponse> firstFuture =
			rmGateway.registerTaskExecutor(taskExecutorGateway.getAddress(), taskExecutorResourceID, dataPort, hardwareDescription, fastTimeout);
		try {
			firstFuture.get();
			fail("Should have failed because connection to taskmanager is delayed beyond timeout");
		} catch (Exception e) {
			assertThat(ExceptionUtils.stripExecutionException(e), instanceOf(AskTimeoutException.class));
		}

		startConnection.await();

		// the second registration after the timeout has no delay and is expected to succeed
		rpcService.resetRpcGatewayFutureFunction();
		CompletableFuture<RegistrationResponse> secondFuture =
			rmGateway.registerTaskExecutor(taskExecutorGateway.getAddress(), taskExecutorResourceID, dataPort, hardwareDescription, TIMEOUT);
		RegistrationResponse response = secondFuture.get();
		assertTrue(response instanceof TaskExecutorRegistrationSuccess);

		// on success, send slot report for taskmanager registration
		final SlotReport slotReport = new SlotReport(new SlotStatus(new SlotID(taskExecutorResourceID, 0), ResourceProfile.UNKNOWN));
		rmGateway.sendSlotReport(taskExecutorResourceID,
			((TaskExecutorRegistrationSuccess) response).getRegistrationId(), slotReport, TIMEOUT).get();

		// let the remaining part of the first registration proceed
		finishConnection.trigger();
		Thread.sleep(1L);

		// verify that the latest registration is valid and has not been unregistered by the delayed one
		final TaskManagerInfo taskManagerInfo = rmGateway.requestTaskManagerInfo(
			taskExecutorResourceID,
			TIMEOUT).get();
		assertThat(taskManagerInfo.getResourceId(), equalTo(taskExecutorResourceID));
		assertThat(taskManagerInfo.getNumberSlots(), equalTo(1));
	} finally {
		rpcService.resetRpcGatewayFutureFunction();
	}
}
 
Example 20
Source File: OneInputStreamTaskTest.java    From flink with Apache License 2.0
/**
 * Tests that the stream operator can snapshot and restore the operator state of chained
 * operators.
 */
@Test
public void testSnapshottingAndRestoring() throws Exception {
	final Deadline deadline = new FiniteDuration(2, TimeUnit.MINUTES).fromNow();

	final OneInputStreamTaskTestHarness<String, String> testHarness = new OneInputStreamTaskTestHarness<>(
			OneInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);

	testHarness.setupOutputForSingletonOperatorChain();

	IdentityKeySelector<String> keySelector = new IdentityKeySelector<>();
	testHarness.configureForKeyedStream(keySelector, BasicTypeInfo.STRING_TYPE_INFO);

	long checkpointId = 1L;
	long checkpointTimestamp = 1L;
	int numberChainedTasks = 11;

	StreamConfig streamConfig = testHarness.getStreamConfig();

	configureChainedTestingStreamOperator(streamConfig, numberChainedTasks);
	TestTaskStateManager taskStateManager = testHarness.taskStateManager;
	OneShotLatch waitForAcknowledgeLatch = new OneShotLatch();

	taskStateManager.setWaitForReportLatch(waitForAcknowledgeLatch);

	// reset number of restore calls
	TestingStreamOperator.numberRestoreCalls = 0;

	testHarness.invoke();
	testHarness.waitForTaskRunning();

	final OneInputStreamTask<String, String> streamTask = testHarness.getTask();

	CheckpointMetaData checkpointMetaData = new CheckpointMetaData(checkpointId, checkpointTimestamp);

	streamTask.triggerCheckpointAsync(checkpointMetaData, CheckpointOptions.forCheckpointWithDefaultLocation(), false).get();

	// since no state was set, there shouldn't be restore calls
	assertEquals(0, TestingStreamOperator.numberRestoreCalls);

	waitForAcknowledgeLatch.await();

	assertEquals(checkpointId, taskStateManager.getReportedCheckpointId());

	testHarness.endInput();
	testHarness.waitForTaskCompletion(deadline.timeLeft().toMillis());

	final OneInputStreamTaskTestHarness<String, String> restoredTaskHarness =
			new OneInputStreamTaskTestHarness<>(
					OneInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);

	restoredTaskHarness.configureForKeyedStream(keySelector, BasicTypeInfo.STRING_TYPE_INFO);

	restoredTaskHarness.setTaskStateSnapshot(checkpointId, taskStateManager.getLastJobManagerTaskStateSnapshot());

	StreamConfig restoredTaskStreamConfig = restoredTaskHarness.getStreamConfig();

	configureChainedTestingStreamOperator(restoredTaskStreamConfig, numberChainedTasks);

	TaskStateSnapshot stateHandles = taskStateManager.getLastJobManagerTaskStateSnapshot();
	Assert.assertEquals(numberChainedTasks, stateHandles.getSubtaskStateMappings().size());

	TestingStreamOperator.numberRestoreCalls = 0;

	// transfer state to new harness
	restoredTaskHarness.taskStateManager.restoreLatestCheckpointState(
		taskStateManager.getJobManagerTaskStateSnapshotsByCheckpointId());
	restoredTaskHarness.invoke();
	restoredTaskHarness.endInput();
	restoredTaskHarness.waitForTaskCompletion(deadline.timeLeft().toMillis());

	// restore of every chained operator should have been called
	assertEquals(numberChainedTasks, TestingStreamOperator.numberRestoreCalls);

	TestingStreamOperator.numberRestoreCalls = 0;
	TestingStreamOperator.numberSnapshotCalls = 0;
}