Java Code Examples for org.apache.flink.api.common.time.Time#milliseconds()

The following examples show how to use org.apache.flink.api.common.time.Time#milliseconds(). The source file and originating project are noted above each example.
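
As a minimal, standalone sketch of the call itself (using only methods that appear in the examples below: Time.milliseconds(long), Time.hours(long), and Time#toMilliseconds(); the class name here is illustrative):

import org.apache.flink.api.common.time.Time;

public class TimeMillisecondsSketch {
	public static void main(String[] args) {
		// build a Time value from a millisecond count
		final Time timeout = Time.milliseconds(100L);

		// other factory methods exist as well, e.g. hours
		final Time timeToLive = Time.hours(1L);

		// convert back to a plain long millisecond count
		System.out.println(timeout.toMilliseconds());    // prints 100
		System.out.println(timeToLive.toMilliseconds()); // prints 3600000
	}
}
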
Example 1
Source File: ExecutionGraphCacheTest.java    From flink with Apache License 2.0
/**
 * Tests that we can cache AccessExecutionGraphs over multiple accesses.
 */
@Test
public void testExecutionGraphCaching() throws Exception {
	final Time timeout = Time.milliseconds(100L);
	final Time timeToLive = Time.hours(1L);

	final CountingRestfulGateway restfulGateway = createCountingRestfulGateway(expectedJobId, CompletableFuture.completedFuture(expectedExecutionGraph));

	try (ExecutionGraphCache executionGraphCache = new ExecutionGraphCache(timeout, timeToLive)) {
		CompletableFuture<AccessExecutionGraph> accessExecutionGraphFuture = executionGraphCache.getExecutionGraph(expectedJobId, restfulGateway);

		assertEquals(expectedExecutionGraph, accessExecutionGraphFuture.get());

		accessExecutionGraphFuture = executionGraphCache.getExecutionGraph(expectedJobId, restfulGateway);

		assertEquals(expectedExecutionGraph, accessExecutionGraphFuture.get());

		assertThat(restfulGateway.getNumRequestJobCalls(), Matchers.equalTo(1));
	}
}
 
Example 2
Source File: AbstractTaskManagerFileHandlerTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the {@link AbstractTaskManagerFileHandler} serves the requested file.
 */
@Test
public void testFileServing() throws Exception {
	final Time cacheEntryDuration = Time.milliseconds(1000L);

	final Queue<CompletableFuture<TransientBlobKey>> requestFileUploads = new ArrayDeque<>(1);

	requestFileUploads.add(CompletableFuture.completedFuture(transientBlobKey1));

	final TestTaskManagerFileHandler testTaskManagerFileHandler = createTestTaskManagerFileHandler(cacheEntryDuration, requestFileUploads, EXPECTED_TASK_MANAGER_ID);

	final File outputFile = temporaryFolder.newFile();
	final TestingChannelHandlerContext testingContext = new TestingChannelHandlerContext(outputFile);

	testTaskManagerFileHandler.respondToRequest(
		testingContext,
		HTTP_REQUEST,
		handlerRequest,
		null);

	assertThat(outputFile.length(), is(greaterThan(0L)));
	assertThat(FileUtils.readFileUtf8(outputFile), is(equalTo(fileContent1)));
}
 
Example 3
Source File: SlotManagerConfiguration.java    From flink with Apache License 2.0
private static Time getSlotRequestTimeout(final Configuration configuration) {
	final long slotRequestTimeoutMs;
	if (configuration.contains(ResourceManagerOptions.SLOT_REQUEST_TIMEOUT)) {
		LOGGER.warn("Config key {} is deprecated; use {} instead.",
			ResourceManagerOptions.SLOT_REQUEST_TIMEOUT,
			JobManagerOptions.SLOT_REQUEST_TIMEOUT);
		slotRequestTimeoutMs = configuration.getLong(ResourceManagerOptions.SLOT_REQUEST_TIMEOUT);
	} else {
		slotRequestTimeoutMs = configuration.getLong(JobManagerOptions.SLOT_REQUEST_TIMEOUT);
	}
	return Time.milliseconds(slotRequestTimeoutMs);
}
 
Example 4
Source File: JobLeaderIdServiceTest.java    From flink with Apache License 2.0
/**
 * Tests that the initial job registration registers a timeout which will call
 * {@link JobLeaderIdActions#notifyJobTimeout(JobID, UUID)} when executed.
 */
@Test
public void testInitialJobTimeout() throws Exception {
	final JobID jobId = new JobID();
	TestingHighAvailabilityServices highAvailabilityServices = new TestingHighAvailabilityServices();
	SettableLeaderRetrievalService leaderRetrievalService = new SettableLeaderRetrievalService(
		null,
		null);

	highAvailabilityServices.setJobMasterLeaderRetriever(jobId, leaderRetrievalService);

	ScheduledExecutor scheduledExecutor = mock(ScheduledExecutor.class);
	Time timeout = Time.milliseconds(5000L);
	JobLeaderIdActions jobLeaderIdActions = mock(JobLeaderIdActions.class);

	JobLeaderIdService jobLeaderIdService = new JobLeaderIdService(
		highAvailabilityServices,
		scheduledExecutor,
		timeout);

	jobLeaderIdService.start(jobLeaderIdActions);

	jobLeaderIdService.addJob(jobId);

	assertTrue(jobLeaderIdService.containsJob(jobId));

	ArgumentCaptor<Runnable> runnableArgumentCaptor = ArgumentCaptor.forClass(Runnable.class);
	verify(scheduledExecutor).schedule(runnableArgumentCaptor.capture(), anyLong(), any(TimeUnit.class));

	Runnable timeoutRunnable = runnableArgumentCaptor.getValue();
	timeoutRunnable.run();

	ArgumentCaptor<UUID> timeoutIdArgumentCaptor = ArgumentCaptor.forClass(UUID.class);

	verify(jobLeaderIdActions, times(1)).notifyJobTimeout(eq(jobId), timeoutIdArgumentCaptor.capture());

	assertTrue(jobLeaderIdService.isValidTimeout(jobId, timeoutIdArgumentCaptor.getValue()));
}
 
Example 5
Source File: EmbeddedExecutorFactory.java    From flink with Apache License 2.0
@Override
public PipelineExecutor getExecutor(final Configuration configuration) {
	checkNotNull(configuration);
	return new EmbeddedExecutor(
			submittedJobIds,
			dispatcherGateway,
			jobId -> {
				final Time timeout = Time.milliseconds(configuration.get(ClientOptions.CLIENT_TIMEOUT).toMillis());
				return new EmbeddedJobClient(jobId, dispatcherGateway, retryExecutor, timeout);
			});
}
 
Example 6
Source File: ConfigurationUtils.java    From flink with Apache License 2.0
public static Time getStandaloneClusterStartupPeriodTime(Configuration configuration) {
	final Time timeout;
	long standaloneClusterStartupPeriodTime = configuration.getLong(ResourceManagerOptions.STANDALONE_CLUSTER_STARTUP_PERIOD_TIME);
	if (standaloneClusterStartupPeriodTime >= 0) {
		timeout = Time.milliseconds(standaloneClusterStartupPeriodTime);
	} else {
		timeout = Time.milliseconds(configuration.getLong(JobManagerOptions.SLOT_REQUEST_TIMEOUT));
	}
	return timeout;
}
 
Example 7
Source File: WebMonitorEndpoint.java    From Flink-CEPplus with Apache License 2.0
public WebMonitorEndpoint(
		RestServerEndpointConfiguration endpointConfiguration,
		GatewayRetriever<? extends T> leaderRetriever,
		Configuration clusterConfiguration,
		RestHandlerConfiguration restConfiguration,
		GatewayRetriever<ResourceManagerGateway> resourceManagerRetriever,
		TransientBlobService transientBlobService,
		ExecutorService executor,
		MetricFetcher metricFetcher,
		LeaderElectionService leaderElectionService,
		FatalErrorHandler fatalErrorHandler) throws IOException {
	super(endpointConfiguration);
	this.leaderRetriever = Preconditions.checkNotNull(leaderRetriever);
	this.clusterConfiguration = Preconditions.checkNotNull(clusterConfiguration);
	this.restConfiguration = Preconditions.checkNotNull(restConfiguration);
	this.resourceManagerRetriever = Preconditions.checkNotNull(resourceManagerRetriever);
	this.transientBlobService = Preconditions.checkNotNull(transientBlobService);
	this.executor = Preconditions.checkNotNull(executor);

	this.executionGraphCache = new ExecutionGraphCache(
		restConfiguration.getTimeout(),
		Time.milliseconds(restConfiguration.getRefreshInterval()));

	this.checkpointStatsCache = new CheckpointStatsCache(
		restConfiguration.getMaxCheckpointStatisticCacheEntries());

	this.metricFetcher = metricFetcher;

	this.leaderElectionService = Preconditions.checkNotNull(leaderElectionService);
	this.fatalErrorHandler = Preconditions.checkNotNull(fatalErrorHandler);
}
 
Example 8
Source File: AbstractTaskManagerFileHandlerTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that file cache entries expire.
 */
@Test
public void testFileCacheExpiration() throws Exception {
	final Time cacheEntryDuration = Time.milliseconds(5L);

	final File outputFile = runFileCachingTest(cacheEntryDuration, cacheEntryDuration);

	assertThat(outputFile.length(), is(greaterThan(0L)));
	assertThat(FileUtils.readFileUtf8(outputFile), is(equalTo(fileContent2)));
}
 
Example 9
Source File: SystemResourcesCounterTest.java    From flink with Apache License 2.0
@Test
public void testObtainAnyMetrics() throws InterruptedException {
	SystemResourcesCounter systemResources = new SystemResourcesCounter(Time.milliseconds(10));
	double initialCpuIdle = systemResources.getCpuIdle();

	systemResources.start();
	// wait for stats to update/calculate
	try {
		double cpuIdle;
		do {
			Thread.sleep(1);
			cpuIdle = systemResources.getCpuIdle();
		}
		while (initialCpuIdle == cpuIdle || Double.isNaN(cpuIdle) || cpuIdle == 0.0);
	}
	finally {
		systemResources.shutdown();
		systemResources.join();
	}

	double totalCpuUsage = systemResources.getCpuIrq() +
		systemResources.getCpuNice() +
		systemResources.getCpuSoftIrq() +
		systemResources.getCpuSys() +
		systemResources.getCpuUser() +
		systemResources.getIOWait();

	assertTrue(
		"There should be at least one processor",
		systemResources.getProcessorsCount() > 0);
	assertTrue(
		"There should be at least one network interface",
		systemResources.getNetworkInterfaceNames().length > 0);
	assertEquals(100.0, totalCpuUsage + systemResources.getCpuIdle(), EPSILON);
}
 
Example 10
Source File: FailureRateRestartStrategy.java    From flink with Apache License 2.0
public static FailureRateRestartStrategyFactory createFactory(Configuration configuration) throws Exception {
	int maxFailuresPerInterval = configuration.getInteger(RestartStrategyOptions.RESTART_STRATEGY_FAILURE_RATE_MAX_FAILURES_PER_INTERVAL);
	long failuresInterval = configuration.get(RestartStrategyOptions.RESTART_STRATEGY_FAILURE_RATE_FAILURE_RATE_INTERVAL)
		.toMillis();
	long delay = configuration.get(RestartStrategyOptions.RESTART_STRATEGY_FAILURE_RATE_DELAY).toMillis();

	return new FailureRateRestartStrategyFactory(
		maxFailuresPerInterval,
		Time.milliseconds(failuresInterval),
		Time.milliseconds(delay));
}
 
Example 11
Source File: TaskExecutorToResourceManagerConnection.java    From Flink-CEPplus with Apache License 2.0
@Override
protected CompletableFuture<RegistrationResponse> invokeRegistration(
		ResourceManagerGateway resourceManager, ResourceManagerId fencingToken, long timeoutMillis) throws Exception {

	Time timeout = Time.milliseconds(timeoutMillis);
	return resourceManager.registerTaskExecutor(
		taskExecutorAddress,
		resourceID,
		dataPort,
		hardwareDescription,
		timeout);
}
 
Example 12
Source File: EmbeddedJobClient.java    From flink with Apache License 2.0
@Override
public CompletableFuture<JobExecutionResult> getJobExecutionResult(final ClassLoader userClassloader) {
	checkNotNull(userClassloader);

	final Time retryPeriod = Time.milliseconds(100L);
	return JobStatusPollingUtils.getJobResult(dispatcherGateway, jobId, retryExecutor, timeout, retryPeriod)
			.thenApply((jobResult) -> {
				try {
					return jobResult.toJobExecutionResult(userClassloader);
				} catch (Throwable t) {
					throw new CompletionException(new Exception("Job " + jobId + " failed", t));
				}
			});
}
 
Example 13
Source File: MetricFetcherImpl.java    From flink with Apache License 2.0
@Nonnull
public static <T extends RestfulGateway> MetricFetcherImpl<T> fromConfiguration(
		final Configuration configuration,
		final MetricQueryServiceRetriever metricQueryServiceGatewayRetriever, final GatewayRetriever<T> dispatcherGatewayRetriever,
		final ExecutorService executor) {
	final Time timeout = Time.milliseconds(configuration.getLong(WebOptions.TIMEOUT));
	final long updateInterval = configuration.getLong(MetricOptions.METRIC_FETCHER_UPDATE_INTERVAL);

	return new MetricFetcherImpl<>(
		dispatcherGatewayRetriever,
		metricQueryServiceGatewayRetriever,
		executor,
		timeout,
		updateInterval);
}
 
Example 14
Source File: SlotPoolImpl.java    From Flink-CEPplus with Apache License 2.0
@VisibleForTesting
public SlotPoolImpl(JobID jobId) {
	this(
		jobId,
		SystemClock.getInstance(),
		AkkaUtils.getDefaultTimeout(),
		Time.milliseconds(JobManagerOptions.SLOT_IDLE_TIMEOUT.defaultValue()));
}
 
Example 15
Source File: FutureUtilsTest.java    From flink with Apache License 2.0
/**
 * Tests that the delay is respected between subsequent retries of a retry future with retry delay.
 */
@Test
public void testRetryWithDelay() throws Exception {
	final int retries = 4;
	final Time delay = Time.milliseconds(5L);
	final AtomicInteger countDown = new AtomicInteger(retries);

	long start = System.currentTimeMillis();

	CompletableFuture<Boolean> retryFuture = FutureUtils.retryWithDelay(
		() -> {
			if (countDown.getAndDecrement() == 0) {
				return CompletableFuture.completedFuture(true);
			} else {
				return FutureUtils.completedExceptionally(new FlinkException("Test exception."));
			}
		},
		retries,
		delay,
		TestingUtils.defaultScheduledExecutor());

	Boolean result = retryFuture.get();

	long completionTime = System.currentTimeMillis() - start;

	assertTrue(result);
	assertTrue("The completion time should be at least rertries times delay between retries.", completionTime >= retries * delay.toMilliseconds());
}
 
Example 16
Source File: SubtaskExecutionAttemptAccumulatorsHandlerTest.java    From flink with Apache License 2.0
@Test
public void testHandleRequest() throws Exception {

	// Instantiate the handler.
	final RestHandlerConfiguration restHandlerConfiguration = RestHandlerConfiguration.fromConfiguration(new Configuration());

	final SubtaskExecutionAttemptAccumulatorsHandler handler = new SubtaskExecutionAttemptAccumulatorsHandler(
		() -> null,
		Time.milliseconds(100L),
		Collections.emptyMap(),
		SubtaskExecutionAttemptAccumulatorsHeaders.getInstance(),
		new ExecutionGraphCache(
			restHandlerConfiguration.getTimeout(),
			Time.milliseconds(restHandlerConfiguration.getRefreshInterval())),
		TestingUtils.defaultExecutor());

	// Instantiate an empty request.
	final HandlerRequest<EmptyRequestBody, SubtaskAttemptMessageParameters> request = new HandlerRequest<>(
		EmptyRequestBody.getInstance(),
		new SubtaskAttemptMessageParameters()
	);

	final Map<String, OptionalFailure<Accumulator<?, ?>>> userAccumulators = new HashMap<>(3);
	userAccumulators.put("IntCounter", OptionalFailure.of(new IntCounter(10)));
	userAccumulators.put("LongCounter", OptionalFailure.of(new LongCounter(100L)));
	userAccumulators.put("Failure", OptionalFailure.ofFailure(new FlinkRuntimeException("Test")));

	// Instantiate the expected result.
	final StringifiedAccumulatorResult[] accumulatorResults =
		StringifiedAccumulatorResult.stringifyAccumulatorResults(userAccumulators);

	final int attemptNum = 1;
	final int subtaskIndex = 2;

	// Instantiate the tested execution.
	final ArchivedExecution execution = new ArchivedExecution(
		accumulatorResults,
		null,
		new ExecutionAttemptID(),
		attemptNum,
		ExecutionState.FINISHED,
		null,
		null,
		null,
		subtaskIndex,
		new long[ExecutionState.values().length]);

	// Invoke tested method.
	final SubtaskExecutionAttemptAccumulatorsInfo accumulatorsInfo = handler.handleRequest(request, execution);

	final ArrayList<UserAccumulator> userAccumulatorList = new ArrayList<>(userAccumulators.size());
	for (StringifiedAccumulatorResult accumulatorResult : accumulatorResults) {
		userAccumulatorList.add(
			new UserAccumulator(
				accumulatorResult.getName(),
				accumulatorResult.getType(),
				accumulatorResult.getValue()));
	}

	final SubtaskExecutionAttemptAccumulatorsInfo expected = new SubtaskExecutionAttemptAccumulatorsInfo(
		subtaskIndex,
		attemptNum,
		execution.getAttemptId().toString(),
		userAccumulatorList);

	// Verify.
	assertEquals(expected, accumulatorsInfo);
}
 
Example 17
Source File: DefaultSchedulerBatchSchedulingTest.java    From flink with Apache License 2.0
/**
 * Tests that a batch job can be executed with fewer slots than its parallelism.
 * See FLINK-13187 for more information.
 */
@Test
public void testSchedulingOfJobWithFewerSlotsThanParallelism() throws Exception {
	final int parallelism = 5;
	final Time batchSlotTimeout = Time.milliseconds(5L);
	final JobGraph jobGraph = createJobGraph(parallelism);
	jobGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES_WITH_BATCH_SLOT_REQUEST);

	try (final SlotPoolImpl slotPool = createSlotPool(mainThreadExecutor, batchSlotTimeout)) {
		final ArrayBlockingQueue<ExecutionAttemptID> submittedTasksQueue = new ArrayBlockingQueue<>(parallelism);
		TestingTaskExecutorGateway testingTaskExecutorGateway = new TestingTaskExecutorGatewayBuilder()
			.setSubmitTaskConsumer(
				(tdd, ignored) -> {
					submittedTasksQueue.offer(tdd.getExecutionAttemptId());
					return CompletableFuture.completedFuture(Acknowledge.get());
				})
			.createTestingTaskExecutorGateway();

		// register a single slot at the slot pool
		SlotPoolUtils.offerSlots(
			slotPool,
			mainThreadExecutor,
			Collections.singletonList(ResourceProfile.ANY),
			new RpcTaskManagerGateway(testingTaskExecutorGateway, JobMasterId.generate()));

		final SlotProvider slotProvider = createSlotProvider(slotPool, mainThreadExecutor);
		final SchedulerNG scheduler = createScheduler(jobGraph, slotProvider, batchSlotTimeout);

		final GloballyTerminalJobStatusListener jobStatusListener = new GloballyTerminalJobStatusListener();
		scheduler.registerJobStatusListener(jobStatusListener);
		startScheduling(scheduler, mainThreadExecutor);

		// wait until the batch slot timeout has been reached
		Thread.sleep(batchSlotTimeout.toMilliseconds());

		final CompletableFuture<JobStatus> terminationFuture = jobStatusListener.getTerminationFuture();

		for (int i = 0; i < parallelism; i++) {
			final CompletableFuture<ExecutionAttemptID> submittedTaskFuture = CompletableFuture.supplyAsync(CheckedSupplier.unchecked(submittedTasksQueue::take));

			// wait until one of them is completed
			CompletableFuture.anyOf(submittedTaskFuture, terminationFuture).join();

			if (submittedTaskFuture.isDone()) {
				finishExecution(submittedTaskFuture.get(), scheduler, mainThreadExecutor);
			} else {
				fail(String.format("Job reached a globally terminal state %s before all executions were finished.", terminationFuture.get()));
			}
		}

		assertThat(terminationFuture.get(), is(JobStatus.FINISHED));
	}
}
 
Example 18
Source File: RpcGatewayRetrieverTest.java    From flink with Apache License 2.0
/**
 * Tests that the RpcGatewayRetriever can retrieve the specified gateway type from a leader retrieval service.
 */
@Test
public void testRpcGatewayRetrieval() throws Exception {
	final String expectedValue = "foobar";
	final String expectedValue2 = "barfoo";
	final UUID leaderSessionId = UUID.randomUUID();

	RpcGatewayRetriever<UUID, DummyGateway> gatewayRetriever = new RpcGatewayRetriever<>(rpcService, DummyGateway.class, Function.identity(), 0, Time.milliseconds(0L));
	SettableLeaderRetrievalService settableLeaderRetrievalService = new SettableLeaderRetrievalService();
	DummyRpcEndpoint dummyRpcEndpoint = new DummyRpcEndpoint(rpcService, "dummyRpcEndpoint1", expectedValue);
	DummyRpcEndpoint dummyRpcEndpoint2 = new DummyRpcEndpoint(rpcService, "dummyRpcEndpoint2", expectedValue2);
	rpcService.registerGateway(dummyRpcEndpoint.getAddress(), dummyRpcEndpoint.getSelfGateway(DummyGateway.class));
	rpcService.registerGateway(dummyRpcEndpoint2.getAddress(), dummyRpcEndpoint2.getSelfGateway(DummyGateway.class));

	try {
		dummyRpcEndpoint.start();
		dummyRpcEndpoint2.start();

		settableLeaderRetrievalService.start(gatewayRetriever);

		final CompletableFuture<DummyGateway> gatewayFuture = gatewayRetriever.getFuture();

		assertFalse(gatewayFuture.isDone());

		settableLeaderRetrievalService.notifyListener(dummyRpcEndpoint.getAddress(), leaderSessionId);

		final DummyGateway dummyGateway = gatewayFuture.get(TIMEOUT.toMilliseconds(), TimeUnit.MILLISECONDS);

		assertEquals(dummyRpcEndpoint.getAddress(), dummyGateway.getAddress());
		assertEquals(expectedValue, dummyGateway.foobar(TIMEOUT).get(TIMEOUT.toMilliseconds(), TimeUnit.MILLISECONDS));

		// elect a new leader
		settableLeaderRetrievalService.notifyListener(dummyRpcEndpoint2.getAddress(), leaderSessionId);

		final CompletableFuture<DummyGateway> gatewayFuture2 = gatewayRetriever.getFuture();
		final DummyGateway dummyGateway2 = gatewayFuture2.get(TIMEOUT.toMilliseconds(), TimeUnit.MILLISECONDS);

		assertEquals(dummyRpcEndpoint2.getAddress(), dummyGateway2.getAddress());
		assertEquals(expectedValue2, dummyGateway2.foobar(TIMEOUT).get(TIMEOUT.toMilliseconds(), TimeUnit.MILLISECONDS));
	} finally {
		RpcUtils.terminateRpcEndpoint(dummyRpcEndpoint, TIMEOUT);
		RpcUtils.terminateRpcEndpoint(dummyRpcEndpoint2, TIMEOUT);
	}
}
 
Example 19
Source File: FileArchivedExecutionGraphStoreTest.java    From flink with Apache License 2.0
/**
 * Tests that an expired execution graph is removed from the execution graph store.
 */
@Test
public void testExecutionGraphExpiration() throws Exception {
	final File rootDir = temporaryFolder.newFolder();

	final Time expirationTime = Time.milliseconds(1L);

	final ManuallyTriggeredScheduledExecutor scheduledExecutor = new ManuallyTriggeredScheduledExecutor();

	final ManualTicker manualTicker = new ManualTicker();

	try (final FileArchivedExecutionGraphStore executionGraphStore = new FileArchivedExecutionGraphStore(
		rootDir,
		expirationTime,
		Integer.MAX_VALUE,
		10000L,
		scheduledExecutor,
		manualTicker)) {

		final ArchivedExecutionGraph executionGraph = new ArchivedExecutionGraphBuilder().setState(JobStatus.FINISHED).build();

		executionGraphStore.put(executionGraph);

		// there should be one execution graph
		assertThat(executionGraphStore.size(), Matchers.equalTo(1));

		manualTicker.advanceTime(expirationTime.toMilliseconds(), TimeUnit.MILLISECONDS);

		// this should trigger the cleanup after expiration
		scheduledExecutor.triggerScheduledTasks();

		assertThat(executionGraphStore.size(), Matchers.equalTo(0));

		assertThat(executionGraphStore.get(executionGraph.getJobID()), Matchers.nullValue());

		final File storageDirectory = executionGraphStore.getStorageDir();

		// check that the persisted file has been deleted
		assertThat(storageDirectory.listFiles().length, Matchers.equalTo(0));
	}
}
 
Example 20
Source File: ExecutionGraphCacheTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that cache entries are cleaned up when their TTL has expired upon
 * calling {@link ExecutionGraphCache#cleanup()}.
 */
@Test
public void testCacheEntryCleanup() throws Exception {
	final Time timeout = Time.milliseconds(100L);
	final Time timeToLive = Time.milliseconds(1L);
	final JobID expectedJobId2 = new JobID();
	final ArchivedExecutionGraph expectedExecutionGraph2 = new ArchivedExecutionGraphBuilder().build();

	final AtomicInteger requestJobCalls = new AtomicInteger(0);
	final TestingRestfulGateway restfulGateway = TestingRestfulGateway.newBuilder()
		.setRequestJobFunction(
			jobId -> {
				requestJobCalls.incrementAndGet();
				if (jobId.equals(expectedJobId)) {
					return CompletableFuture.completedFuture(expectedExecutionGraph);
				} else if (jobId.equals(expectedJobId2)) {
					return CompletableFuture.completedFuture(expectedExecutionGraph2);
				} else {
					throw new AssertionError("Invalid job id received.");
				}
			}
		)
		.build();

	try (ExecutionGraphCache executionGraphCache = new ExecutionGraphCache(timeout, timeToLive)) {

		CompletableFuture<AccessExecutionGraph> executionGraph1Future = executionGraphCache.getExecutionGraph(expectedJobId, restfulGateway);

		CompletableFuture<AccessExecutionGraph> executionGraph2Future = executionGraphCache.getExecutionGraph(expectedJobId2, restfulGateway);

		assertEquals(expectedExecutionGraph, executionGraph1Future.get());

		assertEquals(expectedExecutionGraph2, executionGraph2Future.get());

		assertThat(requestJobCalls.get(), Matchers.equalTo(2));

		Thread.sleep(timeToLive.toMilliseconds());

		executionGraphCache.cleanup();

		assertTrue(executionGraphCache.size() == 0);
	}
}