Java Code Examples for org.apache.flink.util.FlinkException

The following examples show how to use org.apache.flink.util.FlinkException. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: pulsar-flink   Source File: FlinkPulsarSourceTest.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testClosePartitionDiscovererWhenFetcherFails() throws Exception {
    final FlinkException failureCause = new FlinkException("Run fetcher failure.");

    // The discoverer records whether close() was invoked; the fetcher is stubbed
    // to fail immediately so we can observe the source's cleanup path.
    final DummyPartitionDiscoverer testDiscoverer = new DummyPartitionDiscoverer();
    final PulsarFetcher<String> failingFetcher = mock(PulsarFetcher.class);
    doThrow(failureCause).when(failingFetcher).runFetchLoop();

    final DummyFlinkPulsarSource<String> source = new DummyFlinkPulsarSource<>(
            () -> failingFetcher,
            testDiscoverer,
            dummyProperties);

    // running the source must surface the fetcher failure and still close the discoverer
    testFailingSourceLifecycle(source, failureCause);
    assertTrue("partitionDiscoverer should be closed when consumer is closed", testDiscoverer.isClosed());
}
 
Example 2
Source Project: Flink-CEPplus   Source File: ClassPathJobGraphRetriever.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public JobGraph retrieveJobGraph(Configuration configuration) throws FlinkException {
	final PackagedProgram program = createPackagedProgram();
	final int parallelism = configuration.getInteger(CoreOptions.DEFAULT_PARALLELISM);

	try {
		// build the job graph from the packaged user program with the configured parallelism
		final JobGraph graph = PackagedProgramUtils.createJobGraph(
			program,
			configuration,
			parallelism,
			jobId);

		graph.setAllowQueuedScheduling(true);
		graph.setSavepointRestoreSettings(savepointRestoreSettings);
		return graph;
	} catch (Exception e) {
		throw new FlinkException("Could not create the JobGraph from the provided user code jar.", e);
	}
}
 
Example 3
Source Project: flink   Source File: JobMaster.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Starts the services the JobMaster depends on: heartbeats, the slot pool,
 * the scheduler, and leader retrieval for the resource manager.
 *
 * <p>The slot pool is started (and thus accepts messages for this leader)
 * before any connection to the resource manager is attempted.
 *
 * @throws Exception if any of the services fails to start
 */
private void startJobMasterServices() throws Exception {
	startHeartbeatServices();

	// start the slot pool make sure the slot pool now accepts messages for this leader
	slotPool.start(getFencingToken(), getAddress(), getMainThreadExecutor());
	scheduler.start(getMainThreadExecutor());

	//TODO: Remove once the ZooKeeperLeaderRetrieval returns the stored address upon start
	// try to reconnect to previously known leader
	reconnectToResourceManager(new FlinkException("Starting JobMaster component."));

	// job is ready to go, try to establish connection with resource manager
	//   - activate leader retrieval for the resource manager
	//   - on notification of the leader, the connection will be established and
	//     the slot pool will start requesting slots
	resourceManagerLeaderRetriever.start(new ResourceManagerLeaderListener());
}
 
Example 4
Source Project: Flink-CEPplus   Source File: TaskTest.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testInvokableInstantiationFailed() throws Exception {
	final QueuedNoOpTaskManagerActions queuedActions = new QueuedNoOpTaskManagerActions();
	final Task task = new TaskBuilder()
		.setTaskManagerActions(queuedActions)
		.setInvokable(InvokableNonInstantiable.class)
		.build();

	// running the task must fail because the invokable cannot be instantiated
	task.run();

	// the task ends up FAILED with a cause that mentions the instantiation problem
	assertEquals(ExecutionState.FAILED, task.getExecutionState());
	assertTrue(task.isCanceledOrFailed());
	assertTrue(task.getFailureCause().getMessage().contains("instantiate"));

	// the task manager listener must have been told about the failure as well
	queuedActions.validateListenerMessage(
		ExecutionState.FAILED, task, new FlinkException("Could not instantiate the task's invokable class."));
}
 
Example 5
Source Project: flink   Source File: JobSubmitHandler.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Uploads the job graph's jar and artifact files to the dispatcher's blob server.
 *
 * <p>The blob server port is queried from the gateway; once both the port and the
 * job graph are available, the files are uploaded through a fresh {@link BlobClient}.
 *
 * @param gateway gateway used to resolve the blob server hostname and port
 * @param jobGraphFuture future yielding the job graph whose files are uploaded
 * @param jarFiles jar files to upload
 * @param artifacts named artifacts to upload alongside the jars
 * @param configuration configuration handed to the blob client
 * @return future with the job graph; fails with a {@link CompletionException}
 *         wrapping a {@link RestHandlerException} (HTTP 500) if the upload fails
 */
private CompletableFuture<JobGraph> uploadJobGraphFiles(
		DispatcherGateway gateway,
		CompletableFuture<JobGraph> jobGraphFuture,
		Collection<Path> jarFiles,
		Collection<Tuple2<String, Path>> artifacts,
		Configuration configuration) {
	CompletableFuture<Integer> blobServerPortFuture = gateway.getBlobServerPort(timeout);

	return jobGraphFuture.thenCombine(blobServerPortFuture, (JobGraph jobGraph, Integer blobServerPort) -> {
		final InetSocketAddress address = new InetSocketAddress(gateway.getHostname(), blobServerPort);
		try {
			ClientUtils.uploadJobGraphFiles(jobGraph, jarFiles, artifacts, () -> new BlobClient(address, configuration));
		} catch (FlinkException e) {
			// lambdas cannot throw checked exceptions, so wrap for the future's error channel
			throw new CompletionException(new RestHandlerException(
				"Could not upload job files.",
				HttpResponseStatus.INTERNAL_SERVER_ERROR,
				e));
		}
		return jobGraph;
	});
}
 
Example 6
/**
 * Tests the triggering and exceptional completion of an asynchronous operation.
 *
 * <p>The gateway is set up so that triggering a savepoint fails with a known
 * exception; the status handler must then report the operation as completed
 * and expose that exception through the operation result.
 */
@Test
public void testOperationFailure() throws Exception {
	final FlinkException testException = new FlinkException("Test exception");
	final TestingRestfulGateway testingRestfulGateway = new TestingRestfulGateway.Builder()
		.setTriggerSavepointFunction((JobID jobId, String directory) -> FutureUtils.completedExceptionally(testException))
		.build();

	// trigger the operation
	final TriggerId triggerId = testingTriggerHandler.handleRequest(
		triggerOperationRequest(),
		testingRestfulGateway).get().getTriggerId();

	// poll the status of the triggered operation
	AsynchronousOperationResult<OperationResult> operationResult = testingStatusHandler.handleRequest(
		statusOperationRequest(triggerId),
		testingRestfulGateway).get();

	// the operation must be reported as completed, not as still in progress
	assertThat(operationResult.queueStatus().getId(), is(QueueStatus.completed().getId()));

	// the failure cause must be surfaced as the operation's result
	final OperationResult resource = operationResult.resource();
	assertThat(resource.throwable, is(testException));
}
 
Example 7
Source Project: flink   Source File: TaskExecutor.java    License: Apache License 2.0 6 votes vote down vote up
private void closeJobManagerConnectionIfNoAllocatedResources(JobID jobId) {
	// keep the connection open while any slots or tracked partitions remain for this job
	if (!taskSlotTable.getAllocationIdsPerJob(jobId).isEmpty() || partitionTable.hasTrackedPartitions(jobId)) {
		return;
	}

	// no resources left: the job leader service no longer needs to track this job
	try {
		jobLeaderService.removeJob(jobId);
	} catch (Exception e) {
		log.info("Could not remove job {} from JobLeaderService.", jobId, e);
	}

	closeJobManagerConnection(
		jobId,
		new FlinkException("TaskExecutor " + getAddress() +
			" has no more allocated slots for job " + jobId + '.'));
}
 
Example 8
Source Project: flink   Source File: AbstractCustomCommandLine.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Applies the overriding values from the given command line on top of the
 * stored configuration.
 *
 * @param commandLine containing the overriding values
 * @return effective configuration with the overridden settings applied
 */
protected Configuration applyCommandLineOptionsToConfiguration(CommandLine commandLine) throws FlinkException {
	final Configuration effectiveConfiguration = new Configuration(configuration);

	// job manager address override from the command line
	if (commandLine.hasOption(addressOption.getOpt())) {
		final String addressWithPort = commandLine.getOptionValue(addressOption.getOpt());
		final InetSocketAddress jobManagerAddress = ClientUtils.parseHostPortAddress(addressWithPort);
		setJobManagerAddressInConfig(effectiveConfiguration, jobManagerAddress);
	}

	// ZooKeeper namespace override (stored as the HA cluster id)
	if (commandLine.hasOption(zookeeperNamespaceOption.getOpt())) {
		final String zkNamespace = commandLine.getOptionValue(zookeeperNamespaceOption.getOpt());
		effectiveConfiguration.setString(HighAvailabilityOptions.HA_CLUSTER_ID, zkNamespace);
	}

	return effectiveConfiguration;
}
 
Example 9
Source Project: Flink-CEPplus   Source File: DispatcherTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Tests that the {@link Dispatcher} terminates if it cannot recover jobs from
 * the {@link SubmittedJobGraphStore}. See FLINK-8943.
 */
@Test
public void testFatalErrorAfterJobRecoveryFailure() throws Exception {
	final FlinkException testException = new FlinkException("Test exception");

	dispatcher = createAndStartDispatcher(heartbeatServices, haServices, new ExpectedJobIdJobManagerRunnerFactory(TEST_JOB_ID, createdJobManagerRunnerLatch));

	dispatcher.waitUntilStarted();

	// put a job graph into the store so that gaining leadership triggers a recovery attempt
	final SubmittedJobGraph submittedJobGraph = new SubmittedJobGraph(jobGraph);
	submittedJobGraphStore.putJobGraph(submittedJobGraph);

	// make every recovery attempt fail with the test exception
	submittedJobGraphStore.setRecoverJobGraphFunction(
		(JobID jobId, Map<JobID, SubmittedJobGraph> submittedJobs) -> {
			throw testException;
		});

	// granting leadership starts the (failing) job recovery
	electDispatcher();

	// we expect that a fatal error occurred
	final Throwable error = fatalErrorHandler.getErrorFuture().get(TIMEOUT.toMilliseconds(), TimeUnit.MILLISECONDS);

	// the reported fatal error must carry the original failure message
	assertThat(ExceptionUtils.findThrowableWithMessage(error, testException.getMessage()).isPresent(), is(true));

	fatalErrorHandler.clearError();
}
 
Example 10
Source Project: flink   Source File: DispatcherHATest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Tests that a fatal error is reported if the job recovery fails.
 *
 * <p>The HA services are configured with a job graph store that always throws,
 * so starting the dispatcher must end in a reported fatal error carrying the
 * store's exception message.
 */
@Test
public void testFailingRecoveryIsAFatalError() throws Exception {
	final String exceptionMessage = "Job recovery test failure.";
	final Supplier<Exception> exceptionSupplier = () -> new FlinkException(exceptionMessage);
	final TestingHighAvailabilityServices haServices = new TestingHighAvailabilityServicesBuilder()
		.setSubmittedJobGraphStore(new FailingSubmittedJobGraphStore(exceptionSupplier))
		.build();

	final HATestingDispatcher dispatcher = createDispatcher(haServices);
	dispatcher.start();

	// wait until the fatal error handler receives the failure
	final Throwable failure = testingFatalErrorHandler.getErrorFuture().get();

	// the reported error must contain the store's original message
	assertThat(ExceptionUtils.findThrowableWithMessage(failure, exceptionMessage).isPresent(), is(true));

	testingFatalErrorHandler.clearError();
}
 
Example 11
Source Project: flink   Source File: ZooKeeperCompletedCheckpointStore.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Materializes a {@link CompletedCheckpoint} from the given state handle / path pair.
 *
 * @param stateHandlePath tuple of the retrievable state handle (f0) and the path it
 *                        was stored under (f1); the path encodes the checkpoint id
 * @return the retrieved completed checkpoint
 * @throws FlinkException if the state was written by an incompatible Flink version
 *                        ({@link ClassNotFoundException}) or the retrieved state
 *                        handle is broken ({@link IOException})
 */
private static CompletedCheckpoint retrieveCompletedCheckpoint(Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String> stateHandlePath) throws FlinkException {
	long checkpointId = pathToCheckpointId(stateHandlePath.f1);

	LOG.info("Trying to retrieve checkpoint {}.", checkpointId);

	try {
		return stateHandlePath.f0.retrieveState();
	} catch (ClassNotFoundException cnfe) {
		throw new FlinkException("Could not retrieve checkpoint " + checkpointId + " from state handle under " +
			stateHandlePath.f1 + ". This indicates that you are trying to recover from state written by an " +
			"older Flink version which is not compatible. Try cleaning the state handle store.", cnfe);
	} catch (IOException ioe) {
		throw new FlinkException("Could not retrieve checkpoint " + checkpointId + " from state handle under " +
			stateHandlePath.f1 + ". This indicates that the retrieved state handle is broken. Try cleaning the " +
			"state handle store.", ioe);
	}
}
 
Example 12
Source Project: Flink-CEPplus   Source File: ExecutionVertexLocalityTest.java    License: Apache License 2.0 6 votes vote down vote up
private void initializeLocation(ExecutionVertex vertex, TaskManagerLocation location) throws Exception {
	// we need a bit of reflection magic to initialize the location without going through
	// scheduling paths. we choose to do that, rather than the alternatives:
	//  - mocking the scheduler created fragile tests that break whenever the scheduler is adjusted
	//  - exposing test methods in the ExecutionVertex leads to undesirable setters

	final SlotContext slotContext = new SimpleSlotContext(
		new AllocationID(),
		location,
		0,
		mock(TaskManagerGateway.class));

	final SimpleSlot assignedSlot = new SimpleSlot(slotContext, mock(SlotOwner.class), 0);

	// assigning the slot pins the vertex to the desired task manager location
	if (!vertex.getCurrentExecutionAttempt().tryAssignResource(assignedSlot)) {
		throw new FlinkException("Could not assign resource.");
	}
}
 
Example 13
Source Project: flink   Source File: MasterHooks.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Deserializes the checkpoint state for the given master hook using the hook's
 * own {@link SimpleVersionedSerializer}.
 *
 * @param state serialized master state (version + bytes) to deserialize
 * @param hook the hook whose serializer is used
 * @param <T> type of the deserialized state
 * @return the deserialized state object
 * @throws FlinkException if the hook provides no serializer or deserialization fails.
 *         Note that the "null serializer" exception is thrown inside the try block and
 *         therefore re-wrapped by the outer catch, so callers always observe the
 *         "Cannot deserialize" message with the original failure as its cause.
 */
private static <T> T deserializeState(MasterState state, MasterTriggerRestoreHook<?> hook) throws FlinkException {
	@SuppressWarnings("unchecked")
	final MasterTriggerRestoreHook<T> typedHook = (MasterTriggerRestoreHook<T>) hook;
	final String id = hook.getIdentifier();

	try {
		final SimpleVersionedSerializer<T> deserializer = typedHook.createCheckpointDataSerializer();
		if (deserializer == null) {
			// use the already-computed id for consistency with the message below
			throw new FlinkException("null serializer for state of hook " + id);
		}

		return deserializer.deserialize(state.version(), state.bytes());
	}
	catch (Throwable t) {
		throw new FlinkException("Cannot deserialize state for master hook '" + id + '\'', t);
	}
}
 
Example 14
Source Project: Flink-CEPplus   Source File: SlotSharingGroupAssignment.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Called from {@link org.apache.flink.runtime.instance.SharedSlot#releaseSlot(Throwable)}.
 *
 * <p>If the slot still has children, each child is released in turn, which should
 * eventually release this slot as well; a slot without children is disposed of
 * directly. All of this runs under the assignment lock.
 *
 * @param sharedSlot The slot to be released.
 */
void releaseSharedSlot(SharedSlot sharedSlot) {
	synchronized (lock) {
		// only proceed when markCancelled() reports success, so the release runs once
		if (sharedSlot.markCancelled()) {
			// we are releasing this slot
			
			if (sharedSlot.hasChildren()) {
				final FlinkException cause = new FlinkException("Releasing shared slot parent.");
				// by simply releasing all children, we should eventually release this slot.
				// iterate via the live view since each release mutates the child set
				Set<Slot> children = sharedSlot.getSubSlots();
				while (children.size() > 0) {
					children.iterator().next().releaseSlot(cause);
				}
			}
			else {
				// if there are no children that trigger the release, we trigger it directly
				internalDisposeEmptySharedSlot(sharedSlot);
			}
		}
	}
}
 
Example 15
/**
 * Tests the triggering and exceptional completion of an asynchronous operation.
 */
@Test
public void testOperationFailure() throws Exception {
	final FlinkException expectedFailure = new FlinkException("Test exception");
	final TestingRestfulGateway gateway = new TestingRestfulGateway.Builder()
		.setTriggerSavepointFunction((JobID jobId, String directory) -> FutureUtils.completedExceptionally(expectedFailure))
		.build();

	// kick off the asynchronous operation and remember its trigger id
	final TriggerId triggerId = testingTriggerHandler.handleRequest(
		triggerOperationRequest(),
		gateway).get().getTriggerId();

	// query the status of the operation we just triggered
	final AsynchronousOperationResult<OperationResult> result = testingStatusHandler.handleRequest(
		statusOperationRequest(triggerId),
		gateway).get();

	// the operation must have completed (exceptionally), not still be queued
	assertThat(result.queueStatus().getId(), is(QueueStatus.completed().getId()));

	// the result must expose the original failure
	final OperationResult resource = result.resource();
	assertThat(resource.throwable, is(expectedFailure));
}
 
Example 16
Source Project: Flink-CEPplus   Source File: FencedRpcEndpointTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Waits asynchronously on the computation latch and acknowledges on the main
 * thread executor once it is released.
 *
 * @param timeout RPC timeout (not read in this implementation)
 * @return future completing with {@link Acknowledge} after the latch is released,
 *         or exceptionally with a {@link CompletionException} wrapping a
 *         {@link FlinkException} if the wait is interrupted
 */
@Override
public CompletableFuture<Acknowledge> triggerMainThreadExecutorComputation(Time timeout) {
	return CompletableFuture.supplyAsync(
		() -> {
			try {
				computationLatch.await();
			} catch (InterruptedException e) {
				// restore the interrupt flag so the executor thread observes the interruption
				Thread.currentThread().interrupt();
				throw new CompletionException(new FlinkException("Waiting on latch failed.", e));
			}

			return value;
		},
		getRpcService().getExecutor())
	.thenApplyAsync(
		(String v) -> Acknowledge.get(),
		getMainThreadExecutor());
}
 
Example 17
Source Project: Flink-CEPplus   Source File: FlinkKafkaConsumerBaseTest.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testClosePartitionDiscovererWhenOpenThrowException() throws Exception {
	final RuntimeException failureCause = new RuntimeException(new FlinkException("Test partition discoverer exception"));
	final FailingPartitionDiscoverer discoverer = new FailingPartitionDiscoverer(failureCause);
	final DummyFlinkKafkaConsumer<String> consumer = new DummyFlinkKafkaConsumer<>(discoverer);

	// the consumer must propagate the failure and still close the discoverer
	testFailingConsumerLifecycle(consumer, failureCause);
	assertTrue("partitionDiscoverer should be closed when consumer is closed", discoverer.isClosed());
}
 
Example 18
Source Project: Flink-CEPplus   Source File: FutureUtilsTest.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testHandleUncaughtExceptionWithExceptionallyCompletedFuture() {
	final CompletableFuture<String> failedFuture = FutureUtils.completedExceptionally(new FlinkException("foobar"));
	final TestingUncaughtExceptionHandler handler = new TestingUncaughtExceptionHandler();

	// an already-failed future must be reported to the handler right away
	FutureUtils.handleUncaughtException(failedFuture, handler);
	assertThat(handler.hasBeenCalled(), is(true));
}
 
Example 19
Source Project: Flink-CEPplus   Source File: JobResultTest.java    License: Apache License 2.0 5 votes vote down vote up
@Test
public void testFailedJobIsFailureResult() {
	// a FAILED archived graph with an attached failure cause must map to an unsuccessful result
	final JobResult result = JobResult.createFrom(
		new ArchivedExecutionGraphBuilder()
			.setJobID(new JobID())
			.setState(JobStatus.FAILED)
			.setFailureCause(new ErrorInfo(new FlinkException("Test exception"), 42L))
			.build());

	assertThat(result.isSuccess(), is(false));
}
 
Example 20
Source Project: flink   Source File: TaskExecutor.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Serves the task manager's log or stdout file as a transient blob.
 *
 * <p>The requested file (if configured and present on disk) is streamed into the
 * transient blob service and the resulting key is returned; otherwise the future
 * completes exceptionally with a {@link FlinkException}.
 *
 * @param fileType which file to upload ({@code LOG} or {@code STDOUT}; any other
 *                 type has no configured path and fails)
 * @param timeout RPC timeout (not read in this implementation)
 * @return future with the transient blob key, or failed if the file is
 *         unavailable, missing, or unreadable
 */
@Override
public CompletableFuture<TransientBlobKey> requestFileUpload(FileType fileType, Time timeout) {
	log.debug("Request file {} upload.", fileType);

	final String filePath;

	// resolve the on-disk path for the requested file type
	switch (fileType) {
		case LOG:
			filePath = taskManagerConfiguration.getTaskManagerLogPath();
			break;
		case STDOUT:
			filePath = taskManagerConfiguration.getTaskManagerStdoutPath();
			break;
		default:
			filePath = null;
	}

	if (filePath != null && !filePath.isEmpty()) {
		final File file = new File(filePath);

		if (file.exists()) {
			final TransientBlobCache transientBlobService = blobCacheService.getTransientBlobService();
			final TransientBlobKey transientBlobKey;
			// stream the file into the transient blob store; the stream is closed automatically
			try (FileInputStream fileInputStream = new FileInputStream(file)) {
				transientBlobKey = transientBlobService.putTransient(fileInputStream);
			} catch (IOException e) {
				log.debug("Could not upload file {}.", fileType, e);
				return FutureUtils.completedExceptionally(new FlinkException("Could not upload file " + fileType + '.', e));
			}

			return CompletableFuture.completedFuture(transientBlobKey);
		} else {
			log.debug("The file {} does not exist on the TaskExecutor {}.", fileType, getResourceID());
			return FutureUtils.completedExceptionally(new FlinkException("The file " + fileType + " does not exist on the TaskExecutor."));
		}
	} else {
		log.debug("The file {} is unavailable on the TaskExecutor {}.", fileType, getResourceID());
		return FutureUtils.completedExceptionally(new FlinkException("The file " + fileType + " is not available on the TaskExecutor."));
	}
}
 
Example 21
Source Project: Flink-CEPplus   Source File: AccumulatorHelper.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Evaluates the supplier inside an {@code OptionalFailure}: runtime failures are
 * logged and surfaced as a {@code FlinkException} instead of propagating unchecked.
 */
private static <R> OptionalFailure<R> wrapUnchecked(String name, Supplier<R> supplier) {
	return OptionalFailure.createFrom(() -> {
		try {
			return supplier.get();
		} catch (RuntimeException ex) {
			final String message = "Unexpected error while handling accumulator [" + name + "]";
			LOG.error(message, ex);
			throw new FlinkException(ex);
		}
	});
}
 
Example 22
Source Project: flink   Source File: FlinkKafkaConsumerBaseTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Tests that the partition discoverer is closed even when the consumer's
 * startup fails because the discoverer itself throws.
 */
@Test
public void testClosePartitionDiscovererWhenOpenThrowException() throws Exception {
	final RuntimeException failureCause = new RuntimeException(new FlinkException("Test partition discoverer exception"));
	// the discoverer is configured to fail with the cause above
	final FailingPartitionDiscoverer failingPartitionDiscoverer = new FailingPartitionDiscoverer(failureCause);

	final DummyFlinkKafkaConsumer<String> consumer = new DummyFlinkKafkaConsumer<>(failingPartitionDiscoverer);

	// the consumer must propagate the failure and still close the discoverer
	testFailingConsumerLifecycle(consumer, failureCause);
	assertTrue("partitionDiscoverer should be closed when consumer is closed", failingPartitionDiscoverer.isClosed());
}
 
Example 23
Source Project: flink   Source File: SlotSharingManagerTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Tests that we can release inner slots and that this triggers the slot release for all
 * its children.
 */
@Test
public void testInnerSlotRelease() {
	final TestingAllocatedSlotActions allocatedSlotActions = new TestingAllocatedSlotActions();

	final SlotSharingManager slotSharingManager = new SlotSharingManager(
		SLOT_SHARING_GROUP_ID,
		allocatedSlotActions,
		SLOT_OWNER);

	// build a hierarchy: root -> multiTaskSlot -> {singleTaskSlot1, multiTaskSlot1}
	SlotSharingManager.MultiTaskSlot rootSlot = slotSharingManager.createRootSlot(
		new SlotRequestId(),
		new CompletableFuture<>(),
		new SlotRequestId());

	SlotSharingManager.MultiTaskSlot multiTaskSlot = rootSlot.allocateMultiTaskSlot(
		new SlotRequestId(),
		new AbstractID());

	SlotSharingManager.SingleTaskSlot singleTaskSlot1 = multiTaskSlot.allocateSingleTaskSlot(
		new SlotRequestId(),
		ResourceProfile.UNKNOWN,
		new AbstractID(),
		Locality.LOCAL);

	SlotSharingManager.MultiTaskSlot multiTaskSlot1 = multiTaskSlot.allocateMultiTaskSlot(
		new SlotRequestId(),
		new AbstractID());

	// all three slots are known to the manager before the release
	assertTrue(slotSharingManager.contains(multiTaskSlot1.getSlotRequestId()));
	assertTrue(slotSharingManager.contains(singleTaskSlot1.getSlotRequestId()));
	assertTrue(slotSharingManager.contains(multiTaskSlot.getSlotRequestId()));

	// releasing the inner slot must cascade to both of its children
	multiTaskSlot.release(new FlinkException("Test exception"));

	assertFalse(slotSharingManager.contains(multiTaskSlot1.getSlotRequestId()));
	assertFalse(slotSharingManager.contains(singleTaskSlot1.getSlotRequestId()));
	assertFalse(slotSharingManager.contains(multiTaskSlot.getSlotRequestId()));
	// the pending logical slot of the released child fails exceptionally
	assertTrue(singleTaskSlot1.getLogicalSlotFuture().isCompletedExceptionally());
}
 
Example 24
Source Project: Flink-CEPplus   Source File: AbstractPythonUDF.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Renders the python exception's stack trace, logs it, and returns a
 * {@code FlinkException} carrying the same trace text.
 */
static FlinkException createAndLogException(PyException pe, Logger log) {
	// capture the full python stack trace into a string
	final StringWriter buffer = new StringWriter();
	try (PrintWriter writer = new PrintWriter(buffer)) {
		pe.printStackTrace(writer);
	}
	final String pythonStackTrace = buffer.toString().trim();

	log.error("Python function failed: " + System.lineSeparator() + pythonStackTrace);
	return new FlinkException("Python function failed: " + pythonStackTrace);
}
 
Example 25
Source Project: Flink-CEPplus   Source File: InterpreterUtils.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Deserialize the given python function. If the function's class definition cannot be found we assume that this is
 * the first invocation of this method for a given job and load the python script containing the class definition
 * via jython.
 *
 * @param context the RuntimeContext of the java function
 * @param serFun serialized python UDF
 * @return deserialized python UDF
 * @throws FlinkException if the deserialization failed
 */
@SuppressWarnings("unchecked")
public static <X> X deserializeFunction(RuntimeContext context, byte[] serFun) throws FlinkException {
	if (!jythonInitialized) {
		// This branch is only tested by end-to-end tests
		String path = context.getDistributedCache().getFile(PythonConstants.FLINK_PYTHON_DC_ID).getAbsolutePath();

		String scriptName = PythonStreamExecutionEnvironment.PythonJobParameters.getScriptName(context.getExecutionConfig().getGlobalJobParameters());

		try {
			initPythonInterpreter(
				new String[]{Paths.get(path, scriptName).toString()},
				path,
				scriptName);
		} catch (Exception e) {
			// Logging may itself fail if rendering the original exception relies on
			// jython being initialized properly. Guard only the logging call, so the
			// deliberate rethrow below is not swallowed by its own catch block
			// (the original code threw inside the try and always hit the fallback).
			try {
				LOG.error("Initialization of jython failed.", e);
			} catch (Exception ie) {
				LOG.error("Initialization of jython failed. Could not print original stacktrace.", ie);
				throw new FlinkRuntimeException("Initialization of jython failed. Could not print original stacktrace.");
			}
			throw new FlinkRuntimeException("Initialization of jython failed.", e);
		}
	}

	try {
		return (X) SerializationUtils.deserializeObject(serFun);
	} catch (IOException | ClassNotFoundException ex) {
		throw new FlinkException("Deserialization of user-function failed.", ex);
	}
}
 
Example 26
/**
 * Tests that the job graph can be retrieved from a jar on the class path
 * when no job class name is given explicitly.
 */
@Test
public void testJobGraphRetrievalFromJar() throws FlinkException, FileNotFoundException {
	final File testJar = TestJob.getTestJobJar();
	final ClassPathJobGraphRetriever classPathJobGraphRetriever = new ClassPathJobGraphRetriever(
		new JobID(),
		SavepointRestoreSettings.none(),
		PROGRAM_ARGUMENTS,
		// No class name specified, but the test JAR "is" on the class path
		null,
		() -> Collections.singleton(testJar));

	final JobGraph jobGraph = classPathJobGraphRetriever.retrieveJobGraph(new Configuration());

	// the job name shows that the job class was discovered from the jar
	assertThat(jobGraph.getName(), is(equalTo(TestJob.class.getCanonicalName() + "-suffix")));
}
 
Example 27
/**
 * Test that the restore can be stopped via the provided closeable registry.
 *
 * <p>The restore blocks on a latch inside the input stream; closing the registry
 * while the restore thread is blocked must abort the restore with a
 * {@link FlinkException}.
 */
@Test
public void testCanBeCanceledViaRegistry() throws Exception {
	CloseableRegistry closeableRegistry = new CloseableRegistry();
	OneShotLatch waitForBlock = new OneShotLatch();
	OneShotLatch unblock = new OneShotLatch();
	// the restore handle yields a stream that blocks until 'unblock' is triggered
	OperatorStateHandle blockingRestoreHandle = mock(OperatorStateHandle.class);
	when(blockingRestoreHandle.openInputStream()).thenReturn(new BlockingFSDataInputStream(waitForBlock, unblock));

	List<StateObjectCollection<OperatorStateHandle>> sortedRestoreOptions =
		Collections.singletonList(new StateObjectCollection<>(Collections.singletonList(blockingRestoreHandle)));

	BackendRestorerProcedure<OperatorStateBackend, OperatorStateHandle> restorerProcedure =
		new BackendRestorerProcedure<>(backendSupplier, closeableRegistry, "test op state backend");

	// run the restore on a separate thread and capture any exception it raises
	AtomicReference<Exception> exceptionReference = new AtomicReference<>(null);
	Thread restoreThread = new Thread(() -> {
		try {
			restorerProcedure.createAndRestore(sortedRestoreOptions);
		} catch (Exception e) {
			exceptionReference.set(e);
		}
	});

	restoreThread.start();
	// wait until the restore is blocked, then close the registry to cancel it
	waitForBlock.await();
	closeableRegistry.close();
	unblock.trigger();
	restoreThread.join();

	// the canceled restore must have failed with a FlinkException
	Exception exception = exceptionReference.get();
	Assert.assertTrue(exception instanceof FlinkException);
}
 
Example 28
/**
 * Kills the given Yarn application and deletes the Flink files that were
 * uploaded for it.
 *
 * @param applicationId id of the Yarn application to kill
 * @throws FlinkException if killing the application or deleting its files fails
 */
@Override
public void killCluster(ApplicationId applicationId) throws FlinkException {
	try {
		yarnClient.killApplication(applicationId);
		// clean up the files that were shipped for this application
		Utils.deleteApplicationFiles(Collections.singletonMap(
			YarnConfigKeys.FLINK_YARN_FILES,
			getYarnFilesDir(applicationId).toUri().toString()));
	} catch (YarnException | IOException e) {
		throw new FlinkException("Could not kill the Yarn Flink cluster with id " + applicationId + '.', e);
	}
}
 
Example 29
Source Project: flink   Source File: AbstractYarnClusterDescriptor.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Validates the given {@link ClusterSpecification} before deployment.
 *
 * <p>Validation works by running the same memory calculations that are used when
 * the cluster is started; any {@link IllegalArgumentException} they raise is
 * translated into a {@link FlinkException}.
 *
 * @param clusterSpecification cluster specification to check against the configuration
 * @throws FlinkException if the cluster cannot be started with the provided {@link ClusterSpecification}
 */
private void validateClusterSpecification(ClusterSpecification clusterSpecification) throws FlinkException {
	try {
		final long taskManagerMemoryMB = clusterSpecification.getTaskManagerMemoryMB();
		// these calls throw IllegalArgumentException when the memory budget is infeasible
		final long cutoffMB = ContaineredTaskManagerParameters.calculateCutoffMB(flinkConfiguration, taskManagerMemoryMB);
		TaskManagerServices.calculateHeapSizeMB(taskManagerMemoryMB - cutoffMB, flinkConfiguration);
	} catch (IllegalArgumentException iae) {
		throw new FlinkException("Cannot fulfill the minimum memory requirements with the provided " +
			"cluster specification. Please increase the memory of the cluster.", iae);
	}
}
 
Example 30
Source Project: Flink-CEPplus   Source File: FlinkYarnSessionCli.java    License: Apache License 2.0 5 votes vote down vote up
@Override
public AbstractYarnClusterDescriptor createClusterDescriptor(CommandLine commandLine) throws FlinkException {
	// merge command-line overrides into the base configuration before building the descriptor
	final Configuration effectiveConfiguration = applyCommandLineOptionsToConfiguration(commandLine);

	return createDescriptor(
		effectiveConfiguration,
		yarnConfiguration,
		configurationDirectory,
		commandLine);
}