org.apache.flink.runtime.jobgraph.JobGraph Java Examples

The following examples show how to use org.apache.flink.runtime.jobgraph.JobGraph, Flink's low-level representation of a job that is submitted to the cluster. Each example names the source file and the project it was taken from.
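Before the individual examples, here is a minimal, self-contained sketch of building a JobGraph by hand. It is an illustration by the editor, not taken from any of the projects below; it only uses APIs that appear in the examples (JobVertex, DistributionPattern, ResultPartitionType), plus Flink's NoOpInvokable test task as a stand-in for real task code.

import org.apache.flink.runtime.io.network.partition.ResultPartitionType;
import org.apache.flink.runtime.jobgraph.DistributionPattern;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.runtime.jobgraph.JobVertex;
import org.apache.flink.runtime.testtasks.NoOpInvokable;

public class JobGraphSketch {

	public static JobGraph buildTwoVertexJobGraph() {
		// Each JobVertex is one node of the graph; the invokable class is the code it runs.
		final JobVertex source = new JobVertex("source");
		source.setInvokableClass(NoOpInvokable.class);
		source.setParallelism(2);

		final JobVertex sink = new JobVertex("sink");
		sink.setInvokableClass(NoOpInvokable.class);
		sink.setParallelism(2);

		// Connect the vertices with a pipelined, pointwise data exchange.
		sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

		// The JobGraph is the job representation that gets submitted to the cluster.
		return new JobGraph("two-vertex job", source, sink);
	}
}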
Example #1
Source File: Dispatcher.java    From flink with Apache License 2.0
private CompletableFuture<Acknowledge> internalSubmitJob(JobGraph jobGraph) {
	log.info("Submitting job {} ({}).", jobGraph.getJobID(), jobGraph.getName());

	final CompletableFuture<Acknowledge> persistAndRunFuture = waitForTerminatingJobManager(jobGraph.getJobID(), jobGraph, this::persistAndRunJob)
		.thenApply(ignored -> Acknowledge.get());

	return persistAndRunFuture.handleAsync((acknowledge, throwable) -> {
		if (throwable != null) {
			cleanUpJobData(jobGraph.getJobID(), true);

			final Throwable strippedThrowable = ExceptionUtils.stripCompletionException(throwable);
			log.error("Failed to submit job {}.", jobGraph.getJobID(), strippedThrowable);
			throw new CompletionException(
				new JobSubmissionException(jobGraph.getJobID(), "Failed to submit job.", strippedThrowable));
		} else {
			return acknowledge;
		}
	}, getRpcService().getExecutor());
}
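For context, internalSubmitJob is only reached through the Dispatcher's RPC interface. A caller-side sketch (the gateway and timeout here are assumptions, not part of the example above):

	// A sketch: a client submits through the DispatcherGateway; the public
	// submitJob(JobGraph, Time) entry point is shown in the Dispatcher examples below.
	CompletableFuture<Acknowledge> acknowledgeFuture =
		dispatcherGateway.submitJob(jobGraph, Time.seconds(10));
	acknowledgeFuture.get(); // blocks until the submission is acknowledged or fails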
 
Example #2
Source File: DefaultSchedulerTest.java    From flink with Apache License 2.0
@Test
public void scheduledVertexOrderFromSchedulingStrategyIsRespected() throws Exception {
	final JobGraph jobGraph = singleJobVertexJobGraph(10);
	final JobVertexID onlyJobVertexId = getOnlyJobVertex(jobGraph).getID();

	final List<ExecutionVertexID> desiredScheduleOrder = Arrays.asList(
		new ExecutionVertexID(onlyJobVertexId, 4),
		new ExecutionVertexID(onlyJobVertexId, 0),
		new ExecutionVertexID(onlyJobVertexId, 3),
		new ExecutionVertexID(onlyJobVertexId, 1),
		new ExecutionVertexID(onlyJobVertexId, 2));

	final TestSchedulingStrategy.Factory schedulingStrategyFactory = new TestSchedulingStrategy.Factory();
	createScheduler(jobGraph, schedulingStrategyFactory);
	final TestSchedulingStrategy schedulingStrategy = schedulingStrategyFactory.getLastCreatedSchedulingStrategy();

	schedulingStrategy.schedule(desiredScheduleOrder);

	final List<ExecutionVertexID> deployedExecutionVertices = testExecutionVertexOperations.getDeployedVertices();

	assertEquals(desiredScheduleOrder, deployedExecutionVertices);
}
 
Example #3
Source File: RestartStrategyTest.java    From flink with Apache License 2.0
/**
 * Tests that in a streaming job with checkpointing enabled, the client side does not set a
 * concrete default restart strategy, but leaves the fallback strategy in place for the
 * cluster to resolve.
 */
@Test
public void testFallbackStrategyOnClientSideWhenCheckpointingEnabled() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500);

	env.fromElements(1).print();

	StreamGraph graph = env.getStreamGraph();
	JobGraph jobGraph = graph.getJobGraph();

	RestartStrategies.RestartStrategyConfiguration restartStrategy =
		jobGraph.getSerializedExecutionConfig().deserializeValue(getClass().getClassLoader()).getRestartStrategy();

	Assert.assertNotNull(restartStrategy);
	Assert.assertTrue(restartStrategy instanceof RestartStrategies.FallbackRestartStrategyConfiguration);
}
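The complementary case: when the program configures a restart strategy explicitly, that choice is serialized into the execution config instead of the fallback. A minimal sketch (attempt count and delay are arbitrary illustrative values):

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500);
	// Explicit client-side choice: restart up to 3 times, waiting 10 seconds between attempts.
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.of(10, TimeUnit.SECONDS)));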
 
Example #4
Source File: JarHandlerUtils.java    From flink with Apache License 2.0
public JobGraph toJobGraph(Configuration configuration) {
	if (!Files.exists(jarFile)) {
		throw new CompletionException(new RestHandlerException(
			String.format("Jar file %s does not exist", jarFile), HttpResponseStatus.BAD_REQUEST));
	}

	try {
		final PackagedProgram packagedProgram = new PackagedProgram(
			jarFile.toFile(),
			entryClass,
			programArgs.toArray(new String[0]));
		return PackagedProgramUtils.createJobGraph(packagedProgram, configuration, parallelism, jobId);
	} catch (final ProgramInvocationException e) {
		throw new CompletionException(e);
	}
}
 
Example #5
Source File: ClientUtilsTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void uploadAndSetUserJars() throws Exception {
	java.nio.file.Path tmpDir = temporaryFolder.newFolder().toPath();
	JobGraph jobGraph = new JobGraph();

	Collection<Path> jars = Arrays.asList(
		new Path(Files.createFile(tmpDir.resolve("jar1.jar")).toString()),
		new Path(Files.createFile(tmpDir.resolve("jar2.jar")).toString()));

	jars.forEach(jobGraph::addJar);

	assertEquals(jars.size(), jobGraph.getUserJars().size());
	assertEquals(0, jobGraph.getUserJarBlobKeys().size());

	ClientUtils.extractAndUploadJobGraphFiles(jobGraph, () -> new BlobClient(new InetSocketAddress("localhost", blobServer.getPort()), new Configuration()));

	assertEquals(jars.size(), jobGraph.getUserJars().size());
	assertEquals(jars.size(), jobGraph.getUserJarBlobKeys().size());
	assertEquals(jars.size(), jobGraph.getUserJarBlobKeys().stream().distinct().count());

	for (PermanentBlobKey blobKey : jobGraph.getUserJarBlobKeys()) {
		blobServer.getFile(jobGraph.getJobID(), blobKey);
	}
}
 
Example #6
Source File: Dispatcher.java    From Flink-CEPplus with Apache License 2.0
private CompletableFuture<Boolean> tryRunRecoveredJobGraph(JobGraph jobGraph, DispatcherId dispatcherId) throws Exception {
	if (leaderElectionService.hasLeadership(dispatcherId.toUUID())) {
		final JobID jobId = jobGraph.getJobID();
		if (jobManagerRunnerFutures.containsKey(jobId)) {
			// we must not release the job graph lock since it can only be locked once and
			// is currently being executed. Once we support multiple locks, we must release
			// the JobGraph here
			log.debug("Ignore added JobGraph because the job {} is already running.", jobId);
			return CompletableFuture.completedFuture(true);
		} else if (runningJobsRegistry.getJobSchedulingStatus(jobId) != RunningJobsRegistry.JobSchedulingStatus.DONE) {
			return waitForTerminatingJobManager(jobId, jobGraph, this::runJob).thenApply(ignored -> true);
		} else {
			log.debug("Ignore added JobGraph because the job {} has already been completed.", jobId);
		}
	}

	return CompletableFuture.completedFuture(false);
}
 
Example #7
Source File: JobMasterTest.java    From Flink-CEPplus with Apache License 2.0
@Nonnull
private JobGraph createJobGraphFromJobVerticesWithCheckpointing(SavepointRestoreSettings savepointRestoreSettings, JobVertex... jobVertices) {
	final JobGraph jobGraph = new JobGraph(jobVertices);

	// enable checkpointing which is required to resume from a savepoint
	final CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration = new CheckpointCoordinatorConfiguration(
		1000L,	// checkpoint interval
		1000L,	// checkpoint timeout
		1000L,	// minimum pause between checkpoints
		1,	// maximum number of concurrent checkpoints
		CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
		true);	// exactly-once mode
	final JobCheckpointingSettings checkpointingSettings = new JobCheckpointingSettings(
		Collections.emptyList(),	// vertices to trigger
		Collections.emptyList(),	// vertices to acknowledge
		Collections.emptyList(),	// vertices to confirm
		checkpointCoordinatorConfiguration,
		null);	// no custom state backend
	jobGraph.setSnapshotSettings(checkpointingSettings);
	jobGraph.setSavepointRestoreSettings(savepointRestoreSettings);

	return jobGraph;
}
 
Example #8
Source File: DefaultSchedulerTest.java    From flink with Apache License 2.0
@Test
public void failureInfoIsSetAfterTaskFailure() {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
	final JobID jobId = jobGraph.getJobID();
	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);

	final ArchivedExecutionVertex onlyExecutionVertex = Iterables.getOnlyElement(scheduler.requestJob().getAllExecutionVertices());
	final ExecutionAttemptID attemptId = onlyExecutionVertex.getCurrentExecutionAttempt().getAttemptId();

	final String exceptionMessage = "expected exception";
	scheduler.updateTaskExecutionState(new TaskExecutionState(jobId, attemptId, ExecutionState.FAILED, new RuntimeException(exceptionMessage)));

	final ErrorInfo failureInfo = scheduler.requestJob().getFailureInfo();
	assertThat(failureInfo, is(notNullValue()));
	assertThat(failureInfo.getExceptionAsString(), containsString(exceptionMessage));
}
 
Example #9
Source File: JobMasterTest.java    From flink with Apache License 2.0
@Nonnull
private JobMaster createJobMaster(
		Configuration configuration,
		JobGraph jobGraph,
		HighAvailabilityServices highAvailabilityServices,
		JobManagerSharedServices jobManagerSharedServices,
		HeartbeatServices heartbeatServices,
		OnCompletionActions onCompletionActions) throws Exception {

	return new JobMasterBuilder(jobGraph, rpcService)
		.withConfiguration(configuration)
		.withHighAvailabilityServices(highAvailabilityServices)
		.withJobManagerSharedServices(jobManagerSharedServices)
		.withHeartbeatServices(heartbeatServices)
		.withOnCompletionActions(onCompletionActions)
		.withResourceId(jmResourceId)
		.createJobMaster();
}
 
Example #10
Source File: JobSubmitHandler.java    From Flink-CEPplus with Apache License 2.0
private CompletableFuture<JobGraph> uploadJobGraphFiles(
		DispatcherGateway gateway,
		CompletableFuture<JobGraph> jobGraphFuture,
		Collection<Path> jarFiles,
		Collection<Tuple2<String, Path>> artifacts,
		Configuration configuration) {
	CompletableFuture<Integer> blobServerPortFuture = gateway.getBlobServerPort(timeout);

	return jobGraphFuture.thenCombine(blobServerPortFuture, (JobGraph jobGraph, Integer blobServerPort) -> {
		final InetSocketAddress address = new InetSocketAddress(gateway.getHostname(), blobServerPort);
		try {
			ClientUtils.uploadJobGraphFiles(jobGraph, jarFiles, artifacts, () -> new BlobClient(address, configuration));
		} catch (FlinkException e) {
			throw new CompletionException(new RestHandlerException(
				"Could not upload job files.",
				HttpResponseStatus.INTERNAL_SERVER_ERROR,
				e));
		}
		return jobGraph;
	});
}
 
Example #11
Source File: JarHandlerParameterTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testProvideJobId() throws Exception {
	JobID jobId = new JobID();

	HandlerRequest<REQB, M> request = createRequest(
		getJarRequestBodyWithJobId(jobId),
		getUnresolvedJarMessageParameters(),
		getUnresolvedJarMessageParameters(),
		jarWithManifest
	);

	handleRequest(request);

	Optional<JobGraph> jobGraph = getLastSubmittedJobGraphAndReset();

	assertThat(jobGraph.isPresent(), is(true));
	assertThat(jobGraph.get().getJobID(), is(equalTo(jobId)));
}
 
Example #12
Source File: YarnClusterDescriptor.java    From Flink-CEPplus with Apache License 2.0
@Override
public ClusterClient<ApplicationId> deployJobCluster(
	ClusterSpecification clusterSpecification,
	JobGraph jobGraph,
	boolean detached) throws ClusterDeploymentException {

	// this is required because the slots are allocated lazily
	jobGraph.setAllowQueuedScheduling(true);

	try {
		return deployInternal(
			clusterSpecification,
			"Flink per-job cluster",
			getYarnJobClusterEntrypoint(),
			jobGraph,
			detached);
	} catch (Exception e) {
		throw new ClusterDeploymentException("Could not deploy Yarn job cluster.", e);
	}
}
 
Example #13
Source File: Dispatcher.java    From flink with Apache License 2.0
@Override
public CompletableFuture<Acknowledge> submitJob(JobGraph jobGraph, Time timeout) {
	log.info("Received JobGraph submission {} ({}).", jobGraph.getJobID(), jobGraph.getName());

	try {
		if (isDuplicateJob(jobGraph.getJobID())) {
			return FutureUtils.completedExceptionally(
				new JobSubmissionException(jobGraph.getJobID(), "Job has already been submitted."));
		} else if (isPartialResourceConfigured(jobGraph)) {
			return FutureUtils.completedExceptionally(
				new JobSubmissionException(jobGraph.getJobID(), "Currently jobs is not supported if parts of the vertices have " +
						"resources configured. The limitation will be removed in future versions."));
		} else {
			return internalSubmitJob(jobGraph);
		}
	} catch (FlinkException e) {
		return FutureUtils.completedExceptionally(e);
	}
}
 
Example #14
Source File: PerJobMiniClusterFactory.java    From flink with Apache License 2.0
/**
 * Starts a {@link MiniCluster} and submits a job.
 */
public CompletableFuture<JobClient> submitJob(JobGraph jobGraph) throws Exception {
	MiniClusterConfiguration miniClusterConfig = getMiniClusterConfig(jobGraph.getMaximumParallelism());
	MiniCluster miniCluster = miniClusterFactory.apply(miniClusterConfig);
	miniCluster.start();

	return miniCluster
		.submitJob(jobGraph)
		.thenApply(result -> new PerJobMiniClusterJobClient(result.getJobID(), miniCluster))
		.whenComplete((ignored, throwable) -> {
			if (throwable != null) {
				// We failed to create the JobClient and must shutdown to ensure cleanup.
				shutDownCluster(miniCluster);
			}
		})
		.thenApply(Function.identity());
}
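A usage sketch for the method above; `factory` and `jobGraph` are assumed to exist, and the checked exceptions of get() are left to the caller:

	// Submit to an embedded MiniCluster and wait for the JobClient.
	JobClient jobClient = factory.submitJob(jobGraph).get();
	System.out.println("Running job " + jobClient.getJobID() + " on an embedded MiniCluster");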
 
Example #15
Source File: ClassPathPackagedProgramRetrieverTest.java    From flink with Apache License 2.0
@Test
public void testJobGraphRetrievalJobClassNameHasPrecedenceOverClassPath() throws IOException, FlinkException, ProgramInvocationException {
	final File testJar = new File("non-existing");

	final ClassPathPackagedProgramRetriever retrieverUnderTest =
		ClassPathPackagedProgramRetriever.newBuilder(PROGRAM_ARGUMENTS)
			// Both a job class name is specified and a JAR "is" on the class path (the file does
			// not actually exist); the class name should take precedence.
		.setJobClassName(TestJob.class.getCanonicalName())
		.setJarsOnClassPath(() -> Collections.singleton(testJar))
		.build();

	final JobGraph jobGraph = retrieveJobGraph(retrieverUnderTest, new Configuration());

	assertThat(jobGraph.getName(), is(equalTo(TestJob.class.getCanonicalName() + "-suffix")));
}
 
Example #16
Source File: TestEnvironment.java    From flink with Apache License 2.0
@Override
public JobExecutionResult execute(String jobName) throws Exception {
	OptimizedPlan op = compileProgram(jobName);

	JobGraphGenerator jgg = new JobGraphGenerator();
	JobGraph jobGraph = jgg.compileJobGraph(op);

	for (Path jarFile: jarFiles) {
		jobGraph.addJar(jarFile);
	}

	jobGraph.setClasspaths(new ArrayList<>(classPaths));

	this.lastJobExecutionResult = jobExecutor.executeJobBlocking(jobGraph);
	return this.lastJobExecutionResult;
}
 
Example #17
Source File: Dispatcher.java    From Flink-CEPplus with Apache License 2.0
/**
 * Recovers all jobs persisted via the submitted job graph store.
 */
@VisibleForTesting
Collection<JobGraph> recoverJobs() throws Exception {
	log.info("Recovering all persisted jobs.");
	final Collection<JobID> jobIds = submittedJobGraphStore.getJobIds();

	try {
		return recoverJobGraphs(jobIds);
	} catch (Exception e) {
		// release all recovered job graphs
		for (JobID jobId : jobIds) {
			try {
				submittedJobGraphStore.releaseJobGraph(jobId);
			} catch (Exception ie) {
				e.addSuppressed(ie);
			}
		}
		throw e;
	}
}
 
Example #18
Source File: Dispatcher.java    From Flink-CEPplus with Apache License 2.0
private CompletableFuture<Boolean> tryAcceptLeadershipAndRunJobs(UUID newLeaderSessionID, Collection<JobGraph> recoveredJobs) {
	final DispatcherId dispatcherId = DispatcherId.fromUuid(newLeaderSessionID);

	if (leaderElectionService.hasLeadership(newLeaderSessionID)) {
		log.debug("Dispatcher {} accepted leadership with fencing token {}. Start recovered jobs.", getAddress(), dispatcherId);
		setNewFencingToken(dispatcherId);

		Collection<CompletableFuture<?>> runFutures = new ArrayList<>(recoveredJobs.size());

		for (JobGraph recoveredJob : recoveredJobs) {
			final CompletableFuture<?> runFuture = waitForTerminatingJobManager(recoveredJob.getJobID(), recoveredJob, this::runJob);
			runFutures.add(runFuture);
		}

		return FutureUtils.waitForAll(runFutures).thenApply(ignored -> true);
	} else {
		log.debug("Dispatcher {} lost leadership before accepting it. Stop recovering jobs for fencing token {}.", getAddress(), dispatcherId);
		return CompletableFuture.completedFuture(false);
	}
}
 
Example #19
Source File: Dispatcher.java    From flink with Apache License 2.0
@Nullable
private JobGraph recoverJob(JobID jobId) throws Exception {
	log.debug("Recover job {}.", jobId);
	final SubmittedJobGraph submittedJobGraph = submittedJobGraphStore.recoverJobGraph(jobId);

	if (submittedJobGraph != null) {
		return submittedJobGraph.getJobGraph();
	} else {
		return null;
	}
}
 
Example #20
Source File: JobManagerRunnerImplTest.java    From flink with Apache License 2.0
@BeforeClass
public static void setupClass() {
	defaultJobMasterServiceFactory = new TestingJobMasterServiceFactory();

	final JobVertex jobVertex = new JobVertex("Test vertex");
	jobVertex.setInvokableClass(NoOpInvokable.class);
	jobGraph = new JobGraph(jobVertex);

	archivedExecutionGraph = new ArchivedExecutionGraphBuilder()
		.setJobID(jobGraph.getJobID())
		.setState(JobStatus.FINISHED)
		.build();
}
 
Example #21
Source File: JobManagerRunnerFactory.java    From flink with Apache License 2.0
JobManagerRunner createJobManagerRunner(
		JobGraph jobGraph,
		Configuration configuration,
		RpcService rpcService,
		HighAvailabilityServices highAvailabilityServices,
		HeartbeatServices heartbeatServices,
		JobManagerSharedServices jobManagerServices,
		JobManagerJobMetricGroupFactory jobManagerJobMetricGroupFactory,
		FatalErrorHandler fatalErrorHandler) throws Exception;
 
Example #22
Source File: PipelinedFailoverRegionBuildingTest.java    From flink with Apache License 2.0
/**
 * <pre>
 *     (a1) -+-> (b1) -+-> (c1) 
 *           X         X
 *     (a2) -+-> (b2) -+-> (c2)
 *           X         X
 *     (a3) -+-> (b3) -+-> (c3)
 *
 *           ^         ^
 *           |         |
 *     (pipelined) (blocking)
 * </pre>
 */
@Test
public void testTwoComponentsViaBlockingExchange2() throws Exception {
	final JobVertex vertex1 = new JobVertex("vertex1");
	vertex1.setInvokableClass(NoOpInvokable.class);
	vertex1.setParallelism(3);

	final JobVertex vertex2 = new JobVertex("vertex2");
	vertex2.setInvokableClass(NoOpInvokable.class);
	vertex2.setParallelism(2);

	final JobVertex vertex3 = new JobVertex("vertex3");
	vertex3.setInvokableClass(NoOpInvokable.class);
	vertex3.setParallelism(2);

	vertex2.connectNewDataSetAsInput(vertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
	vertex3.connectNewDataSetAsInput(vertex2, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

	final JobGraph jobGraph = new JobGraph("test job", vertex1, vertex2, vertex3);
	final ExecutionGraph eg = createExecutionGraph(jobGraph);

	RestartPipelinedRegionStrategy failoverStrategy = (RestartPipelinedRegionStrategy) eg.getFailoverStrategy();
	FailoverRegion region1 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex1.getID()).getTaskVertices()[1]);
	FailoverRegion region2 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex2.getID()).getTaskVertices()[0]);
	FailoverRegion region31 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[0]);
	FailoverRegion region32 = failoverStrategy.getFailoverRegion(eg.getJobVertex(vertex3.getID()).getTaskVertices()[1]);

	assertTrue(region1 == region2);
	assertTrue(region2 != region31);
	assertTrue(region32 != region31);
}
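The regions computed in this test only take effect when the pipelined-region failover strategy is selected in the cluster configuration. A sketch (option and value as understood by the Flink versions these examples target):

	Configuration configuration = new Configuration();
	// "region" restarts only the pipelined region containing the failed task;
	// "full" restarts the whole job.
	configuration.setString(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY, "region");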
 
Example #23
Source File: MiniClusterITCase.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testJobWithAllVerticesFailingDuringInstantiation() throws Exception {
	final int parallelism = 11;

	final MiniClusterConfiguration cfg = new MiniClusterConfiguration.Builder()
		.setNumTaskManagers(1)
		.setNumSlotsPerTaskManager(parallelism)
		.setConfiguration(getDefaultConfiguration())
		.build();

	try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
		miniCluster.start();

		final JobVertex sender = new JobVertex("Sender");
		sender.setInvokableClass(InstantiationErrorSender.class);
		sender.setParallelism(parallelism);

		final JobVertex receiver = new JobVertex("Receiver");
		receiver.setInvokableClass(Receiver.class);
		receiver.setParallelism(parallelism);

		receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE,
			ResultPartitionType.PIPELINED);

		final JobGraph jobGraph = new JobGraph("Pointwise Job", sender, receiver);

		try {
			miniCluster.executeJobBlocking(jobGraph);

			fail("Job should fail.");
		} catch (JobExecutionException e) {
			assertTrue(findThrowable(e, Exception.class).isPresent());
			assertTrue(findThrowableWithMessage(e, "Test exception in constructor").isPresent());
		}
	}
}
 
Example #24
Source File: LocalStreamEnvironmentWithAsyncExecution.java    From flink-crawler with Apache License 2.0
/**
 * Executes the JobGraph on a local mini cluster, using a user-specified job name.
 *
 * @param jobName
 *            name of the job
 * @return The result of the job execution, containing elapsed time and accumulators.
 */
@Override
public JobExecutionResult execute(String jobName) throws Exception {
    // transform the streaming program into a JobGraph
    StreamGraph streamGraph = getStreamGraph();
    streamGraph.setJobName(jobName);

    JobGraph jobGraph = streamGraph.getJobGraph();

    Configuration configuration = new Configuration();
    configuration.addAll(jobGraph.getJobConfiguration());

    configuration.setInteger(TaskManagerOptions.NUM_TASK_SLOTS,
            jobGraph.getMaximumParallelism());

    // add (and override) the settings with what the user defined
    configuration.addAll(_conf);

    _exec = new LocalFlinkMiniCluster(configuration, true);

    try {
        _exec.start();
        return _exec.submitJobAndWait(jobGraph, getConfig().isSysoutLoggingEnabled());
    } finally {
        transformations.clear();
        _exec.stop();
        _exec = null;
    }
}
 
Example #25
Source File: JarPlanHandler.java    From flink with Apache License 2.0
public JarPlanHandler(
		final GatewayRetriever<? extends RestfulGateway> leaderRetriever,
		final Time timeout,
		final Map<String, String> responseHeaders,
		final MessageHeaders<JarPlanRequestBody, JobPlanInfo, JarPlanMessageParameters> messageHeaders,
		final Path jarDir,
		final Configuration configuration,
		final Executor executor,
		final Function<JobGraph, JobPlanInfo> planGenerator) {
	super(leaderRetriever, timeout, responseHeaders, messageHeaders);
	this.jarDir = requireNonNull(jarDir);
	this.configuration = requireNonNull(configuration);
	this.executor = requireNonNull(executor);
	this.planGenerator = planGenerator;
}
 
Example #26
Source File: DefaultSchedulerTest.java    From flink with Apache License 2.0
@Test
public void suspendJobWillIncrementVertexVersions() {
	final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
	final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);
	final ExecutionVertexID onlyExecutionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);

	final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);
	final ExecutionVertexVersion executionVertexVersion = executionVertexVersioner.getExecutionVertexVersion(
		onlyExecutionVertexId);

	scheduler.suspend(new Exception("forced suspend"));

	assertTrue(executionVertexVersioner.isModified(executionVertexVersion));
}
 
Example #27
Source File: FlinkPipelineTranslationUtil.java    From flink with Apache License 2.0
/**
 * Translates the given {@link Pipeline} into a {@link JobGraph}, setting the given user class
 * loader as the thread's context class loader for the duration of the translation.
 */
public static JobGraph getJobGraphUnderUserClassLoader(
	final ClassLoader userClassloader,
	final Pipeline pipeline,
	final Configuration configuration,
	final int defaultParallelism) {
	final ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
	try {
		Thread.currentThread().setContextClassLoader(userClassloader);
		return FlinkPipelineTranslationUtil.getJobGraph(pipeline, configuration, defaultParallelism);
	} finally {
		Thread.currentThread().setContextClassLoader(contextClassLoader);
	}
}
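A caller-side sketch; in the Flink versions these examples come from, StreamGraph implements Pipeline, so a streaming program can be translated directly (the class loader and parallelism below are assumptions):

	StreamGraph streamGraph = env.getStreamGraph();
	JobGraph jobGraph = FlinkPipelineTranslationUtil.getJobGraphUnderUserClassLoader(
		userCodeClassLoader,	// must be able to load the user's job classes
		streamGraph,	// StreamGraph implements Pipeline
		new Configuration(),
		4);	// default parallelism for operators without an explicit setting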
 
Example #28
Source File: JobRetrievalITCase.java    From flink with Apache License 2.0
@Test
public void testJobRetrieval() throws Exception {
	final JobID jobID = new JobID();

	final JobVertex imalock = new JobVertex("imalock");
	imalock.setInvokableClass(SemaphoreInvokable.class);

	final JobGraph jobGraph = new JobGraph(jobID, "testjob", imalock);

	// acquire the lock to make sure that the job cannot complete until the job client
	// has been attached in resumingThread
	lock.acquire();

	ClientUtils.submitJob(client, jobGraph);

	final CheckedThread resumingThread = new CheckedThread("Flink-Job-Retriever") {
		@Override
		public void go() throws Exception {
			assertNotNull(client.requestJobResult(jobID).get());
		}
	};

	// wait until the job is running
	while (client.listJobs().get().isEmpty()) {
		Thread.sleep(50);
	}

	// kick off resuming
	resumingThread.start();

	// wait for client to connect
	while (resumingThread.getState() != Thread.State.WAITING) {
		Thread.sleep(10);
	}

	// client has connected, we can release the lock
	lock.release();

	resumingThread.sync();
}
 
Example #29
Source File: JobMasterTest.java    From flink with Apache License 2.0
@Test
public void testRequestKvStateWithIrrelevantRegistration() throws Exception {
	final JobGraph graph = createKvJobGraph();

	final JobMaster jobMaster = createJobMaster(
		configuration,
		graph,
		haServices,
		new TestingJobManagerSharedServicesBuilder().build(),
		heartbeatServices);

	CompletableFuture<Acknowledge> startFuture = jobMaster.start(jobMasterId);
	final JobMasterGateway jobMasterGateway = jobMaster.getSelfGateway(JobMasterGateway.class);

	try {
		// wait for the start to complete
		startFuture.get(testingTimeout.toMilliseconds(), TimeUnit.MILLISECONDS);

		// register an irrelevant KvState
		try {
			jobMasterGateway.notifyKvStateRegistered(
				new JobID(),
				new JobVertexID(),
				new KeyGroupRange(0, 0),
				"any-name",
				new KvStateID(),
				new InetSocketAddress(InetAddress.getLocalHost(), 1233)).get();
			fail("Expected to fail with FlinkJobNotFoundException.");
		} catch (Exception e) {
			assertTrue(ExceptionUtils.findThrowable(e, FlinkJobNotFoundException.class).isPresent());
		}
	} finally {
		RpcUtils.terminateRpcEndpoint(jobMaster, testingTimeout);
	}
}