Java Code Examples for org.apache.flink.client.ClientUtils#submitJobAndWaitForResult()
The following examples show how to use org.apache.flink.client.ClientUtils#submitJobAndWaitForResult().
Each example is drawn from the Apache Flink project; its source file and license are noted above it.
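All of the examples share the same core pattern: build a JobGraph, obtain a ClusterClient, and pass both to ClientUtils.submitJobAndWaitForResult() together with a class loader for resolving user code. The call blocks until the job reaches a terminal state and either returns a JobExecutionResult or throws ProgramInvocationException. Below is a minimal sketch of that pattern; the runAndWait helper and the MyJob class are illustrative names for this sketch, not part of the Flink API.

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.client.ClientUtils;
import org.apache.flink.client.program.ClusterClient;
import org.apache.flink.client.program.ProgramInvocationException;
import org.apache.flink.runtime.jobgraph.JobGraph;

// Hypothetical helper illustrating the call pattern used throughout the examples below.
public static JobExecutionResult runAndWait(ClusterClient<?> client, JobGraph jobGraph)
        throws ProgramInvocationException {
    // Submits the job graph and blocks until the job reaches a terminal state;
    // a failed job surfaces as a ProgramInvocationException.
    JobExecutionResult result = ClientUtils.submitJobAndWaitForResult(
        client, jobGraph, MyJob.class.getClassLoader());
    System.out.println("Job " + result.getJobID() + " finished in " + result.getNetRuntime() + " ms");
    return result;
}

Note that Example 9 additionally uses the non-blocking counterpart ClientUtils.submitJob, which returns once the submission is acknowledged rather than waiting for the job to finish.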
Example 1
Source File: RegionFailoverITCase.java From flink with Apache License 2.0
/**
 * Tests that a simple job (Source -> Map) with multiple regions can restore operator state.
 *
 * <p>The last subtask of the Map function in the 1st stream graph fails {@code NUM_OF_RESTARTS} times,
 * and the test verifies that the restored state is identical to the last completed checkpoint's.
 */
@Test(timeout = 60000)
public void testMultiRegionFailover() {
    try {
        JobGraph jobGraph = createJobGraph();
        ClusterClient<?> client = cluster.getClusterClient();
        ClientUtils.submitJobAndWaitForResult(client, jobGraph, RegionFailoverITCase.class.getClassLoader());
        verifyAfterJobExecuted();
    } catch (Exception e) {
        e.printStackTrace();
        Assert.fail(e.getMessage());
    }
}
Example 2
Source File: NetworkStackThroughputITCase.java From flink with Apache License 2.0
private void testProgram(
        final MiniClusterWithClientResource cluster,
        final int dataVolumeGb,
        final boolean useForwarder,
        final boolean isSlowSender,
        final boolean isSlowReceiver,
        final int parallelism) throws Exception {
    ClusterClient<?> client = cluster.getClusterClient();

    JobExecutionResult jer = ClientUtils.submitJobAndWaitForResult(
        client,
        createJobGraph(
            dataVolumeGb,
            useForwarder,
            isSlowSender,
            isSlowReceiver,
            parallelism),
        getClass().getClassLoader());

    long dataVolumeMbit = dataVolumeGb * 8192;
    long runtimeSecs = jer.getNetRuntime(TimeUnit.SECONDS);

    int mbitPerSecond = (int) (((double) dataVolumeMbit) / runtimeSecs);

    LOG.info(String.format("Test finished with throughput of %d MBit/s (runtime [secs]: %d, "
        + "data volume [gb/mbits]: %d/%d)", mbitPerSecond, runtimeSecs, dataVolumeGb, dataVolumeMbit));
}
Example 3
Source File: SavepointWriterITCase.java From flink with Apache License 2.0
private void validateBootstrap(String savepointPath) throws ProgramInvocationException {
    StreamExecutionEnvironment sEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    sEnv.setStateBackend(backend);

    CollectSink.accountList.clear();

    sEnv.fromCollection(accounts)
        .keyBy(acc -> acc.id)
        .flatMap(new UpdateAndGetAccount())
        .uid(ACCOUNT_UID)
        .addSink(new CollectSink());

    sEnv
        .fromCollection(currencyRates)
        .connect(sEnv.fromCollection(currencyRates).broadcast(descriptor))
        .process(new CurrencyValidationFunction())
        .uid(CURRENCY_UID)
        .addSink(new DiscardingSink<>());

    JobGraph jobGraph = sEnv.getStreamGraph().getJobGraph();
    jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath, false));

    ClusterClient<?> client = miniClusterResource.getClusterClient();
    ClientUtils.submitJobAndWaitForResult(client, jobGraph, SavepointWriterITCase.class.getClassLoader());

    Assert.assertEquals("Unexpected output", 3, CollectSink.accountList.size());
}
Example 4
Source File: SavepointWriterITCase.java From flink with Apache License 2.0
private void validateModification(String savepointPath) throws ProgramInvocationException {
    StreamExecutionEnvironment sEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    sEnv.setStateBackend(backend);

    CollectSink.accountList.clear();

    DataStream<Account> stream = sEnv.fromCollection(accounts)
        .keyBy(acc -> acc.id)
        .flatMap(new UpdateAndGetAccount())
        .uid(ACCOUNT_UID);

    stream.addSink(new CollectSink());

    stream
        .map(acc -> acc.id)
        .map(new StatefulOperator())
        .uid(MODIFY_UID)
        .addSink(new DiscardingSink<>());

    JobGraph jobGraph = sEnv.getStreamGraph().getJobGraph();
    jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath, false));

    ClusterClient<?> client = miniClusterResource.getClusterClient();
    ClientUtils.submitJobAndWaitForResult(client, jobGraph, SavepointWriterITCase.class.getClassLoader());

    Assert.assertEquals("Unexpected output", 3, CollectSink.accountList.size());
}
Example 5
Source File: RestClusterClientTest.java From flink with Apache License 2.0
@Test
public void testJobSubmitCancel() throws Exception {
    TestJobSubmitHandler submitHandler = new TestJobSubmitHandler();
    TestJobCancellationHandler terminationHandler = new TestJobCancellationHandler();
    TestJobExecutionResultHandler testJobExecutionResultHandler =
        new TestJobExecutionResultHandler(
            JobExecutionResultResponseBody.created(new JobResult.Builder()
                .applicationStatus(ApplicationStatus.SUCCEEDED)
                .jobId(jobId)
                .netRuntime(Long.MAX_VALUE)
                .build()));

    try (TestRestServerEndpoint restServerEndpoint = createRestServerEndpoint(
            submitHandler,
            terminationHandler,
            testJobExecutionResultHandler)) {

        try (RestClusterClient<?> restClusterClient = createRestClusterClient(restServerEndpoint.getServerAddress().getPort())) {
            Assert.assertFalse(submitHandler.jobSubmitted);
            ClientUtils.submitJobAndWaitForResult(restClusterClient, jobGraph, ClassLoader.getSystemClassLoader());
            Assert.assertTrue(submitHandler.jobSubmitted);

            Assert.assertFalse(terminationHandler.jobCanceled);
            restClusterClient.cancel(jobId).get();
            Assert.assertTrue(terminationHandler.jobCanceled);
        }
    }
}
Example 6
Source File: RestClusterClientTest.java From flink with Apache License 2.0
@Test
public void testJobSubmissionFailureThrowsProgramInvocationException() throws Exception {
    try (final TestRestServerEndpoint restServerEndpoint = createRestServerEndpoint(new SubmissionFailingHandler())) {
        RestClusterClient<?> restClusterClient = createRestClusterClient(restServerEndpoint.getServerAddress().getPort());
        try {
            ClientUtils.submitJobAndWaitForResult(restClusterClient, jobGraph, ClassLoader.getSystemClassLoader());
        } catch (final ProgramInvocationException expected) {
            // expected
        } finally {
            restClusterClient.close();
        }
    }
}
Example 7
Source File: KafkaConsumerTestBase.java From flink with Apache License 2.0
/**
 * Tests that the source can be properly canceled when reading empty partitions.
 */
public void runCancelingOnEmptyInputTest() throws Exception {
    final String topic = "cancelingOnEmptyInputTopic";

    final int parallelism = 3;
    createTestTopic(topic, parallelism, 1);

    final AtomicReference<Throwable> error = new AtomicReference<>();

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.enableCheckpointing(100);

    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);
    FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, new SimpleStringSchema(), props);

    env.addSource(source).addSink(new DiscardingSink<String>());

    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
    final JobID jobId = jobGraph.getJobID();

    final Runnable jobRunner = () -> {
        try {
            ClientUtils.submitJobAndWaitForResult(client, jobGraph, KafkaConsumerTestBase.class.getClassLoader());
        } catch (Throwable t) {
            LOG.error("Job Runner failed with exception", t);
            error.set(t);
        }
    };

    Thread runnerThread = new Thread(jobRunner, "program runner thread");
    runnerThread.start();

    // wait a bit before canceling
    Thread.sleep(2000);

    Throwable failureCause = error.get();
    if (failureCause != null) {
        failureCause.printStackTrace();
        Assert.fail("Test failed prematurely with: " + failureCause.getMessage());
    }

    // cancel
    client.cancel(jobId).get();

    // wait for the program to be done and validate that we failed with the right exception
    runnerThread.join();

    assertEquals(JobStatus.CANCELED, client.getJobStatus(jobId).get());

    deleteTestTopic(topic);
}
Example 8
Source File: AccumulatorLiveITCase.java From flink with Apache License 2.0
private static void submitJobAndVerifyResults(JobGraph jobGraph) throws Exception {
    Deadline deadline = Deadline.now().plus(Duration.ofSeconds(30));

    final ClusterClient<?> client = MINI_CLUSTER_RESOURCE.getClusterClient();

    final CheckedThread submissionThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            ClientUtils.submitJobAndWaitForResult(client, jobGraph, AccumulatorLiveITCase.class.getClassLoader());
        }
    };

    submissionThread.start();

    try {
        NotifyingMapper.notifyLatch.await();

        FutureUtils.retrySuccessfulWithDelay(
            () -> {
                try {
                    return CompletableFuture.completedFuture(client.getAccumulators(jobGraph.getJobID()).get());
                } catch (Exception e) {
                    return FutureUtils.completedExceptionally(e);
                }
            },
            Time.milliseconds(20),
            deadline,
            accumulators -> accumulators.size() == 1
                && accumulators.containsKey(ACCUMULATOR_NAME)
                && (int) accumulators.get(ACCUMULATOR_NAME) == NUM_ITERATIONS,
            TestingUtils.defaultScheduledExecutor()
        ).get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

        NotifyingMapper.shutdownLatch.trigger();
    } finally {
        NotifyingMapper.shutdownLatch.trigger();

        // wait for the job to have terminated
        submissionThread.sync();
    }
}
Example 9
Source File: RescalingITCase.java From flink with Apache License 2.0
/**
 * Tests that a job cannot be restarted from a savepoint with a different parallelism if the
 * rescaled operator has non-partitioned state.
 *
 * @throws Exception
 */
@Test
public void testSavepointRescalingNonPartitionedStateCausesException() throws Exception {
    final int parallelism = numSlots / 2;
    final int parallelism2 = numSlots;
    final int maxParallelism = 13;

    Duration timeout = Duration.ofMinutes(3);
    Deadline deadline = Deadline.now().plus(timeout);

    ClusterClient<?> client = cluster.getClusterClient();

    try {
        JobGraph jobGraph = createJobGraphWithOperatorState(parallelism, maxParallelism, OperatorCheckpointMethod.NON_PARTITIONED);

        final JobID jobID = jobGraph.getJobID();

        ClientUtils.submitJob(client, jobGraph);

        // wait until the operator is started
        StateSourceBase.workStartedLatch.await();

        CompletableFuture<String> savepointPathFuture = client.triggerSavepoint(jobID, null);

        final String savepointPath = savepointPathFuture.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

        client.cancel(jobID).get();

        while (!getRunningJobs(client).isEmpty()) {
            Thread.sleep(50);
        }

        // job successfully removed
        JobGraph scaledJobGraph = createJobGraphWithOperatorState(parallelism2, maxParallelism, OperatorCheckpointMethod.NON_PARTITIONED);

        scaledJobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath));

        ClientUtils.submitJobAndWaitForResult(client, scaledJobGraph, RescalingITCase.class.getClassLoader());
    } catch (JobExecutionException exception) {
        if (exception.getCause() instanceof IllegalStateException) {
            // we expect an IllegalStateException wrapped in a JobExecutionException,
            // because the job containing non-partitioned state is being rescaled
        } else {
            throw exception;
        }
    }
}
Example 10
Source File: SavepointITCase.java From flink with Apache License 2.0
@Test
public void testSubmitWithUnknownSavepointPath() throws Exception {
    // Config
    int numTaskManagers = 1;
    int numSlotsPerTaskManager = 1;
    int parallelism = numTaskManagers * numSlotsPerTaskManager;

    final Configuration config = new Configuration();
    config.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir.toURI().toString());

    MiniClusterWithClientResource cluster = new MiniClusterWithClientResource(
        new MiniClusterResourceConfiguration.Builder()
            .setConfiguration(config)
            .setNumberTaskManagers(numTaskManagers)
            .setNumberSlotsPerTaskManager(numSlotsPerTaskManager)
            .build());
    cluster.before();
    ClusterClient<?> client = cluster.getClusterClient();

    try {
        // High value to ensure timeouts if restarted.
        int numberOfRetries = 1000;
        // Submit the job
        // Long delay to ensure that the test times out if the job
        // manager tries to restart the job.
        final JobGraph jobGraph = createJobGraph(parallelism, numberOfRetries, 3600000);

        // Set non-existing savepoint path
        jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath("unknown path"));
        assertEquals("unknown path", jobGraph.getSavepointRestoreSettings().getRestorePath());

        LOG.info("Submitting job " + jobGraph.getJobID() + " in detached mode.");

        try {
            ClientUtils.submitJobAndWaitForResult(client, jobGraph, SavepointITCase.class.getClassLoader());
        } catch (Exception e) {
            Optional<JobExecutionException> expectedJobExecutionException = ExceptionUtils.findThrowable(e, JobExecutionException.class);
            Optional<FileNotFoundException> expectedFileNotFoundException = ExceptionUtils.findThrowable(e, FileNotFoundException.class);
            if (!(expectedJobExecutionException.isPresent() && expectedFileNotFoundException.isPresent())) {
                throw e;
            }
        }
    } finally {
        cluster.after();
    }
}
Example 11
Source File: BigUserProgramJobSubmitITCase.java From flink with Apache License 2.0
/**
 * Use a map function that references a 16 MB byte array.
 */
@Test
public void bigDataInMap() throws Exception {

    final byte[] data = new byte[16 * 1024 * 1024]; // 16 MB
    rnd.nextBytes(data); // use random data so that Java does not optimise it away
    data[1] = 0;
    data[3] = 0;
    data[5] = 0;

    CollectingSink resultSink = new CollectingSink();

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    DataStream<Integer> src = env.fromElements(1, 3, 5);

    src.map(new MapFunction<Integer, String>() {
        private static final long serialVersionUID = 1L;

        @Override
        public String map(Integer value) throws Exception {
            return "x " + value + " " + data[value];
        }
    }).addSink(resultSink);

    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());

    final RestClusterClient<StandaloneClusterId> restClusterClient = new RestClusterClient<>(
        MINI_CLUSTER_RESOURCE.getClientConfiguration(),
        StandaloneClusterId.getInstance());

    try {
        ClientUtils.submitJobAndWaitForResult(restClusterClient, jobGraph, BigUserProgramJobSubmitITCase.class.getClassLoader());

        List<String> expected = Arrays.asList("x 1 0", "x 3 0", "x 5 0");

        List<String> result = CollectingSink.result;

        Collections.sort(expected);
        Collections.sort(result);

        assertEquals(expected, result);
    } finally {
        restClusterClient.close();
    }
}
Example 12
Source File: RestClusterClientTest.java From flink with Apache License 2.0
@Test
public void testSubmitJobAndWaitForExecutionResult() throws Exception {
    final TestJobExecutionResultHandler testJobExecutionResultHandler =
        new TestJobExecutionResultHandler(
            new RestHandlerException("should trigger retry", HttpResponseStatus.SERVICE_UNAVAILABLE),
            JobExecutionResultResponseBody.inProgress(),
            JobExecutionResultResponseBody.created(new JobResult.Builder()
                .applicationStatus(ApplicationStatus.SUCCEEDED)
                .jobId(jobId)
                .netRuntime(Long.MAX_VALUE)
                .accumulatorResults(Collections.singletonMap("testName", new SerializedValue<>(OptionalFailure.of(1.0))))
                .build()),
            JobExecutionResultResponseBody.created(new JobResult.Builder()
                .applicationStatus(ApplicationStatus.FAILED)
                .jobId(jobId)
                .netRuntime(Long.MAX_VALUE)
                .serializedThrowable(new SerializedThrowable(new RuntimeException("expected")))
                .build()));

    // fail first HTTP polling attempt, which should not be a problem because of the retries
    final AtomicBoolean firstPollFailed = new AtomicBoolean();
    failHttpRequest = (messageHeaders, messageParameters, requestBody) ->
        messageHeaders instanceof JobExecutionResultHeaders && !firstPollFailed.getAndSet(true);

    try (TestRestServerEndpoint restServerEndpoint = createRestServerEndpoint(
            testJobExecutionResultHandler,
            new TestJobSubmitHandler())) {

        RestClusterClient<?> restClusterClient = createRestClusterClient(restServerEndpoint.getServerAddress().getPort());

        try {
            JobExecutionResult jobExecutionResult;

            jobExecutionResult = ClientUtils.submitJobAndWaitForResult(restClusterClient, jobGraph, ClassLoader.getSystemClassLoader());
            assertThat(jobExecutionResult.getJobID(), equalTo(jobId));
            assertThat(jobExecutionResult.getNetRuntime(), equalTo(Long.MAX_VALUE));
            assertThat(
                jobExecutionResult.getAllAccumulatorResults(),
                equalTo(Collections.singletonMap("testName", 1.0)));

            try {
                ClientUtils.submitJobAndWaitForResult(restClusterClient, jobGraph, ClassLoader.getSystemClassLoader());
                fail("Expected exception not thrown.");
            } catch (final ProgramInvocationException e) {
                final Optional<RuntimeException> cause = ExceptionUtils.findThrowable(e, RuntimeException.class);
                assertThat(cause.isPresent(), is(true));
                assertThat(cause.get().getMessage(), equalTo("expected"));
            }
        } finally {
            restClusterClient.close();
        }
    }
}