org.apache.flink.streaming.api.graph.StreamingJobGraphGenerator Java Examples

The following examples show how to use org.apache.flink.streaming.api.graph.StreamingJobGraphGenerator, the class that translates a StreamGraph produced by the DataStream API into the JobGraph that is submitted for execution. Each example is taken from an open-source project; the source file and originating project are noted above each snippet.
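Before the project examples, here is a minimal, self-contained sketch of the common pattern (an illustration written for this page, not taken from any of the projects below; the class name JobGraphSketch is made up): build a pipeline on a StreamExecutionEnvironment, obtain its StreamGraph, and pass that to StreamingJobGraphGenerator.createJobGraph. Note that the generator is an internal class; a normal application never calls it directly, since env.execute() performs the same translation under the hood.

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.runtime.jobgraph.JobGraph;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.DiscardingSink;
import org.apache.flink.streaming.api.graph.StreamGraph;
import org.apache.flink.streaming.api.graph.StreamingJobGraphGenerator;

public class JobGraphSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Any pipeline works; a bounded source feeding a discarding sink keeps the sketch small.
		env.fromElements(1, 2, 3)
			.map((MapFunction<Integer, Integer>) value -> value * 2)
			.addSink(new DiscardingSink<>());

		// getStreamGraph() captures the transformations added so far;
		// createJobGraph(...) translates them into the JobGraph a cluster executes.
		StreamGraph streamGraph = env.getStreamGraph();
		JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
		System.out.println("Generated job " + jobGraph.getJobID());
	}
}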
Example #1
Source File: AbstractOperatorRestoreTestBase.java    From Flink-CEPplus with Apache License 2.0
private JobGraph createJobGraph(ExecutionMode mode) {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500, CheckpointingMode.EXACTLY_ONCE);
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.setStateBackend((StateBackend) new MemoryStateBackend());

	switch (mode) {
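		// MIGRATE builds the job whose state is written to a savepoint; RESTORE builds the job that resumes from it.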
		case MIGRATE:
			createMigrationJob(env);
			break;
		case RESTORE:
			createRestoredJob(env);
			break;
	}

	return StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
}
 
Example #2
Source File: CheckpointExceptionHandlerConfigurationTest.java    From Flink-CEPplus with Apache License 2.0
public void doTestPropagationFromCheckpointConfig(boolean failTaskOnCheckpointErrors) throws Exception {
	StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
	streamExecutionEnvironment.setParallelism(1);
	streamExecutionEnvironment.getCheckpointConfig().setCheckpointInterval(1000);
	streamExecutionEnvironment.getCheckpointConfig().setFailOnCheckpointingErrors(failTaskOnCheckpointErrors);
	streamExecutionEnvironment.addSource(new SourceFunction<Integer>() {

		@Override
		public void run(SourceContext<Integer> ctx) throws Exception {
		}

		@Override
		public void cancel() {
		}

	}).addSink(new DiscardingSink<>());

	StreamGraph streamGraph = streamExecutionEnvironment.getStreamGraph();
	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
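	// The ExecutionConfig is shipped inside the JobGraph in serialized form; deserialize it to verify
	// that the setting from the CheckpointConfig was propagated.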
	SerializedValue<ExecutionConfig> serializedExecutionConfig = jobGraph.getSerializedExecutionConfig();
	ExecutionConfig executionConfig =
		serializedExecutionConfig.deserializeValue(Thread.currentThread().getContextClassLoader());

	Assert.assertEquals(failTaskOnCheckpointErrors, executionConfig.isFailTaskOnCheckpointError());
}
 
Example #3
Source File: AbstractOperatorRestoreTestBase.java    From flink with Apache License 2.0
private JobGraph createJobGraph(ExecutionMode mode) {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500, CheckpointingMode.EXACTLY_ONCE);
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.setStateBackend((StateBackend) new MemoryStateBackend());

	switch (mode) {
		case MIGRATE:
			createMigrationJob(env);
			break;
		case RESTORE:
			createRestoredJob(env);
			break;
	}

	return StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
}
 
Example #4
Source File: RemoteChannelThroughputBenchmark.java    From flink-benchmarks with Apache License 2.0
@Benchmark
public void remoteRebalance(FlinkEnvironmentContext context) throws Exception {
    StreamExecutionEnvironment env = context.env;
    env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
    env.setParallelism(PARALLELISM);

    DataStreamSource<Long> source = env.addSource(new LongSource(RECORDS_PER_SUBTASK));
    source
        .slotSharingGroup("source").rebalance()
        .map((MapFunction<Long, Long>) value -> value).slotSharingGroup("map").rebalance()
        .addSink(new DiscardingSink<>()).slotSharingGroup("sink");

    miniCluster.executeJobBlocking(StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph()));
}
 
Example #5
Source File: KafkaConsumerTestBase.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the source can be properly canceled when reading empty partitions.
 */
public void runCancelingOnEmptyInputTest() throws Exception {
	final String topic = "cancelingOnEmptyInputTopic";

	final int parallelism = 3;
	createTestTopic(topic, parallelism, 1);

	final AtomicReference<Throwable> error = new AtomicReference<>();

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.enableCheckpointing(100);
	env.getConfig().disableSysoutLogging();

	Properties props = new Properties();
	props.putAll(standardProps);
	props.putAll(secureProps);
	FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, new SimpleStringSchema(), props);

	env.addSource(source).addSink(new DiscardingSink<String>());

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
	final JobID jobId = jobGraph.getJobID();

	final Runnable jobRunner = new Runnable() {
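		// submitJob() in attached mode blocks until the job terminates, so it runs on this
		// thread while the test cancels the job from the main thread.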
		@Override
		public void run() {
			try {
				client.setDetached(false);
				client.submitJob(jobGraph, KafkaConsumerTestBase.class.getClassLoader());
			}
			catch (Throwable t) {
				LOG.error("Job Runner failed with exception", t);
				error.set(t);
			}
		}
	};

	Thread runnerThread = new Thread(jobRunner, "program runner thread");
	runnerThread.start();

	// wait a bit before canceling
	Thread.sleep(2000);

	Throwable failureCause = error.get();
	if (failureCause != null) {
		failureCause.printStackTrace();
		Assert.fail("Test failed prematurely with: " + failureCause.getMessage());
	}
	// cancel
	client.cancel(jobId);

	// wait for the program to be done and validate that we failed with the right exception
	runnerThread.join();

	assertEquals(JobStatus.CANCELED, client.getJobStatus(jobId).get());

	deleteTestTopic(topic);
}
 
Example #6
Source File: BigUserProgramJobSubmitITCase.java    From Flink-CEPplus with Apache License 2.0
/**
 * Use a map function that references a 16 MB byte array.
 */
@Test
public void bigDataInMap() throws Exception {

	final byte[] data = new byte[16 * 1024 * 1024]; // 16 MB
	rnd.nextBytes(data); // use random data so that Java does not optimise it away
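	// zero the bytes read by the map function below so the expected output is deterministic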
	data[1] = 0;
	data[3] = 0;
	data[5] = 0;

	CollectingSink resultSink = new CollectingSink();

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);

	DataStream<Integer> src = env.fromElements(1, 3, 5);

	src.map(new MapFunction<Integer, String>() {
		private static final long serialVersionUID = 1L;

		@Override
		public String map(Integer value) throws Exception {
			return "x " + value + " " + data[value];
		}
	}).addSink(resultSink);

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());

	final RestClusterClient<StandaloneClusterId> restClusterClient = new RestClusterClient<>(
		MINI_CLUSTER_RESOURCE.getClientConfiguration(),
		StandaloneClusterId.getInstance());

	try {
		restClusterClient.setDetached(false);
		restClusterClient.submitJob(jobGraph, BigUserProgramJobSubmitITCase.class.getClassLoader());

		List<String> expected = Arrays.asList("x 1 0", "x 3 0", "x 5 0");

		List<String> result = CollectingSink.result;

		Collections.sort(expected);
		Collections.sort(result);

		assertEquals(expected, result);
	} finally {
		restClusterClient.shutdown();
	}
}
 
Example #7
Source File: FlinkPulsarITest.java    From pulsar-flink with Apache License 2.0
@Test
public void testCancelingOnFullInput() throws Exception {
    String tp = newTopic();
    int parallelism = 3;
    createTopic(tp, parallelism, adminUrl);

    InfiniteStringGenerator generator = new InfiniteStringGenerator(tp);
    generator.start();
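    // the generator now keeps the topic full of records, so the job is canceled while input is flowing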

    // launch a consumer asynchronously

    AtomicReference<Throwable> jobError = new AtomicReference<>();

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.enableCheckpointing(100);
    env.getConfig().disableSysoutLogging();

    Properties prop = sourceProperties();
    prop.setProperty(TOPIC_SINGLE_OPTION_KEY, tp);
    env.addSource(new FlinkPulsarRowSource(serviceUrl, adminUrl, prop).setStartFromEarliest())
            .addSink(new DiscardingSink<>());

    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
    JobID jobid = jobGraph.getJobID();

    Thread jobRunner = new Thread("program runner thread") {
        @Override
        public void run() {
            try {
                client.setDetached(false);
                client.submitJob(jobGraph, getClass().getClassLoader());
            } catch (Throwable e) {
                jobError.set(e);
            }
        }
    };
    jobRunner.start();

    Thread.sleep(2000);
    Throwable failureCause = jobError.get();

    if (failureCause != null) {
        failureCause.printStackTrace();
        fail("Test failed prematurely with: " + failureCause.getMessage());
    }

    client.cancel(jobid);

    jobRunner.join();

    assertEquals(JobStatus.CANCELED, client.getJobStatus(jobid).get());

    if (generator.isAlive()) {
        generator.shutdown();
        generator.join();
    } else {
        Throwable t = generator.getError();
        if (t != null) {
            t.printStackTrace();
            fail("Generator failed " + t.getMessage());
        } else {
            fail("Generator failed with no exception");
        }
    }
}
 
Example #8
Source File: FlinkPulsarITest.java    From pulsar-flink with Apache License 2.0
@Test
public void testOnEmptyInput() throws Exception {
    String tp = newTopic();
    int parallelism = 3;
    createTopic(tp, parallelism, adminUrl);

    // launch a consumer asynchronously

    AtomicReference<Throwable> jobError = new AtomicReference<>();

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parallelism);
    env.enableCheckpointing(100);
    env.getConfig().disableSysoutLogging();

    Properties prop = sourceProperties();
    prop.setProperty(TOPIC_SINGLE_OPTION_KEY, tp);
    env.addSource(new FlinkPulsarRowSource(serviceUrl, adminUrl, prop).setStartFromEarliest())
            .addSink(new DiscardingSink<>());

    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
    JobID jobid = jobGraph.getJobID();

    Thread jobRunner = new Thread("program runner thread") {
        @Override
        public void run() {
            try {
                client.setDetached(false);
                client.submitJob(jobGraph, getClass().getClassLoader());
            } catch (Throwable e) {
                jobError.set(e);
            }
        }
    };
    jobRunner.start();

    Thread.sleep(2000);
    Throwable failureCause = jobError.get();

    if (failureCause != null) {
        failureCause.printStackTrace();
        fail("Test failed prematurely with: " + failureCause.getMessage());
    }

    client.cancel(jobid);

    jobRunner.join();

    assertEquals(JobStatus.CANCELED, client.getJobStatus(jobid).get());
}
 
Example #9
Source File: KafkaConsumerTestBase.java    From flink with Apache License 2.0
/**
 * Tests that the source can be properly canceled when reading empty partitions.
 */
public void runCancelingOnEmptyInputTest() throws Exception {
	final String topic = "cancelingOnEmptyInputTopic";

	final int parallelism = 3;
	createTestTopic(topic, parallelism, 1);

	final AtomicReference<Throwable> error = new AtomicReference<>();

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.enableCheckpointing(100);
	env.getConfig().disableSysoutLogging();

	Properties props = new Properties();
	props.putAll(standardProps);
	props.putAll(secureProps);
	FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, new SimpleStringSchema(), props);

	env.addSource(source).addSink(new DiscardingSink<String>());

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
	final JobID jobId = jobGraph.getJobID();

	final Runnable jobRunner = new Runnable() {
		@Override
		public void run() {
			try {
				client.setDetached(false);
				client.submitJob(jobGraph, KafkaConsumerTestBase.class.getClassLoader());
			}
			catch (Throwable t) {
				LOG.error("Job Runner failed with exception", t);
				error.set(t);
			}
		}
	};

	Thread runnerThread = new Thread(jobRunner, "program runner thread");
	runnerThread.start();

	// wait a bit before canceling
	Thread.sleep(2000);

	Throwable failureCause = error.get();
	if (failureCause != null) {
		failureCause.printStackTrace();
		Assert.fail("Test failed prematurely with: " + failureCause.getMessage());
	}
	// cancel
	client.cancel(jobId);

	// wait for the program to be done and validate that we failed with the right exception
	runnerThread.join();

	assertEquals(JobStatus.CANCELED, client.getJobStatus(jobId).get());

	deleteTestTopic(topic);
}
 
Example #10
Source File: BigUserProgramJobSubmitITCase.java    From flink with Apache License 2.0
/**
 * Use a map function that references a 16 MB byte array.
 */
@Test
public void bigDataInMap() throws Exception {

	final byte[] data = new byte[16 * 1024 * 1024]; // 16 MB
	rnd.nextBytes(data); // use random data so that Java does not optimise it away
	data[1] = 0;
	data[3] = 0;
	data[5] = 0;

	CollectingSink resultSink = new CollectingSink();

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);

	DataStream<Integer> src = env.fromElements(1, 3, 5);

	src.map(new MapFunction<Integer, String>() {
		private static final long serialVersionUID = 1L;

		@Override
		public String map(Integer value) throws Exception {
			return "x " + value + " " + data[value];
		}
	}).addSink(resultSink);

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());

	final RestClusterClient<StandaloneClusterId> restClusterClient = new RestClusterClient<>(
		MINI_CLUSTER_RESOURCE.getClientConfiguration(),
		StandaloneClusterId.getInstance());

	try {
		restClusterClient.setDetached(false);
		restClusterClient.submitJob(jobGraph, BigUserProgramJobSubmitITCase.class.getClassLoader());

		List<String> expected = Arrays.asList("x 1 0", "x 3 0", "x 5 0");

		List<String> result = CollectingSink.result;

		Collections.sort(expected);
		Collections.sort(result);

		assertEquals(expected, result);
	} finally {
		restClusterClient.shutdown();
	}
}
 
Example #11
Source File: KafkaConsumerTestBase.java    From flink with Apache License 2.0
/**
 * Tests that the source can be properly canceled when reading empty partitions.
 */
public void runCancelingOnEmptyInputTest() throws Exception {
	final String topic = "cancelingOnEmptyInputTopic";

	final int parallelism = 3;
	createTestTopic(topic, parallelism, 1);

	final AtomicReference<Throwable> error = new AtomicReference<>();

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.enableCheckpointing(100);

	Properties props = new Properties();
	props.putAll(standardProps);
	props.putAll(secureProps);
	FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, new SimpleStringSchema(), props);

	env.addSource(source).addSink(new DiscardingSink<String>());

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());
	final JobID jobId = jobGraph.getJobID();

	final Runnable jobRunner = () -> {
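		// submitJobAndWaitForResult blocks until the job reaches a terminal state,
		// so it runs here while the main thread cancels the job.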
		try {
			ClientUtils.submitJobAndWaitForResult(client, jobGraph, KafkaConsumerTestBase.class.getClassLoader());
		} catch (Throwable t) {
			LOG.error("Job Runner failed with exception", t);
			error.set(t);
		}
	};

	Thread runnerThread = new Thread(jobRunner, "program runner thread");
	runnerThread.start();

	// wait a bit before canceling
	Thread.sleep(2000);

	Throwable failureCause = error.get();
	if (failureCause != null) {
		failureCause.printStackTrace();
		Assert.fail("Test failed prematurely with: " + failureCause.getMessage());
	}
	// cancel
	client.cancel(jobId).get();

	// wait for the program to be done and validate that we failed with the right exception
	runnerThread.join();

	assertEquals(JobStatus.CANCELED, client.getJobStatus(jobId).get());

	deleteTestTopic(topic);
}
 
Example #12
Source File: BigUserProgramJobSubmitITCase.java    From flink with Apache License 2.0
/**
 * Use a map function that references a 16 MB byte array.
 */
@Test
public void bigDataInMap() throws Exception {

	final byte[] data = new byte[16 * 1024 * 1024]; // 16 MB
	rnd.nextBytes(data); // use random data so that Java does not optimise it away
	data[1] = 0;
	data[3] = 0;
	data[5] = 0;

	CollectingSink resultSink = new CollectingSink();

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);

	DataStream<Integer> src = env.fromElements(1, 3, 5);

	src.map(new MapFunction<Integer, String>() {
		private static final long serialVersionUID = 1L;

		@Override
		public String map(Integer value) throws Exception {
			return "x " + value + " " + data[value];
		}
	}).addSink(resultSink);

	JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(env.getStreamGraph());

	final RestClusterClient<StandaloneClusterId> restClusterClient = new RestClusterClient<>(
		MINI_CLUSTER_RESOURCE.getClientConfiguration(),
		StandaloneClusterId.getInstance());

	try {
		ClientUtils.submitJobAndWaitForResult(restClusterClient, jobGraph, BigUserProgramJobSubmitITCase.class.getClassLoader());

		List<String> expected = Arrays.asList("x 1 0", "x 3 0", "x 5 0");

		List<String> result = CollectingSink.result;

		Collections.sort(expected);
		Collections.sort(result);

		assertEquals(expected, result);
	} finally {
		restClusterClient.close();
	}
}