Java Code Examples for org.apache.flink.api.common.restartstrategy.RestartStrategies

The following examples show how to use org.apache.flink.api.common.restartstrategy.RestartStrategies. These examples are extracted from open source projects; the project, source file, and license are listed above each example.
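Before the project examples, here is a minimal sketch (written for this page, not taken from any of the listed projects) of the three configurations of RestartStrategies that recur below: fixed-delay restart, failure-rate restart, and no restart. The class name and job name are placeholders; only the RestartStrategies and setRestartStrategy calls reflect the actual Flink API.

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.util.concurrent.TimeUnit;

public class RestartStrategiesSketch {

	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// retry the job at most 3 times, waiting 10 seconds between attempts
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, Time.of(10, TimeUnit.SECONDS)));

		// alternative: tolerate at most 3 failures per 5-minute interval, with a 10-second delay between restarts
		// env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.minutes(5), Time.seconds(10)));

		// alternative: do not restart at all; the first task failure fails the job
		// env.setRestartStrategy(RestartStrategies.noRestart());

		env.fromElements(1, 2, 3).print();
		env.execute("restart strategy sketch");
	}
}
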
Example 1
Source Project: flink   Source File: KafkaExampleUtil.java    License: Apache License 2.0
public static StreamExecutionEnvironment prepareExecutionEnv(ParameterTool parameterTool)
	throws Exception {

	if (parameterTool.getNumberOfParameters() < 5) {
		System.out.println("Missing parameters!\n" +
			"Usage: Kafka --input-topic <topic> --output-topic <topic> " +
			"--bootstrap.servers <kafka brokers> " +
			"--zookeeper.connect <zk quorum> --group.id <some id>");
		throw new Exception("Missing parameters!\n" +
			"Usage: Kafka --input-topic <topic> --output-topic <topic> " +
			"--bootstrap.servers <kafka brokers> " +
			"--zookeeper.connect <zk quorum> --group.id <some id>");
	}

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.getConfig().disableSysoutLogging();
	env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 10000));
	env.enableCheckpointing(5000); // create a checkpoint every 5 seconds
	env.getConfig().setGlobalJobParameters(parameterTool); // make parameters available in the web interface
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

	return env;
}
 
Example 2
Source Project: flink   Source File: CassandraTupleWriteAheadSinkExample.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(1);
	env.enableCheckpointing(1000);
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
	env.setStateBackend(new FsStateBackend("file:///" + System.getProperty("java.io.tmpdir") + "/flink/backend"));

	CassandraSink<Tuple2<String, Integer>> sink = CassandraSink.addSink(env.addSource(new MySource()))
		.setQuery("INSERT INTO example.values (id, counter) values (?, ?);")
		.enableWriteAheadLog()
		.setClusterBuilder(new ClusterBuilder() {

			private static final long serialVersionUID = 2793938419775311824L;

			@Override
			public Cluster buildCluster(Cluster.Builder builder) {
				return builder.addContactPoint("127.0.0.1").build();
			}
		})
		.build();

	sink.name("Cassandra Sink").disableChaining().setParallelism(1).uid("hello");

	env.execute();
}
 
Example 3
Source Project: pulsar-flink   Source File: FlinkPulsarITest.java    License: Apache License 2.0
@Test
public void testRunFailedOnWrongServiceUrl() {

    try {
        Properties props = MapUtils.toProperties(Collections.singletonMap(TOPIC_SINGLE_OPTION_KEY, "tp"));

        StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
        see.getConfig().disableSysoutLogging();
        see.setRestartStrategy(RestartStrategies.noRestart());
        see.setParallelism(1);

        FlinkPulsarSource<String> source =
                new FlinkPulsarSource<String>("sev", "admin", new SimpleStringSchema(), props).setStartFromEarliest();

        DataStream<String> stream = see.addSource(source);
        stream.print();
        see.execute("wrong service url");
    } catch (Exception e) {
        final Optional<Throwable> optionalThrowable = ExceptionUtils.findThrowableWithMessage(e, "authority component is missing");
        assertTrue(optionalThrowable.isPresent());
        assertTrue(optionalThrowable.get() instanceof PulsarClientException);
    }
}
 
Example 4
Source Project: flink   Source File: RestartStrategyTest.java    License: Apache License 2.0
/**
 * Tests that in a streaming use case where checkpointing is enabled, there is no default strategy set on the
 * client side.
 */
@Test
public void testFallbackStrategyOnClientSideWhenCheckpointingEnabled() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500);

	env.fromElements(1).print();

	StreamGraph graph = env.getStreamGraph();
	JobGraph jobGraph = graph.getJobGraph();

	RestartStrategies.RestartStrategyConfiguration restartStrategy =
		jobGraph.getSerializedExecutionConfig().deserializeValue(getClass().getClassLoader()).getRestartStrategy();

	Assert.assertNotNull(restartStrategy);
	Assert.assertTrue(restartStrategy instanceof RestartStrategies.FallbackRestartStrategyConfiguration);
}
 
Example 5
Source Project: flink   Source File: JobRecoveryITCase.java    License: Apache License 2.0
private JobGraph createjobGraph(boolean slotSharingEnabled) throws IOException {
	final JobVertex sender = new JobVertex("Sender");
	sender.setParallelism(PARALLELISM);
	sender.setInvokableClass(TestingAbstractInvokables.Sender.class);

	final JobVertex receiver = new JobVertex("Receiver");
	receiver.setParallelism(PARALLELISM);
	receiver.setInvokableClass(FailingOnceReceiver.class);
	FailingOnceReceiver.reset();

	if (slotSharingEnabled) {
		final SlotSharingGroup slotSharingGroup = new SlotSharingGroup();
		receiver.setSlotSharingGroup(slotSharingGroup);
		sender.setSlotSharingGroup(slotSharingGroup);
	}

	receiver.connectNewDataSetAsInput(sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

	final ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0L));

	final JobGraph jobGraph = new JobGraph(getClass().getSimpleName(), sender, receiver);
	jobGraph.setExecutionConfig(executionConfig);

	return jobGraph;
}
 
Example 6
Source Project: flink   Source File: DataStreamAllroundTestJobFactory.java    License: Apache License 2.0
private static void setupRestartStrategy(final StreamExecutionEnvironment env, final ParameterTool pt) {
	String restartStrategyConfig = pt.get(ENVIRONMENT_RESTART_STRATEGY.key());
	if (restartStrategyConfig != null) {
		RestartStrategies.RestartStrategyConfiguration restartStrategy;
		switch (restartStrategyConfig) {
			case "fixed_delay":
				restartStrategy = RestartStrategies.fixedDelayRestart(
					pt.getInt(
						ENVIRONMENT_RESTART_STRATEGY_FIXED_ATTEMPTS.key(),
						ENVIRONMENT_RESTART_STRATEGY_FIXED_ATTEMPTS.defaultValue()),
					pt.getLong(
						ENVIRONMENT_RESTART_STRATEGY_FIXED_DELAY.key(),
						ENVIRONMENT_RESTART_STRATEGY_FIXED_DELAY.defaultValue()));
				break;
			case "no_restart":
				restartStrategy = RestartStrategies.noRestart();
				break;
			default:
				throw new IllegalArgumentException("Unknown restart strategy: " + restartStrategyConfig);
		}
		env.setRestartStrategy(restartStrategy);
	}
}
 
Example 7
Source Project: flink   Source File: SchedulingITCase.java    License: Apache License 2.0
@Nonnull
private JobGraph createJobGraph(long delay, int parallelism) throws IOException {
	SlotSharingGroup slotSharingGroup = new SlotSharingGroup();

	final JobVertex source = new JobVertex("source");
	source.setInvokableClass(OneTimeFailingInvokable.class);
	source.setParallelism(parallelism);
	source.setSlotSharingGroup(slotSharingGroup);

	final JobVertex sink = new JobVertex("sink");
	sink.setInvokableClass(NoOpInvokable.class);
	sink.setParallelism(parallelism);
	sink.setSlotSharingGroup(slotSharingGroup);

	sink.connectNewDataSetAsInput(source, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
	JobGraph jobGraph = new JobGraph(source, sink);

	jobGraph.setScheduleMode(ScheduleMode.EAGER);

	ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, delay));
	jobGraph.setExecutionConfig(executionConfig);

	return jobGraph;
}
 
Example 8
Source Project: flink   Source File: ExecutionConfig.java    License: Apache License 2.0
/**
 * Returns the restart strategy which has been set for the current job.
 *
 * @return The specified restart configuration
 */
@PublicEvolving
@SuppressWarnings("deprecation")
public RestartStrategies.RestartStrategyConfiguration getRestartStrategy() {
	if (restartStrategyConfiguration instanceof RestartStrategies.FallbackRestartStrategyConfiguration) {
		// support the old API calls by creating a restart strategy from them
		if (getNumberOfExecutionRetries() > 0 && getExecutionRetryDelay() >= 0) {
			return RestartStrategies.fixedDelayRestart(getNumberOfExecutionRetries(), getExecutionRetryDelay());
		} else if (getNumberOfExecutionRetries() == 0) {
			return RestartStrategies.noRestart();
		} else {
			return restartStrategyConfiguration;
		}
	} else {
		return restartStrategyConfiguration;
	}
}
 
Example 9
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.getConfig().setGlobalJobParameters(ParameterTool.fromArgs(args));
    // restart every 10 seconds; if the job has restarted three times within two minutes, stop it
    env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.minutes(2), Time.seconds(10)));

    env.addSource(new SourceFunction<Long>() {
        @Override
        public void run(SourceContext<Long> sourceContext) throws Exception {
            while (true) {
                sourceContext.collect(null);
            }
        }
        @Override
        public void cancel() {
        }
    })
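            // unboxing the null values emitted by the source throws a NullPointerException,
            // which repeatedly fails the job and exercises the failure-rate restart strategy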
            .map((MapFunction<Long, Long>) aLong -> aLong / 1)
            .print();

    env.execute("zhisheng failureRate Restart Strategy example");
}
 
Example 10
Source Project: Flink-CEPplus   Source File: RestartStrategyTest.java    License: Apache License 2.0
/**
 * Checks that in a streaming use case where checkpointing is enabled and the number
 * of execution retries is set to 0, restarting is deactivated.
 */
@Test
public void testNoRestartingWhenCheckpointingAndExplicitExecutionRetriesZero() throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500);
	env.setNumberOfExecutionRetries(0);

	env.fromElements(1).print();

	StreamGraph graph = env.getStreamGraph();
	JobGraph jobGraph = graph.getJobGraph();

	RestartStrategies.RestartStrategyConfiguration restartStrategy =
		jobGraph.getSerializedExecutionConfig().deserializeValue(getClass().getClassLoader()).getRestartStrategy();

	Assert.assertNotNull(restartStrategy);
	Assert.assertTrue(restartStrategy instanceof RestartStrategies.NoRestartStrategyConfiguration);
}
 
Example 11
Source Project: flink   Source File: AbstractQueryableStateTestBase.java    License: Apache License 2.0
/**
 * Tests simple value state queryable state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds after each subtask index is queried with
 * value numElements (the latest element updated the state).
 */
@Test
public void testValueState() throws Exception {
	final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
	final long numElements = 1024L;

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStateBackend(stateBackend);
	env.setParallelism(maxParallelism);
	// Very important, because cluster is shared between tests and we
	// don't explicitly check that all slots are available before
	// submitting.
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

	DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

	// Value state
	ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>("any", source.getType());

	source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
		private static final long serialVersionUID = 7662520075515707428L;

		@Override
		public Integer getKey(Tuple2<Integer, Long> value) {
			return value.f0;
		}
	}).asQueryableState("hakuna", valueState);

	try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(deadline, clusterClient, env)) {

		final JobID jobId = autoCancellableJob.getJobId();
		final JobGraph jobGraph = autoCancellableJob.getJobGraph();

		ClientUtils.submitJob(clusterClient, jobGraph);
		executeValueQuery(deadline, client, jobId, "hakuna", valueState, numElements);
	}
}
 
Example 12
Source Project: Flink-CEPplus   Source File: KafkaShortRetentionTestBase.java    License: Apache License 2.0
/**
 * Ensure that the consumer is properly failing if "auto.offset.reset" is set to "none".
 */
public void runFailOnAutoOffsetResetNone() throws Exception {
	final String topic = "auto-offset-reset-none-test";
	final int parallelism = 1;

	kafkaServer.createTestTopic(topic, parallelism, 1);

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.setRestartStrategy(RestartStrategies.noRestart()); // fail immediately
	env.getConfig().disableSysoutLogging();

	// ----------- add consumer ----------

	Properties customProps = new Properties();
	customProps.putAll(standardProps);
	customProps.putAll(secureProps);
	customProps.setProperty("auto.offset.reset", "none"); // test that "none" leads to an exception
	FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer(topic, new SimpleStringSchema(), customProps);

	DataStreamSource<String> consuming = env.addSource(source);
	consuming.addSink(new DiscardingSink<String>());

	try {
		env.execute("Test auto offset reset none");
	} catch (Throwable e) {
		// check if correct exception has been thrown
		if (!e.getCause().getCause().getMessage().contains("Unable to find previous offset")  // kafka 0.8
			&& !e.getCause().getCause().getMessage().contains("Undefined offset with no reset policy for partition") // kafka 0.9
				) {
			throw e;
		}
	}

	kafkaServer.deleteTestTopic(topic);
}
 
Example 13
Source Project: pulsar-flink   Source File: FlinkPulsarITest.java    License: Apache License 2.0
@Test
public void testOne2OneExactlyOnce() throws Exception {
    String topic = newTopic();
    int parallelism = 5;
    int numElementsPerPartition = 1000;
    int totalElements = parallelism * numElementsPerPartition;
    int failAfterElements = numElementsPerPartition / 3;

    List<String> allTopicNames = new ArrayList<>();
    for (int i = 0; i < parallelism; i++) {
        allTopicNames.add(topic + "-partition-" + i);
    }

    createTopic(topic, parallelism, adminUrl);

    generateRandomizedIntegerSequence(
            StreamExecutionEnvironment.getExecutionEnvironment(), topic, parallelism, numElementsPerPartition, true);

    // run the topology that fails and recovers

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(500);
    env.setParallelism(parallelism);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
    env.getConfig().disableSysoutLogging();

    Properties sourceProps = sourceProperties();
    sourceProps.setProperty(TOPIC_MULTI_OPTION_KEY, StringUtils.join(allTopicNames, ','));

    env.addSource(new FlinkPulsarRowSource(serviceUrl, adminUrl, sourceProps).setStartFromEarliest())
            .map(new PartitionValidationMapper(parallelism, 1))
            .map(new FailingIdentityMapper<Row>(failAfterElements))
            .addSink(new ValidatingExactlyOnceSink(totalElements)).setParallelism(1);

    FailingIdentityMapper.failedBefore = false;
    TestUtils.tryExecute(env, "one to one exactly once test");
}
 
Example 14
Source Project: flink   Source File: SimpleRecoveryITCaseBase.java    License: Apache License 2.0
@Test
public void testRestartMultipleTimes() {
	try {
		List<Long> resultCollection = new ArrayList<Long>();

		ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

		env.setParallelism(4);
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(5, 100));
		env.getConfig().disableSysoutLogging();

		env.generateSequence(1, 10)
				.rebalance()
				.map(new FailingMapper3<Long>())
				.reduce(new ReduceFunction<Long>() {
					@Override
					public Long reduce(Long value1, Long value2) {
						return value1 + value2;
					}
				})
				.output(new LocalCollectionOutputFormat<Long>(resultCollection));

		executeAndRunAssertions(env);

		long sum = 0;
		for (long l : resultCollection) {
			sum += l;
		}
		assertEquals(55, sum);
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	} finally {
		FailingMapper3.failuresBeforeSuccess = 3;
	}
}
 
Example 15
Source Project: Flink-CEPplus   Source File: RescalingITCase.java    License: Apache License 2.0
private static JobGraph createJobGraphWithOperatorState(
		int parallelism, int maxParallelism, OperatorCheckpointMethod checkpointMethod) {

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.getConfig().setMaxParallelism(maxParallelism);
	env.enableCheckpointing(Long.MAX_VALUE);
	env.setRestartStrategy(RestartStrategies.noRestart());

	StateSourceBase.workStartedLatch = new CountDownLatch(parallelism);

	SourceFunction<Integer> src;

	switch (checkpointMethod) {
		case CHECKPOINTED_FUNCTION:
			src = new PartitionedStateSource(false);
			break;
		case CHECKPOINTED_FUNCTION_BROADCAST:
			src = new PartitionedStateSource(true);
			break;
		case LIST_CHECKPOINTED:
			src = new PartitionedStateSourceListCheckpointed();
			break;
		case NON_PARTITIONED:
			src = new NonPartitionedStateSource();
			break;
		default:
			throw new IllegalArgumentException();
	}

	DataStream<Integer> input = env.addSource(src);

	input.addSink(new DiscardingSink<Integer>());

	return env.getStreamGraph().getJobGraph();
}
 
Example 16
Source Project: flink   Source File: StateBackendITCase.java    License: Apache License 2.0
/**
 * Verify that the user-specified state backend is used even if checkpointing is disabled.
 */
@Test
public void testStateBackendWithoutCheckpointing() throws Exception {

	StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
	see.setParallelism(1);

	see.getConfig().setRestartStrategy(RestartStrategies.noRestart());
	see.setStateBackend(new FailingStateBackend());

	see.fromElements(new Tuple2<>("Hello", 1))
		.keyBy(0)
		.map(new RichMapFunction<Tuple2<String, Integer>, String>() {
			private static final long serialVersionUID = 1L;

			@Override
			public void open(Configuration parameters) throws Exception {
				super.open(parameters);
				getRuntimeContext().getState(new ValueStateDescriptor<>("Test", Integer.class));
			}

			@Override
			public String map(Tuple2<String, Integer> value) throws Exception {
				return value.f0;
			}
		})
		.print();

	try {
		see.execute();
		fail();
	}
	catch (JobExecutionException e) {
		assertTrue(ExceptionUtils.findThrowable(e, SuccessException.class).isPresent());
	}
}
 
Example 17
Source Project: Flink-CEPplus   Source File: RescalingITCase.java    License: Apache License 2.0
private static JobGraph createJobGraphWithKeyedAndNonPartitionedOperatorState(
		int parallelism,
		int maxParallelism,
		int fixedParallelism,
		int numberKeys,
		int numberElements,
		boolean terminateAfterEmission,
		int checkpointingInterval) {

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.getConfig().setMaxParallelism(maxParallelism);
	env.enableCheckpointing(checkpointingInterval);
	env.setRestartStrategy(RestartStrategies.noRestart());

	DataStream<Integer> input = env.addSource(new SubtaskIndexNonPartitionedStateSource(
			numberKeys,
			numberElements,
			terminateAfterEmission))
			.setParallelism(fixedParallelism)
			.keyBy(new KeySelector<Integer, Integer>() {
				private static final long serialVersionUID = -7952298871120320940L;

				@Override
				public Integer getKey(Integer value) throws Exception {
					return value;
				}
			});

	SubtaskIndexFlatMapper.workCompletedLatch = new CountDownLatch(numberKeys);

	DataStream<Tuple2<Integer, Integer>> result = input.flatMap(new SubtaskIndexFlatMapper(numberElements));

	result.addSink(new CollectionSink<Tuple2<Integer, Integer>>());

	return env.getStreamGraph().getJobGraph();
}
 
Example 18
Source Project: flink   Source File: ExecutionGraphRestartTest.java    License: Apache License 2.0
private static JobGraph createJobGraphToCancel() throws IOException {
	JobVertex vertex = ExecutionGraphTestUtils.createJobVertex("Test Vertex", 1, NoOpInvokable.class);
	ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(
		Integer.MAX_VALUE, Integer.MAX_VALUE));
	JobGraph jobGraph = new JobGraph("Test Job", vertex);
	jobGraph.setExecutionConfig(executionConfig);
	return jobGraph;
}
 
Example 19
Source Project: flink   Source File: AbstractQueryableStateTestBase.java    License: Apache License 2.0
/**
 * Tests simple value state queryable state instance. Each source emits
 * (subtaskIndex, 0)..(subtaskIndex, numElements) tuples, which are then
 * queried. The test succeeds after each subtask index is queried with
 * value numElements (the latest element updated the state).
 */
@Test
public void testValueState() throws Exception {
	final Deadline deadline = Deadline.now().plus(TEST_TIMEOUT);
	final long numElements = 1024L;

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setStateBackend(stateBackend);
	env.setParallelism(maxParallelism);
	// Very important, because cluster is shared between tests and we
	// don't explicitly check that all slots are available before
	// submitting.
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000L));

	DataStream<Tuple2<Integer, Long>> source = env.addSource(new TestAscendingValueSource(numElements));

	// Value state
	ValueStateDescriptor<Tuple2<Integer, Long>> valueState = new ValueStateDescriptor<>("any", source.getType());

	source.keyBy(new KeySelector<Tuple2<Integer, Long>, Integer>() {
		private static final long serialVersionUID = 7662520075515707428L;

		@Override
		public Integer getKey(Tuple2<Integer, Long> value) {
			return value.f0;
		}
	}).asQueryableState("hakuna", valueState);

	try (AutoCancellableJob autoCancellableJob = new AutoCancellableJob(deadline, clusterClient, env)) {

		final JobID jobId = autoCancellableJob.getJobId();
		final JobGraph jobGraph = autoCancellableJob.getJobGraph();

		clusterClient.setDetached(true);
		clusterClient.submitJob(jobGraph, AbstractQueryableStateTestBase.class.getClassLoader());

		executeValueQuery(deadline, client, jobId, "hakuna", valueState, numElements);
	}
}
 
Example 20
Source Project: flink   Source File: ExecutionEntry.java    License: Apache License 2.0
public RestartStrategies.RestartStrategyConfiguration getRestartStrategy() {
	return properties.getOptionalString(EXECUTION_RESTART_STRATEGY_TYPE)
		.flatMap((v) -> {
			switch (v) {
				case EXECUTION_RESTART_STRATEGY_TYPE_VALUE_NONE:
					return Optional.of(RestartStrategies.noRestart());
				case EXECUTION_RESTART_STRATEGY_TYPE_VALUE_FIXED_DELAY:
					final int attempts = properties.getOptionalInt(EXECUTION_RESTART_STRATEGY_ATTEMPTS)
						.orElseGet(() -> useDefaultValue(EXECUTION_RESTART_STRATEGY_ATTEMPTS, Integer.MAX_VALUE));
					final long fixedDelay = properties.getOptionalLong(EXECUTION_RESTART_STRATEGY_DELAY)
						.orElseGet(() -> useDefaultValue(EXECUTION_RESTART_STRATEGY_DELAY, 10_000L));
					return Optional.of(RestartStrategies.fixedDelayRestart(attempts, fixedDelay));
				case EXECUTION_RESTART_STRATEGY_TYPE_VALUE_FAILURE_RATE:
					final int failureRate = properties.getOptionalInt(EXECUTION_RESTART_STRATEGY_MAX_FAILURES_PER_INTERVAL)
						.orElseGet(() -> useDefaultValue(EXECUTION_RESTART_STRATEGY_MAX_FAILURES_PER_INTERVAL, 1));
					final long failureInterval = properties.getOptionalLong(EXECUTION_RESTART_STRATEGY_FAILURE_RATE_INTERVAL)
						.orElseGet(() -> useDefaultValue(EXECUTION_RESTART_STRATEGY_FAILURE_RATE_INTERVAL, 60_000L));
					final long attemptDelay = properties.getOptionalLong(EXECUTION_RESTART_STRATEGY_DELAY)
						.orElseGet(() -> useDefaultValue(EXECUTION_RESTART_STRATEGY_DELAY, 10_000L));
					return Optional.of(RestartStrategies.failureRateRestart(
						failureRate,
						Time.milliseconds(failureInterval),
						Time.milliseconds(attemptDelay)));
				default:
					return Optional.empty();
				}
		})
		.orElseGet(() ->
			useDefaultValue(
				EXECUTION_RESTART_STRATEGY_TYPE,
				RestartStrategies.fallBackRestart(),
				EXECUTION_RESTART_STRATEGY_TYPE_VALUE_FALLBACK));
}
 
Example 21
Source Project: flink   Source File: RescalingITCase.java    License: Apache License 2.0
private static JobGraph createJobGraphWithKeyedState(
		int parallelism,
		int maxParallelism,
		int numberKeys,
		int numberElements,
		boolean terminateAfterEmission,
		int checkpointingInterval) {

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	if (0 < maxParallelism) {
		env.getConfig().setMaxParallelism(maxParallelism);
	}
	env.enableCheckpointing(checkpointingInterval);
	env.setRestartStrategy(RestartStrategies.noRestart());
	env.getConfig().setUseSnapshotCompression(true);

	DataStream<Integer> input = env.addSource(new SubtaskIndexSource(
			numberKeys,
			numberElements,
			terminateAfterEmission))
			.keyBy(new KeySelector<Integer, Integer>() {
				private static final long serialVersionUID = -7952298871120320940L;

				@Override
				public Integer getKey(Integer value) throws Exception {
					return value;
				}
			});

	SubtaskIndexFlatMapper.workCompletedLatch = new CountDownLatch(numberKeys);

	DataStream<Tuple2<Integer, Integer>> result = input.flatMap(new SubtaskIndexFlatMapper(numberElements));

	result.addSink(new CollectionSink<Tuple2<Integer, Integer>>());

	return env.getStreamGraph().getJobGraph();
}
 
Example 22
@Test
public void testFailureRateRestartStrategySpecifiedInJobConfig() {
	final Configuration conf = new Configuration();
	conf.setString(RestartStrategyOptions.RESTART_STRATEGY, "fixed-delay");
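	// the failure-rate strategy passed below as the job-level configuration is expected
	// to take precedence over the cluster-level "fixed-delay" setting above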

	final RestartBackoffTimeStrategy.Factory factory =
		RestartBackoffTimeStrategyFactoryLoader.createRestartBackoffTimeStrategyFactory(
			RestartStrategies.failureRateRestart(1, Time.milliseconds(1000), Time.milliseconds(1000)),
			conf,
			false);

	assertThat(
		factory,
		instanceOf(FailureRateRestartBackoffTimeStrategy.FailureRateRestartBackoffTimeStrategyFactory.class));
}
 
Example 23
Source Project: flink-learning   Source File: ExecutionEnvUtil.java    License: Apache License 2.0
public static StreamExecutionEnvironment prepare(ParameterTool parameterTool) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(parameterTool.getInt(PropertiesConstants.STREAM_PARALLELISM, 5));
    env.getConfig().disableSysoutLogging();
    env.getConfig().setRestartStrategy(RestartStrategies.fixedDelayRestart(4, 60000));
    if (parameterTool.getBoolean(PropertiesConstants.STREAM_CHECKPOINT_ENABLE, true)) {
        env.enableCheckpointing(parameterTool.getLong(PropertiesConstants.STREAM_CHECKPOINT_INTERVAL, 10000));
    }
    env.getConfig().setGlobalJobParameters(parameterTool);
    env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
    return env;
}
 
Example 24
public static void main(String[] args) throws Exception {

		ParameterTool params = ParameterTool.fromArgs(args);
		final String input = params.getRequired("input");

		final int servingSpeedFactor = 600; // events of 10 minutes are served in 1 second

		// set up streaming execution environment
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		// operate in Event-time
		env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
		// create a checkpoint every 5 seconds
		env.enableCheckpointing(5000);
		// try to restart 60 times with 10 seconds delay (10 Minutes)
		env.setRestartStrategy(RestartStrategies.fixedDelayRestart(60, Time.of(10, TimeUnit.SECONDS)));

		// start the data generator
		DataStream<TaxiRide> rides = env.addSource(
				new CheckpointedTaxiRideSource(input, servingSpeedFactor));

		DataStream<Tuple2<Long, Integer>> predictions = rides
			// filter out rides that do not start or stop in NYC
			.filter(new NYCFilter())
			// map taxi ride events to the grid cell of the destination
			.map(new GridCellMatcher())
			// organize stream by destination
			.keyBy(0)
			// predict and refine model per destination
			.flatMap(new PredictionModel());

		// print the predictions
		predictions.print();

		// run the prediction pipeline
		env.execute("Taxi Ride Prediction");
	}
 
Example 25
Source Project: Flink-CEPplus   Source File: MiniClusterITCase.java    License: Apache License 2.0
private static JobGraph getSimpleJob(int parallelism) throws IOException {
	final JobVertex task = new JobVertex("Test task");
	task.setParallelism(parallelism);
	task.setMaxParallelism(parallelism);
	task.setInvokableClass(NoOpInvokable.class);

	final JobGraph jg = new JobGraph(new JobID(), "Test Job", task);
	jg.setScheduleMode(ScheduleMode.EAGER);

	final ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));
	jg.setExecutionConfig(executionConfig);

	return jg;
}
 
Example 26
Source Project: Flink-CEPplus   Source File: TaskExecutorITCase.java    License: Apache License 2.0
private JobGraph createJobGraphWithRestartStrategy(int parallelism) throws IOException {
	final JobGraph jobGraph = createJobGraph(parallelism);
	final ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0L));
	jobGraph.setExecutionConfig(executionConfig);

	return jobGraph;
}
 
Example 27
Source Project: pulsar-flink   Source File: FlinkPulsarITest.java    License: Apache License 2.0
@Test
public void testJson() throws Exception {
    String topic = newTopic();

    sendTypedMessages(topic, SchemaType.JSON, fooList, Optional.empty(), SchemaData.Foo.class);

    StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
    see.setParallelism(1);
    see.getConfig().disableSysoutLogging();
    see.setRestartStrategy(RestartStrategies.noRestart());

    String subName = UUID.randomUUID().toString();

    Properties sourceProps = sourceProperties();
    sourceProps.setProperty(TOPIC_SINGLE_OPTION_KEY, topic);

    FlinkPulsarSource<SchemaData.Foo> source =
            new FlinkPulsarSource<>(serviceUrl, adminUrl, JsonDeser.of(SchemaData.Foo.class), sourceProps)
                    .setStartFromEarliest();

    DataStream<Integer> ds = see.addSource(source)
            .map(SchemaData.Foo::getI);

    ds.map(new FailingIdentityMapper<>(fooList.size()))
            .addSink(new SingletonStreamSink.StringSink<>()).setParallelism(1);

    try {
        see.execute("test read data of POJO using JSON");
    } catch (Exception e) {
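        // expected: the FailingIdentityMapper deliberately fails the job once it has
        // processed the configured number of elements, so the exception is swallowed here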

    }

    SingletonStreamSink.compareWithList(
            fooList.subList(0, fooList.size() - 1).stream().map(SchemaData.Foo::getI).map(Objects::toString).collect(Collectors.toList()));
}
 
Example 28
Source Project: flink   Source File: MiniClusterITCase.java    License: Apache License 2.0
private static JobGraph getSimpleJob(int parallelism) throws IOException {
	final JobVertex task = new JobVertex("Test task");
	task.setParallelism(parallelism);
	task.setMaxParallelism(parallelism);
	task.setInvokableClass(NoOpInvokable.class);

	final JobGraph jg = new JobGraph(new JobID(), "Test Job", task);
	jg.setScheduleMode(ScheduleMode.EAGER);

	final ExecutionConfig executionConfig = new ExecutionConfig();
	executionConfig.setRestartStrategy(RestartStrategies.fixedDelayRestart(Integer.MAX_VALUE, 1000));
	jg.setExecutionConfig(executionConfig);

	return jg;
}