Java Code Examples for org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#createRemoteEnvironment()

The following examples show how to use org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#createRemoteEnvironment(). Each example is taken from an open-source project; the source file and license are noted above each listing.
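For orientation, here is a minimal, self-contained sketch of the simplest overload before the real-world examples below. The host name, port, and JAR path are placeholders, not values from any project on this page.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class RemoteEnvSketch {
    public static void main(String[] args) throws Exception {
        // Connect to a remote JobManager and ship the user-code JAR to the cluster.
        // "jobmanager-host", 8081, and the JAR path are placeholder values.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(
            "jobmanager-host",
            8081,
            "/path/to/user-code.jar");

        env.fromElements(1, 2, 3)
            .map(i -> i * 2)
            .print();

        env.execute("remote-environment-sketch");
    }
}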
Example 1
Source File: FlinkAutoConfiguration.java    From Flink-Streaming-Spring-Boot with MIT License
@Bean("flinkEnvironment")
StreamExecutionEnvironment getFlinkEnvironment(FlinkProperties flinkProperties) {
    long maxBytes = flinkProperties.getMaxClientRestRequestSizeBytes();
    org.apache.flink.configuration.Configuration config = new org.apache.flink.configuration.Configuration();
    config.setString("rest.address", flinkProperties.getJobManagerUrl());
    config.setInteger("rest.port", flinkProperties.getJobManagerPort());
    // Raise the REST and Akka payload limits so large job submissions are not rejected.
    config.setLong("rest.client.max-content-length", maxBytes);
    config.setLong("rest.server.max-content-length", maxBytes);
    config.setString("akka.framesize", maxBytes + "b");

    return StreamExecutionEnvironment.createRemoteEnvironment(
        flinkProperties.getJobManagerUrl(),
        flinkProperties.getJobManagerPort(),
        config,
        flinkProperties.getRemoteEnvJarFiles().toArray(new String[0]));
}
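Note that FlinkProperties comes from the surrounding Spring Boot project, not from Flink itself. A minimal hypothetical sketch, showing only the getters the example reads, might look like this:

// Hypothetical sketch of the project-specific FlinkProperties bean;
// only the accessors used in the example above are shown.
public class FlinkProperties {
    private String jobManagerUrl;
    private int jobManagerPort;
    private long maxClientRestRequestSizeBytes;
    private java.util.List<String> remoteEnvJarFiles;

    public String getJobManagerUrl() { return jobManagerUrl; }
    public int getJobManagerPort() { return jobManagerPort; }
    public long getMaxClientRestRequestSizeBytes() { return maxClientRestRequestSizeBytes; }
    public java.util.List<String> getRemoteEnvJarFiles() { return remoteEnvJarFiles; }
}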
 
Example 2
Source File: JobManagerWatermarkTrackerTest.java    From flink with Apache License 2.0
@Test
public void testUpdateWatermark() throws Exception {
	final Configuration clientConfiguration = new Configuration();
	clientConfiguration.setInteger(RestOptions.RETRY_MAX_ATTEMPTS, 0);

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(
		flink.getRestAddress().get().getHost(),
		flink.getRestAddress().get().getPort(),
		clientConfiguration);

	env.addSource(new TestSourceFunction(new JobManagerWatermarkTracker("fakeId")))
		.addSink(new SinkFunction<Integer>() {});
	env.execute();
}
 
Example 3
Source File: JobManagerWatermarkTrackerTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testUpdateWatermark() throws Exception {
	final Configuration clientConfiguration = new Configuration();
	clientConfiguration.setInteger(RestOptions.RETRY_MAX_ATTEMPTS, 0);

	final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(
		flink.getRestAddress().get().getHost(),
		flink.getRestAddress().get().getPort(),
		clientConfiguration);

	env.addSource(new TestSourceFunction(new JobManagerWatermarkTracker("fakeId")))
		.addSink(new SinkFunction<Integer>() {});
	env.execute();
}
 
Example 4
Source File: KinesisEventsGeneratorProducerThread.java    From flink with Apache License 2.0
public static Thread create(final int totalEventCount,
							final int parallelism,
							final String awsAccessKey,
							final String awsSecretKey,
							final String awsRegion,
							final String kinesisStreamName,
							final AtomicReference<Throwable> errorHandler,
							final int flinkPort,
							final Configuration flinkConfig) {
	Runnable kinesisEventsGeneratorProducer = new Runnable() {
		@Override
		public void run() {
			try {
				StreamExecutionEnvironment see = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort, flinkConfig);
				see.setParallelism(parallelism);

				// start data generator
				DataStream<String> simpleStringStream = see.addSource(new KinesisEventsGeneratorProducerThread.EventsGenerator(totalEventCount)).setParallelism(1);

				Properties producerProps = new Properties();
				producerProps.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, awsAccessKey);
				producerProps.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, awsSecretKey);
				producerProps.setProperty(AWSConfigConstants.AWS_REGION, awsRegion);

				FlinkKinesisProducer<String> kinesis = new FlinkKinesisProducer<>(new SimpleStringSchema(),
					producerProps);

				kinesis.setFailOnError(true);
				kinesis.setDefaultStream(kinesisStreamName);
				kinesis.setDefaultPartition("0");
				simpleStringStream.addSink(kinesis);

				LOG.info("Starting producing topology");
				see.execute("Producing topology");
				LOG.info("Producing topo finished");
			} catch (Exception e) {
				LOG.warn("Error while running producing topology", e);
				errorHandler.set(e);
			}
		}
	};

	return new Thread(kinesisEventsGeneratorProducer);
}
 
Example 5
Source File: TaskManagerProcessFailureStreamingRecoveryITCase.java    From flink with Apache License 2.0
@Override
public void testTaskManagerFailure(Configuration configuration, final File coordinateDir) throws Exception {

	final File tempCheckpointDir = tempFolder.newFolder();

	StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(
		"localhost",
		1337, // not needed since we use ZooKeeper
		configuration);
	env.setParallelism(PARALLELISM);
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
	env.enableCheckpointing(200);

	env.setStateBackend(new FsStateBackend(tempCheckpointDir.getAbsoluteFile().toURI()));

	DataStream<Long> result = env.addSource(new SleepyDurableGenerateSequence(coordinateDir, DATA_COUNT))
			// add a non-chained no-op map to test the chain state restore logic
			.map(new MapFunction<Long, Long>() {
				@Override
				public Long map(Long value) throws Exception {
					return value;
				}
			}).startNewChain()
					// populate the coordinate directory so we can proceed to TaskManager failure
			.map(new Mapper(coordinateDir));

	//write result to temporary file
	result.addSink(new CheckpointedSink(DATA_COUNT));

	try {
		// blocking call until execution is done
		env.execute();

		// TODO: Figure out why this fails when ran with other tests
		// Check whether checkpoints have been cleaned up properly
		// assertDirectoryEmpty(tempCheckpointDir);
	}
	finally {
		// clean up
		if (tempCheckpointDir.exists()) {
			FileUtils.deleteDirectory(tempCheckpointDir);
		}
	}
}
 
Example 6
Source File: ExactlyOnceValidatingConsumerThread.java    From flink with Apache License 2.0
public static Thread create(final int totalEventCount,
							final int failAtRecordCount,
							final int parallelism,
							final int checkpointInterval,
							final long restartDelay,
							final String awsAccessKey,
							final String awsSecretKey,
							final String awsRegion,
							final String kinesisStreamName,
							final AtomicReference<Throwable> errorHandler,
							final int flinkPort,
							final Configuration flinkConfig) {
	Runnable exactlyOnceValidationConsumer = new Runnable() {
		@Override
		public void run() {
			try {
				StreamExecutionEnvironment see = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort, flinkConfig);
				see.setParallelism(parallelism);
				see.enableCheckpointing(checkpointInterval);
				// we restart two times
				see.setRestartStrategy(RestartStrategies.fixedDelayRestart(2, restartDelay));

				// consuming topology
				Properties consumerProps = new Properties();
				consumerProps.setProperty(ConsumerConfigConstants.AWS_ACCESS_KEY_ID, awsAccessKey);
				consumerProps.setProperty(ConsumerConfigConstants.AWS_SECRET_ACCESS_KEY, awsSecretKey);
				consumerProps.setProperty(ConsumerConfigConstants.AWS_REGION, awsRegion);
				// start reading from beginning
				consumerProps.setProperty(ConsumerConfigConstants.STREAM_INITIAL_POSITION, ConsumerConfigConstants.InitialPosition.TRIM_HORIZON.name());
				DataStream<String> consuming = see.addSource(new FlinkKinesisConsumer<>(kinesisStreamName, new SimpleStringSchema(), consumerProps));
				consuming
					.flatMap(new ArtificialFailOnceFlatMapper(failAtRecordCount))
					// validate consumed records for correctness (use only 1 instance to validate all consumed records)
					.flatMap(new ExactlyOnceValidatingMapper(totalEventCount)).setParallelism(1);

				LOG.info("Starting consuming topology");
				tryExecute(see, "Consuming topo");
				LOG.info("Consuming topo finished");
			} catch (Exception e) {
				LOG.warn("Error while running consuming topology", e);
				errorHandler.set(e);
			}
		}
	};

	return new Thread(exactlyOnceValidationConsumer);
}
 
Example 7
Source File: FlinkPipelineExecutionEnvironment.java    From flink-dataflow with Apache License 2.0
/**
 * If the submitted job is a stream processing job, this method creates the adequate
 * Flink {@link org.apache.flink.streaming.api.environment.StreamExecutionEnvironment} depending
 * on the user-specified options.
 */
private void createStreamExecutionEnvironment() {
	if (this.flinkStreamEnv != null || this.flinkBatchEnv != null) {
		throw new RuntimeException("FlinkPipelineExecutionEnvironment already initialized.");
	}

	LOG.info("Creating the required Streaming Environment.");

	String masterUrl = options.getFlinkMaster();
	this.flinkBatchEnv = null;

	// depending on the master, create the right environment.
	if (masterUrl.equals("[local]")) {
		this.flinkStreamEnv = StreamExecutionEnvironment.createLocalEnvironment();
	} else if (masterUrl.equals("[auto]")) {
		this.flinkStreamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
	} else if (masterUrl.matches(".*:\\d*")) {
		String[] parts = masterUrl.split(":");
		List<String> stagingFiles = options.getFilesToStage();
		this.flinkStreamEnv = StreamExecutionEnvironment.createRemoteEnvironment(parts[0],
				Integer.parseInt(parts[1]), stagingFiles.toArray(new String[stagingFiles.size()]));
	} else {
		LOG.warn("Unrecognized Flink Master URL {}. Defaulting to [auto].", masterUrl);
		this.flinkStreamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
	}

	// set the correct parallelism.
	if (options.getParallelism() != -1) {
		this.flinkStreamEnv.setParallelism(options.getParallelism());
	}

	// set parallelism in the options (required by some execution code)
	options.setParallelism(flinkStreamEnv.getParallelism());

	// default to event time
	this.flinkStreamEnv.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

	// for the following 2 parameters, a value of -1 means that Flink will use
	// the default values as specified in the configuration.
	int numRetries = options.getNumberOfExecutionRetries();
	if (numRetries != -1) {
		this.flinkStreamEnv.setNumberOfExecutionRetries(numRetries);
	}
	long retryDelay = options.getExecutionRetryDelay();
	if (retryDelay != -1) {
		this.flinkStreamEnv.getConfig().setExecutionRetryDelay(retryDelay);
	}

	// A value of -1 corresponds to disabled checkpointing (see CheckpointConfig in Flink).
	// If the value is not -1, then the validity checks are applied.
	// By default, checkpointing is disabled.
	long checkpointInterval = options.getCheckpointingInterval();
	if (checkpointInterval != -1) {
		if (checkpointInterval < 1) {
			throw new IllegalArgumentException("The checkpoint interval must be positive");
		}
		this.flinkStreamEnv.enableCheckpointing(checkpointInterval);
	}
}
 
Example 8
Source File: TaskManagerProcessFailureStreamingRecoveryITCase.java    From flink with Apache License 2.0
@Override
public void testTaskManagerFailure(Configuration configuration, final File coordinateDir) throws Exception {

	final File tempCheckpointDir = tempFolder.newFolder();

	StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(
		"localhost",
		1337, // not needed since we use ZooKeeper
		configuration);
	env.setParallelism(PARALLELISM);
	env.getConfig().disableSysoutLogging();
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
	env.enableCheckpointing(200);

	env.setStateBackend(new FsStateBackend(tempCheckpointDir.getAbsoluteFile().toURI()));

	DataStream<Long> result = env.addSource(new SleepyDurableGenerateSequence(coordinateDir, DATA_COUNT))
			// add a non-chained no-op map to test the chain state restore logic
			.map(new MapFunction<Long, Long>() {
				@Override
				public Long map(Long value) throws Exception {
					return value;
				}
			}).startNewChain()
					// populate the coordinate directory so we can proceed to TaskManager failure
			.map(new Mapper(coordinateDir));

	//write result to temporary file
	result.addSink(new CheckpointedSink(DATA_COUNT));

	try {
		// blocking call until execution is done
		env.execute();

		// TODO: Figure out why this fails when ran with other tests
		// Check whether checkpoints have been cleaned up properly
		// assertDirectoryEmpty(tempCheckpointDir);
	}
	finally {
		// clean up
		if (tempCheckpointDir.exists()) {
			FileUtils.deleteDirectory(tempCheckpointDir);
		}
	}
}
 
Example 9
Source File: TaskManagerProcessFailureStreamingRecoveryITCase.java    From Flink-CEPplus with Apache License 2.0
@Override
public void testTaskManagerFailure(Configuration configuration, final File coordinateDir) throws Exception {

	final File tempCheckpointDir = tempFolder.newFolder();

	StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment(
		"localhost",
		1337, // not needed since we use ZooKeeper
		configuration);
	env.setParallelism(PARALLELISM);
	env.getConfig().disableSysoutLogging();
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
	env.enableCheckpointing(200);

	env.setStateBackend(new FsStateBackend(tempCheckpointDir.getAbsoluteFile().toURI()));

	DataStream<Long> result = env.addSource(new SleepyDurableGenerateSequence(coordinateDir, DATA_COUNT))
			// add a non-chained no-op map to test the chain state restore logic
			.map(new MapFunction<Long, Long>() {
				@Override
				public Long map(Long value) throws Exception {
					return value;
				}
			}).startNewChain()
					// populate the coordinate directory so we can proceed to TaskManager failure
			.map(new Mapper(coordinateDir));

	//write result to temporary file
	result.addSink(new CheckpointedSink(DATA_COUNT));

	try {
		// blocking call until execution is done
		env.execute();

		// TODO: Figure out why this fails when ran with other tests
		// Check whether checkpoints have been cleaned up properly
		// assertDirectoryEmpty(tempCheckpointDir);
	}
	finally {
		// clean up
		if (tempCheckpointDir.exists()) {
			FileUtils.deleteDirectory(tempCheckpointDir);
		}
	}
}
 
Example 10
Source File: ExactlyOnceValidatingConsumerThread.java    From Flink-CEPplus with Apache License 2.0
public static Thread create(final int totalEventCount,
							final int failAtRecordCount,
							final int parallelism,
							final int checkpointInterval,
							final long restartDelay,
							final String awsAccessKey,
							final String awsSecretKey,
							final String awsRegion,
							final String kinesisStreamName,
							final AtomicReference<Throwable> errorHandler,
							final int flinkPort,
							final Configuration flinkConfig) {
	Runnable exactlyOnceValidationConsumer = new Runnable() {
		@Override
		public void run() {
			try {
				StreamExecutionEnvironment see = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort, flinkConfig);
				see.setParallelism(parallelism);
				see.enableCheckpointing(checkpointInterval);
				// we restart two times
				see.setRestartStrategy(RestartStrategies.fixedDelayRestart(2, restartDelay));

				// consuming topology
				Properties consumerProps = new Properties();
				consumerProps.setProperty(ConsumerConfigConstants.AWS_ACCESS_KEY_ID, awsAccessKey);
				consumerProps.setProperty(ConsumerConfigConstants.AWS_SECRET_ACCESS_KEY, awsSecretKey);
				consumerProps.setProperty(ConsumerConfigConstants.AWS_REGION, awsRegion);
				// start reading from beginning
				consumerProps.setProperty(ConsumerConfigConstants.STREAM_INITIAL_POSITION, ConsumerConfigConstants.InitialPosition.TRIM_HORIZON.name());
				DataStream<String> consuming = see.addSource(new FlinkKinesisConsumer<>(kinesisStreamName, new SimpleStringSchema(), consumerProps));
				consuming
					.flatMap(new ArtificialFailOnceFlatMapper(failAtRecordCount))
					// validate consumed records for correctness (use only 1 instance to validate all consumed records)
					.flatMap(new ExactlyOnceValidatingMapper(totalEventCount)).setParallelism(1);

				LOG.info("Starting consuming topology");
				tryExecute(see, "Consuming topo");
				LOG.info("Consuming topo finished");
			} catch (Exception e) {
				LOG.warn("Error while running consuming topology", e);
				errorHandler.set(e);
			}
		}
	};

	return new Thread(exactlyOnceValidationConsumer);
}
 
Example 11
Source File: KinesisEventsGeneratorProducerThread.java    From Flink-CEPplus with Apache License 2.0
public static Thread create(final int totalEventCount,
							final int parallelism,
							final String awsAccessKey,
							final String awsSecretKey,
							final String awsRegion,
							final String kinesisStreamName,
							final AtomicReference<Throwable> errorHandler,
							final int flinkPort,
							final Configuration flinkConfig) {
	Runnable kinesisEventsGeneratorProducer = new Runnable() {
		@Override
		public void run() {
			try {
				StreamExecutionEnvironment see = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort, flinkConfig);
				see.setParallelism(parallelism);

				// start data generator
				DataStream<String> simpleStringStream = see.addSource(new KinesisEventsGeneratorProducerThread.EventsGenerator(totalEventCount)).setParallelism(1);

				Properties producerProps = new Properties();
				producerProps.setProperty(AWSConfigConstants.AWS_ACCESS_KEY_ID, awsAccessKey);
				producerProps.setProperty(AWSConfigConstants.AWS_SECRET_ACCESS_KEY, awsSecretKey);
				producerProps.setProperty(AWSConfigConstants.AWS_REGION, awsRegion);

				FlinkKinesisProducer<String> kinesis = new FlinkKinesisProducer<>(new SimpleStringSchema(),
					producerProps);

				kinesis.setFailOnError(true);
				kinesis.setDefaultStream(kinesisStreamName);
				kinesis.setDefaultPartition("0");
				simpleStringStream.addSink(kinesis);

				LOG.info("Starting producing topology");
				see.execute("Producing topology");
				LOG.info("Producing topo finished");
			} catch (Exception e) {
				LOG.warn("Error while running producing topology", e);
				errorHandler.set(e);
			}
		}
	};

	return new Thread(kinesisEventsGeneratorProducer);
}
 
Example 12
Source File: PythonEnvironmentFactory.java    From Flink-CEPplus with Apache License 2.0
/**
 * A thin wrapper layer over {@link StreamExecutionEnvironment#createRemoteEnvironment(String, int, int, String...)}.
 *
 * @param host The host name or address of the master (JobManager), where the
 * program should be executed.
 * @param port The port of the master (JobManager), where the program should
 * be executed.
 * @param parallelism The parallelism to use during the execution.
 * @param jar_files The JAR files with code that needs to be shipped to the
 * cluster. If the program uses user-defined functions,
 * user-defined input formats, or any libraries, those must be
 * provided in the JAR files.
 * @return A remote environment that executes the program on a cluster.
 */
public PythonStreamExecutionEnvironment create_remote_execution_environment(
	String host, int port, int parallelism, String... jar_files) {
	return new PythonStreamExecutionEnvironment(
		StreamExecutionEnvironment.createRemoteEnvironment(host, port, parallelism, jar_files), new Path(localTmpPath), scriptName);
}
 
Example 13
Source File: PythonEnvironmentFactory.java    From Flink-CEPplus with Apache License 2.0
/**
 * A thin wrapper layer over {@link StreamExecutionEnvironment#createRemoteEnvironment(String, int, Configuration, String...)}.
 *
 * @param host The host name or address of the master (JobManager), where the
 * program should be executed.
 * @param port The port of the master (JobManager), where the program should
 * be executed.
 * @param config The configuration used by the client that connects to the remote cluster.
 * @param jar_files The JAR files with code that needs to be shipped to the
 * cluster. If the program uses user-defined functions,
 * user-defined input formats, or any libraries, those must be
 * provided in the JAR files.
 * @return A remote environment that executes the program on a cluster.
 */
public PythonStreamExecutionEnvironment create_remote_execution_environment(
	String host, int port, Configuration config, String... jar_files) {
	return new PythonStreamExecutionEnvironment(
		StreamExecutionEnvironment.createRemoteEnvironment(host, port, config, jar_files), new Path(localTmpPath), scriptName);
}
 
Example 14
Source File: PythonEnvironmentFactory.java    From Flink-CEPplus with Apache License 2.0
/**
 * A thin wrapper layer over {@link StreamExecutionEnvironment#createRemoteEnvironment(String, int, String...)}.
 *
 * @param host The host name or address of the master (JobManager), where the
 * program should be executed.
 * @param port The port of the master (JobManager), where the program should
 * be executed.
 * @param jar_files The JAR files with code that needs to be shipped to the
 * cluster. If the program uses user-defined functions,
 * user-defined input formats, or any libraries, those must be
 * provided in the JAR files.
 * @return A remote environment that executes the program on a cluster.
 */
public PythonStreamExecutionEnvironment create_remote_execution_environment(
	String host, int port, String... jar_files) {
	return new PythonStreamExecutionEnvironment(
		StreamExecutionEnvironment.createRemoteEnvironment(host, port, jar_files), new Path(localTmpPath), scriptName);
}