Java Code Examples for org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setBufferTimeout()

The following examples show how to use org.apache.flink.streaming.api.environment.StreamExecutionEnvironment#setBufferTimeout(). They are drawn from several open-source projects; each example names its original source file and project.
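As a quick orientation before the project examples, here is a minimal sketch of the three modes the method supports, following the semantics documented in the Flink javadoc: a positive value flushes output buffers at that interval in milliseconds, 0 flushes after every record, and -1 disables the timer so buffers are flushed only when full. The pipeline itself is a placeholder, not taken from any of the projects below.

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public class BufferTimeoutModes {
	public static void main(String[] args) throws Exception {
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

		// Positive value: flush output buffers at least every 100 ms,
		// bounding the added latency while keeping most of the throughput.
		env.setBufferTimeout(100);

		// 0: flush after every record, minimizing latency at a throughput cost.
		// env.setBufferTimeout(0);

		// -1: no timer-based flushing; buffers go out only when full,
		// maximizing throughput (the batch executors below rely on this).
		// env.setBufferTimeout(-1);

		env.fromElements(1, 2, 3, 4, 5)
			.map(value -> value)
			.print();

		env.execute("setBufferTimeout() modes");
	}
}
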
Example 1
Source File: StreamingScalabilityAndLatency.java    From Flink-CEPplus with Apache License 2.0
private static void runPartitioningProgram(int parallelism) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.getConfig().enableObjectReuse();

	env.setBufferTimeout(5L);
	env.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);

	env
		.addSource(new TimeStampingSource())
		.map(new IdMapper<Tuple2<Long, Long>>())
		.keyBy(0)
		.addSink(new TimestampingSink());

	env.execute("Partitioning Program");
}
 
Example 2
Source File: StreamingScalabilityAndLatency.java    From flink with Apache License 2.0
private static void runPartitioningProgram(int parallelism) throws Exception {
	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(parallelism);
	env.getConfig().enableObjectReuse();

	env.setBufferTimeout(5L);
	env.enableCheckpointing(1000, CheckpointingMode.AT_LEAST_ONCE);

	env
		.addSource(new TimeStampingSource())
		.map(new IdMapper<Tuple2<Long, Long>>())
		.keyBy(0)
		.addSink(new TimestampingSink());

	env.execute("Partitioning Program");
}
 
Example 3
Source File: TwoInputBenchmark.java    From flink-benchmarks with Apache License 2.0
@Benchmark
@OperationsPerInvocation(value = TwoInputBenchmark.RECORDS_PER_INVOCATION)
public void twoInputMapSink(FlinkEnvironmentContext context) throws Exception {

	StreamExecutionEnvironment env = context.env;

	env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
	env.setParallelism(1);

	// Setting the buffer timeout to 1 ms is an attempt to improve twoInputMapSink benchmark stability.
	// Without the 1 ms buffer timeout, some JVM forks are much slower than others, making results
	// unstable and unreliable.
	env.setBufferTimeout(1);

	long numRecordsPerInput = RECORDS_PER_INVOCATION / 2;
	DataStreamSource<Long> source1 = env.addSource(new LongSource(numRecordsPerInput));
	DataStreamSource<Long> source2 = env.addSource(new LongSource(numRecordsPerInput));

	source1
		.connect(source2)
		.transform("custom operator", TypeInformation.of(Long.class), new MultiplyByTwoCoStreamMap())
		.addSink(new DiscardingSink<>());

	env.execute();
}
 
Example 4
Source File: BravoTestPipeline.java    From bravo with Apache License 2.0
private StreamExecutionEnvironment createJobGraph(int parallelism,
		Function<DataStream<String>, DataStream<String>> pipelinerBuilder) throws Exception {
	final Path checkpointDir = getCheckpointDir();
	final Path savepointRootDir = getSavepointDir();

	checkpointDir.getFileSystem().mkdirs(checkpointDir);
	savepointRootDir.getFileSystem().mkdirs(savepointRootDir);

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.getConfig().disableSysoutLogging();
	env.getCheckpointConfig().enableExternalizedCheckpoints(ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
	env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
	env.setBufferTimeout(0);
	env.setParallelism(parallelism);
	env.enableCheckpointing(500, CheckpointingMode.EXACTLY_ONCE);

	env.setStateBackend((StateBackend) new RocksDBStateBackend(checkpointDir.toString(), true));

	DataStream<String> sourceData = env
			.addSource(new TestPipelineSource())
			.uid("TestSource")
			.name("TestSource")
			.setParallelism(1);

	pipelinerBuilder.apply(sourceData)
			.addSink(new CollectingSink()).name("Output").uid("Output")
			.setParallelism(1);

	return env;
}
 
Example 5
Source File: ExecutorUtils.java    From flink with Apache License 2.0
/**
 * Sets batch properties for {@link StreamExecutionEnvironment}.
 */
public static void setBatchProperties(StreamExecutionEnvironment execEnv, TableConfig tableConfig) {
	ExecutionConfig executionConfig = execEnv.getConfig();
	executionConfig.enableObjectReuse();
	executionConfig.setLatencyTrackingInterval(-1);
	execEnv.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
	execEnv.setBufferTimeout(-1);
	if (isShuffleModeAllBlocking(tableConfig)) {
		executionConfig.setDefaultInputDependencyConstraint(InputDependencyConstraint.ALL);
	}
}
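Both this utility and the BatchExecutor in Example 7 below pass -1 to setBufferTimeout(): with the timer disabled, output buffers are flushed only when they are full, which maximizes throughput. Batch-style jobs have no per-record latency requirement, so there is nothing to trade away.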
 
Example 6
Source File: KafkaShuffleExactlyOnceITCase.java    From flink with Apache License 2.0
private StreamExecutionEnvironment createEnvironment(
		int producerParallelism,
		TimeCharacteristic timeCharacteristic) {
	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.setParallelism(producerParallelism);
	env.setStreamTimeCharacteristic(timeCharacteristic);
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
	env.setBufferTimeout(0);
	env.enableCheckpointing(500);

	return env;
}
 
Example 7
Source File: BatchExecutor.java    From flink with Apache License 2.0
/**
 * Sets batch configs.
 */
private void setBatchProperties(StreamExecutionEnvironment execEnv) {
	ExecutionConfig executionConfig = execEnv.getConfig();
	executionConfig.enableObjectReuse();
	executionConfig.setLatencyTrackingInterval(-1);
	execEnv.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
	execEnv.setBufferTimeout(-1);
	if (isShuffleModeAllBatch()) {
		executionConfig.setDefaultInputDependencyConstraint(InputDependencyConstraint.ALL);
	}
}
 
Example 8
Source File: AbstractFlinkClient.java    From alchemy with Apache License 2.0
private void setBaseInfo(StreamExecutionEnvironment execEnv, SqlSubmitFlinkRequest request) {
    execEnv.setParallelism(request.getParallelism());
    if (request.getMaxParallelism() != null) {
        execEnv.setMaxParallelism(request.getMaxParallelism());
    }
    if (org.apache.commons.lang3.StringUtils.isNotEmpty(request.getTimeCharacteristic())) {
        execEnv.setStreamTimeCharacteristic(TimeCharacteristic.valueOf(request.getTimeCharacteristic()));
    } else {
        execEnv.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);
    }
    if (request.getBufferTimeout() != null) {
        execEnv.setBufferTimeout(request.getBufferTimeout());
    }
    if (org.apache.commons.lang3.StringUtils.isNotEmpty(request.getRestartStrategies())) {
        String strategies = request.getRestartStrategies();
        com.dfire.platform.alchemy.common.RestartStrategies restartStrategies
            = com.dfire.platform.alchemy.common.RestartStrategies.valueOf(strategies.toUpperCase());
        Map<String, Object> restartParams = request.getRestartParams();
        switch (restartStrategies) {
            case NO:
                execEnv.setRestartStrategy(RestartStrategies.noRestart());
                break;
            case FIXED:
                int restartAttempts = restartParams == null ? Constants.RESTART_ATTEMPTS
                    : Integer.parseInt(restartParams.get(CONFIG_KEY_RESTART_ATTEMPTS).toString());
                long delayBetweenAttempts = restartParams == null ? Constants.DELAY_BETWEEN_ATTEMPTS
                    : Long.parseLong(restartParams.get(CONFIG_KEY_DELAY_BETWEEN_ATTEMPTS).toString());
                execEnv
                    .setRestartStrategy(RestartStrategies.fixedDelayRestart(restartAttempts, delayBetweenAttempts));
                break;
            case FAILURE:
                int failureRate = restartParams == null ? Constants.FAILURE_RATE
                    : Integer.parseInt(restartParams.get(CONFIG_KEY_FAILURE_RATE).toString());
                long failureInterval = restartParams == null ? Constants.FAILURE_INTERVAL
                    : Long.parseLong(restartParams.get(CONFIG_KEY_FAILURE_INTERVAL).toString());
                long delayInterval = restartParams == null ? Constants.DELAY_INTERVAL
                    : Long.parseLong(restartParams.get(CONFIG_KEY_DELAY_INTERVAL).toString());
                execEnv.setRestartStrategy(RestartStrategies.failureRateRestart(failureRate,
                    Time.of(failureInterval, TimeUnit.MILLISECONDS),
                    Time.of(delayInterval, TimeUnit.MILLISECONDS)));
                break;
            case FALLBACK:
                execEnv.setRestartStrategy(RestartStrategies.fallBackRestart());
                break;
            default:
        }
    }
    if (request.getCheckpointCfg() != null) {
        CheckpointConfig checkpointConfig = execEnv.getCheckpointConfig();
        BeanUtils.copyProperties(request.getCheckpointCfg(), checkpointConfig);
    }

}
 
Example 9
Source File: KafkaConsumerTestBase.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests the proper consumption when having more Flink sources than Kafka partitions, which means
 * that some Flink sources will read no partitions.
 */
public void runMultipleSourcesOnePartitionExactlyOnceTest() throws Exception {
	final String topic = "manyToOneTopic";
	final int numPartitions = 5;
	final int numElementsPerPartition = 1000;
	final int totalElements = numPartitions * numElementsPerPartition;
	final int failAfterElements = numElementsPerPartition / 3;

	final int parallelism = 8;

	createTestTopic(topic, numPartitions, 1);

	DataGenerators.generateRandomizedIntegerSequence(
			StreamExecutionEnvironment.getExecutionEnvironment(),
			kafkaServer,
			topic,
			numPartitions,
			numElementsPerPartition,
			true);

	// run the topology that fails and recovers

	DeserializationSchema<Integer> schema =
			new TypeInformationSerializationSchema<>(BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500);
	env.setParallelism(parallelism);
	// set the number of restarts to one. The failing mapper will fail once, then it's only success exceptions.
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
	env.getConfig().disableSysoutLogging();
	env.setBufferTimeout(0);

	Properties props = new Properties();
	props.putAll(standardProps);
	props.putAll(secureProps);
	FlinkKafkaConsumerBase<Integer> kafkaSource = kafkaServer.getConsumer(topic, schema, props);

	env
		.addSource(kafkaSource)
		.map(new PartitionValidatingMapper(numPartitions, 1))
		.map(new FailingIdentityMapper<Integer>(failAfterElements))
		.addSink(new ValidatingExactlyOnceSink(totalElements)).setParallelism(1);

	FailingIdentityMapper.failedBefore = false;
	tryExecute(env, "multi-source-one-partitions exactly once test");

	deleteTestTopic(topic);
}
 
Example 10
Source File: StreamGraphGeneratorTest.java    From flink with Apache License 2.0
@Test
public void testBufferTimeout() {
	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	env.setBufferTimeout(77); // set timeout to some recognizable number

	env
		.fromElements(1, 2, 3, 4, 5)

		.map(value -> value)
			.setBufferTimeout(-1)
			.name("A")
		.map(value -> value)
			.setBufferTimeout(0)
			.name("B")
		.map(value -> value)
			.setBufferTimeout(12)
			.name("C")
		.map(value -> value)
			.name("D");

	final StreamGraph sg = env.getStreamGraph();
	for (StreamNode node : sg.getStreamNodes()) {
		switch (node.getOperatorName()) {

			case "A":
				assertEquals(77L, node.getBufferTimeout());
				break;
			case "B":
				assertEquals(0L, node.getBufferTimeout());
				break;
			case "C":
				assertEquals(12L, node.getBufferTimeout());
				break;
			case "D":
				assertEquals(77L, node.getBufferTimeout());
				break;
			default:
				assertTrue(node.getOperator() instanceof StreamSource);
		}
	}
}
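The test above also documents how the environment-wide default and per-operator overrides interact: a timeout set via setBufferTimeout() on the operator takes precedence (nodes "B" and "C"), while an operator that sets -1 ("A") or sets nothing ("D") falls back to the environment value of 77. A minimal non-test sketch of the same override, using only the API the test exercises:

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.setBufferTimeout(100); // environment-wide default

env
	.fromElements(1, 2, 3, 4, 5)
	.map(value -> value)
		.setBufferTimeout(0) // this operator flushes after every record
	.map(value -> value)     // this operator inherits the 100 ms default
	.print();

env.execute("Per-operator buffer timeout");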
 
Example 11
Source File: AdvertisingTopologyNative.java    From streaming-benchmarks with Apache License 2.0
public static void main(final String[] args) throws Exception {

        ParameterTool parameterTool = ParameterTool.fromArgs(args);

        Map conf = Utils.findAndReadConfigFile(parameterTool.getRequired("confPath"), true);
        int kafkaPartitions = ((Number)conf.get("kafka.partitions")).intValue();
        int hosts = ((Number)conf.get("process.hosts")).intValue();
        int cores = ((Number)conf.get("process.cores")).intValue();

        ParameterTool flinkBenchmarkParams = ParameterTool.fromMap(getFlinkConfs(conf));

        LOG.info("conf: {}", conf);
        LOG.info("Parameters used: {}", flinkBenchmarkParams.toMap());

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.getConfig().setGlobalJobParameters(flinkBenchmarkParams);

        // Set the buffer timeout (default 100 ms).
        // Lowering the timeout leads to lower latencies, but will eventually reduce throughput.
        env.setBufferTimeout(flinkBenchmarkParams.getLong("flink.buffer-timeout", 100));

        if(flinkBenchmarkParams.has("flink.checkpoint-interval")) {
            // enable checkpointing for fault tolerance
            env.enableCheckpointing(flinkBenchmarkParams.getLong("flink.checkpoint-interval", 1000));
        }
        // set default parallelism for all operators (recommended value: number of available worker CPU cores in the cluster (hosts * cores))
        env.setParallelism(hosts * cores);

        DataStream<String> messageStream = env
                .addSource(new FlinkKafkaConsumer082<String>(
                        flinkBenchmarkParams.getRequired("topic"),
                        new SimpleStringSchema(),
                        flinkBenchmarkParams.getProperties())).setParallelism(Math.min(hosts * cores, kafkaPartitions));

        messageStream
                .rebalance()
                // Parse the String as JSON
                .flatMap(new DeserializeBolt())

                //Filter the records if event type is "view"
                .filter(new EventFilterBolt())

                // project the event
                .<Tuple2<String, String>>project(2, 5)

                // perform join with redis data
                .flatMap(new RedisJoinBolt())

                // process campaign
                .keyBy(0)
                .flatMap(new CampaignProcessor());


        env.execute();
    }
 
Example 12
Source File: KafkaConsumerTestBase.java    From flink with Apache License 2.0
/**
 * Tests the proper consumption when having more Flink sources than Kafka partitions, which means
 * that some Flink sources will read no partitions.
 */
public void runMultipleSourcesOnePartitionExactlyOnceTest() throws Exception {
	final String topic = "manyToOneTopic";
	final int numPartitions = 5;
	final int numElementsPerPartition = 1000;
	final int totalElements = numPartitions * numElementsPerPartition;
	final int failAfterElements = numElementsPerPartition / 3;

	final int parallelism = 8;

	createTestTopic(topic, numPartitions, 1);

	DataGenerators.generateRandomizedIntegerSequence(
			StreamExecutionEnvironment.getExecutionEnvironment(),
			kafkaServer,
			topic,
			numPartitions,
			numElementsPerPartition,
			true);

	// run the topology that fails and recovers

	DeserializationSchema<Integer> schema =
			new TypeInformationSerializationSchema<>(BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500);
	env.setParallelism(parallelism);
	// set the number of restarts to one. The failing mapper will fail once, then it's only success exceptions.
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
	env.setBufferTimeout(0);

	Properties props = new Properties();
	props.putAll(standardProps);
	props.putAll(secureProps);
	FlinkKafkaConsumerBase<Integer> kafkaSource = kafkaServer.getConsumer(topic, schema, props);

	env
		.addSource(kafkaSource)
		.map(new PartitionValidatingMapper(numPartitions, 1))
		.map(new FailingIdentityMapper<Integer>(failAfterElements))
		.addSink(new ValidatingExactlyOnceSink(totalElements)).setParallelism(1);

	FailingIdentityMapper.failedBefore = false;
	tryExecute(env, "multi-source-one-partitions exactly once test");

	deleteTestTopic(topic);
}
 
Example 13
Source File: KafkaConsumerTestBase.java    From flink with Apache License 2.0
/**
 * Tests the proper consumption when having more Flink sources than Kafka partitions, which means
 * that some Flink sources will read no partitions.
 */
public void runMultipleSourcesOnePartitionExactlyOnceTest() throws Exception {
	final String topic = "manyToOneTopic";
	final int numPartitions = 5;
	final int numElementsPerPartition = 1000;
	final int totalElements = numPartitions * numElementsPerPartition;
	final int failAfterElements = numElementsPerPartition / 3;

	final int parallelism = 8;

	createTestTopic(topic, numPartitions, 1);

	DataGenerators.generateRandomizedIntegerSequence(
			StreamExecutionEnvironment.getExecutionEnvironment(),
			kafkaServer,
			topic,
			numPartitions,
			numElementsPerPartition,
			true);

	// run the topology that fails and recovers

	DeserializationSchema<Integer> schema =
			new TypeInformationSerializationSchema<>(BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());

	StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
	env.enableCheckpointing(500);
	env.setParallelism(parallelism);
	// set the number of restarts to one. The failing mapper will fail once, then it's only success exceptions.
	env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 0));
	env.getConfig().disableSysoutLogging();
	env.setBufferTimeout(0);

	Properties props = new Properties();
	props.putAll(standardProps);
	props.putAll(secureProps);
	FlinkKafkaConsumerBase<Integer> kafkaSource = kafkaServer.getConsumer(topic, schema, props);

	env
		.addSource(kafkaSource)
		.map(new PartitionValidatingMapper(numPartitions, 1))
		.map(new FailingIdentityMapper<Integer>(failAfterElements))
		.addSink(new ValidatingExactlyOnceSink(totalElements)).setParallelism(1);

	FailingIdentityMapper.failedBefore = false;
	tryExecute(env, "multi-source-one-partitions exactly once test");

	deleteTestTopic(topic);
}
 
Example 14
Source File: StreamGraphGeneratorTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testBufferTimeout() {
	final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

	env.setBufferTimeout(77); // set timeout to some recognizable number

	env
		.fromElements(1, 2, 3, 4, 5)

		.map(value -> value)
			.setBufferTimeout(-1)
			.name("A")
		.map(value -> value)
			.setBufferTimeout(0)
			.name("B")
		.map(value -> value)
			.setBufferTimeout(12)
			.name("C")
		.map(value -> value)
			.name("D");

	final StreamGraph sg = env.getStreamGraph();
	for (StreamNode node : sg.getStreamNodes()) {
		switch (node.getOperatorName()) {

			case "A":
				assertEquals(77L, node.getBufferTimeout().longValue());
				break;
			case "B":
				assertEquals(0L, node.getBufferTimeout().longValue());
				break;
			case "C":
				assertEquals(12L, node.getBufferTimeout().longValue());
				break;
			case "D":
				assertEquals(77L, node.getBufferTimeout().longValue());
				break;
			default:
				assertTrue(node.getOperator() instanceof StreamSource);
		}
	}
}
 
Example 15
Source File: Throughput.java    From flink-perf with Apache License 2.0
public static void main(String[] args) throws Exception {
	final ParameterTool pt = ParameterTool.fromArgs(args);

	StreamExecutionEnvironment see = StreamExecutionEnvironment.getExecutionEnvironment();
	see.getConfig().setGlobalJobParameters(pt);
	see.setNumberOfExecutionRetries(0);

	if(pt.has("timeout")) {
		see.setBufferTimeout(pt.getLong("timeout"));
	}

	if(pt.has("ft")) {
		see.enableCheckpointing(pt.getLong("ft"));
	}

	DataStream<Type> source = see.addSource(new Source(pt) );

	DataStream<Type> repartitioned = source.partitionByHash(0);
	for(int i = 0; i < pt.getInt("repartitions", 1) - 1;i++) {
		repartitioned = repartitioned.map(new MapFunction<Type, Type>() {
			@Override
			public Type map(Type in) throws Exception {
				Type out = in.copy();
				out.f0++;
				return out;
			}
		}).partitionByHash(0);
	}
	repartitioned.flatMap(new FlatMapFunction<Type, Integer>() {
		public int host = -2;
		long received = 0;
		long start = 0;
		long logfreq = pt.getInt("logfreq");
		long lastLog = -1;
		long lastElements = 0;

		@Override
		public void flatMap(Type element, Collector<Integer> collector) throws Exception {
			if(host == -2) {
				host = convertHostnameToInt(InetAddress.getLocalHost().getHostName());
			}
			if (start == 0) {
				start = System.currentTimeMillis();
			}

			received++;
			if (received % logfreq == 0) {
				// throughput over entire time
				long now = System.currentTimeMillis();
				long sinceSec = ((now - start) / 1000);
				if (sinceSec == 0) return;
				LOG.info("Received {} elements since {}. Elements per second {}, GB received {}",
						received,
						sinceSec,
						received / sinceSec,
						(received * (8 + 8 + 4 + pt.getInt("payload"))) / 1024 / 1024 / 1024);

				// throughput for the last "logfreq" elements
				if(lastLog == -1) {
					// init (the first)
					lastLog = now;
					lastElements = received;
				} else {
					long timeDiff = now - lastLog;
					long elementDiff = received - lastElements;
					double ex = (1000/(double)timeDiff);
					LOG.info("During the last {} ms, we received {} elements. That's {} elements/second/core", timeDiff, elementDiff, elementDiff*ex);
					// reinit
					lastLog = now;
					lastElements = received;
				}
			}
			if (element.f2 != 0 /* && element.f1.equals(host) */) {
				long lat = System.currentTimeMillis() - element.f2;
				LOG.info("Latency {} ms from machine " + element.f1, lat);
			}
		}
	});
	// System.out.println("plan = " + see.getExecutionPlan());
	see.execute("Flink Throughput Job with: "+pt.toMap());
}
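Across the examples, three patterns recur: latency-sensitive tests and pipelines pin the timeout to 0 or a few milliseconds, the batch executors (Examples 5 and 7) disable flushing entirely with -1, and the benchmarking and job-submission frameworks (Examples 8, 11, and 15) expose the timeout as a user-supplied parameter rather than hard-coding it.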