org.apache.flink.streaming.api.operators.StreamingRuntimeContext Java Examples

The following examples show how to use org.apache.flink.streaming.api.operators.StreamingRuntimeContext. They are taken from open source projects; the source file, project, and license are noted above each example.
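Most of the examples share one pattern: a rich function or operator retrieves its RuntimeContext, casts it to StreamingRuntimeContext, and uses the streaming-only facilities (the checkpointing flag, the processing-time service, input split providers). Below is a minimal sketch of that pattern; the class and field names are illustrative and not taken from any of the projects listed here.

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;

public class CheckpointAwareSink extends RichSinkFunction<String> {

	private boolean flushOnCheckpoint = true;

	@Override
	public void open(Configuration parameters) throws Exception {
		RuntimeContext context = getRuntimeContext();
		// The cast is only valid inside a streaming task; guarding with
		// instanceof (as PulsarConsumerSource does below) keeps the function
		// usable in non-streaming test harnesses.
		if (context instanceof StreamingRuntimeContext
				&& !((StreamingRuntimeContext) context).isCheckpointingEnabled()) {
			flushOnCheckpoint = false;
		}
	}

	@Override
	public void invoke(String value) throws Exception {
		// Write the value; buffer until the next checkpoint when
		// flushOnCheckpoint is set.
	}
}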
Example #1
Source File: FlinkKafkaConsumerBaseTest.java    From flink with Apache License 2.0
@Override
protected AbstractFetcher<T, ?> createFetcher(
		SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> thisSubtaskPartitionsWithStartOffsets,
		SerializedValue<WatermarkStrategy<T>> watermarkStrategy,
		StreamingRuntimeContext runtimeContext,
		OffsetCommitMode offsetCommitMode,
		MetricGroup consumerMetricGroup,
		boolean useMetrics) throws Exception {
	return new TestingFetcher<T, String>(
			sourceContext,
			thisSubtaskPartitionsWithStartOffsets,
			watermarkStrategy,
			runtimeContext.getProcessingTimeService(),
			0L,
			getClass().getClassLoader(),
			consumerMetricGroup,
			useMetrics);
}
 
Example #2
Source File: BucketingSink.java    From Flink-CEPplus with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);

	state = new State<>();

	processingTimeService =
			((StreamingRuntimeContext) getRuntimeContext()).getProcessingTimeService();

	long currentProcessingTime = processingTimeService.getCurrentProcessingTime();

	processingTimeService.registerTimer(currentProcessingTime + inactiveBucketCheckInterval, this);

	this.clock = new Clock() {
		@Override
		public long currentTimeMillis() {
			return processingTimeService.getCurrentProcessingTime();
		}
	};
}
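BucketingSink registers itself as the timer target, so the class also implements the processing-time callback interface (org.apache.flink.streaming.runtime.tasks.ProcessingTimeCallback in Flink 1.x). A sketch of such a self-re-arming callback follows; the body is illustrative, not the sink's actual bucket-closing logic.

@Override
public void onProcessingTime(long timestamp) throws Exception {
	long currentProcessingTime = processingTimeService.getCurrentProcessingTime();

	// ... periodic work, e.g. close buckets that have been inactive too long ...

	// Re-arm the timer so the check repeats at the configured interval.
	processingTimeService.registerTimer(
			currentProcessingTime + inactiveBucketCheckInterval, this);
}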
 
Example #3
Source File: SourceFunctionUtil.java    From flink with Apache License 2.0
private static <T extends Serializable> List<T> runRichSourceFunction(SourceFunction<T> sourceFunction) throws Exception {
	try (MockEnvironment environment =
			new MockEnvironmentBuilder()
				.setTaskName("MockTask")
				.setManagedMemorySize(3 * 1024 * 1024)
				.setInputSplitProvider(new MockInputSplitProvider())
				.setBufferSize(1024)
				.build()) {

		AbstractStreamOperator<?> operator = mock(AbstractStreamOperator.class);
		when(operator.getExecutionConfig()).thenReturn(new ExecutionConfig());

		RuntimeContext runtimeContext = new StreamingRuntimeContext(
			operator,
			environment,
			new HashMap<>());
		((RichFunction) sourceFunction).setRuntimeContext(runtimeContext);
		((RichFunction) sourceFunction).open(new Configuration());

		return runNonRichSourceFunction(sourceFunction);
	}
}
 
Example #4
Source File: KafkaConsumer08Test.java    From flink with Apache License 2.0
@Test
public void testCreateSourceWithoutCluster() {
	try {
		Properties props = new Properties();
		props.setProperty("zookeeper.connect", "localhost:56794");
		props.setProperty("bootstrap.servers", "localhost:11111, localhost:22222");
		props.setProperty("group.id", "non-existent-group");
		props.setProperty(FlinkKafkaConsumer08.GET_PARTITIONS_RETRIES_KEY, "1");

		FlinkKafkaConsumer08<String> consumer = new FlinkKafkaConsumer08<>(
			Collections.singletonList("no op topic"), new SimpleStringSchema(), props);
		StreamingRuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
		Mockito.when(mockRuntimeContext.isCheckpointingEnabled()).thenReturn(true);
		consumer.setRuntimeContext(mockRuntimeContext);

		consumer.open(new Configuration());

		fail();
	}
	catch (Exception e) {
		assertTrue(e.getMessage().contains("Unable to retrieve any partitions"));
	}
}
 
Example #5
Source File: AbstractParallelSourceBase.java    From alibaba-flink-connectors with Apache License 2.0
@Override
public void open(Configuration config) throws IOException {
	this.initOperator(config);

	StreamingRuntimeContext context = (StreamingRuntimeContext) this.getRuntimeContext();
	if (null != context && null != context.getExecutionConfig()
		&& null != context.getExecutionConfig().getGlobalJobParameters()) {
		Map<String, String> globalParametersMap = context.getExecutionConfig().getGlobalJobParameters().toMap();
		if (null != globalParametersMap && globalParametersMap.size() != 0) {
			for (String s : globalParametersMap.keySet()) {
				config.setString(s, globalParametersMap.get(s));
			}
		}
	}

	if (disableParallelRead) {
		createSequenceReader(config);
	} else {
		createParallelReader(config);
	}

	LOG.info("Init source succ.");
}
 
Example #6
Source File: CollectSinkFunction.java    From flink with Apache License 2.0
private InetAddress getBindAddress() {
	RuntimeContext context = getRuntimeContext();
	Preconditions.checkState(
		context instanceof StreamingRuntimeContext,
		"CollectSinkFunction can only be used in StreamTask");
	StreamingRuntimeContext streamingContext = (StreamingRuntimeContext) context;
	String bindAddress = streamingContext.getTaskManagerRuntimeInfo().getTaskManagerBindAddress();

	if (bindAddress != null) {
		try {
			return InetAddress.getByName(bindAddress);
		} catch (UnknownHostException e) {
			return null;
		}
	}
	return null;
}
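For context, the real CollectSinkFunction opens a server socket on the resolved address so a client can pull results; a short sketch, with the ephemeral port and logging assumed rather than copied from the source:

// Bind a server socket on the TaskManager's address, letting the OS pick a port.
InetAddress bindAddress = getBindAddress();
if (bindAddress != null) {
	ServerSocket serverSocket = new ServerSocket(0, 0, bindAddress);
	LOG.info("Collect sink listening at {}:{}", bindAddress, serverSocket.getLocalPort());
}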
 
Example #7
Source File: SourceFunctionUtil.java    From Flink-CEPplus with Apache License 2.0
private static <T extends Serializable> List<T> runRichSourceFunction(SourceFunction<T> sourceFunction) throws Exception {
	try (MockEnvironment environment =
			new MockEnvironmentBuilder()
				.setTaskName("MockTask")
				.setMemorySize(3 * 1024 * 1024)
				.setInputSplitProvider(new MockInputSplitProvider())
				.setBufferSize(1024)
				.build()) {

		AbstractStreamOperator<?> operator = mock(AbstractStreamOperator.class);
		when(operator.getExecutionConfig()).thenReturn(new ExecutionConfig());

		RuntimeContext runtimeContext = new StreamingRuntimeContext(
			operator,
			environment,
			new HashMap<>());
		((RichFunction) sourceFunction).setRuntimeContext(runtimeContext);
		((RichFunction) sourceFunction).open(new Configuration());

		return runNonRichSourceFunction(sourceFunction);
	}
}
 
Example #8
Source File: BucketingSink.java    From flink with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
	super.open(parameters);

	state = new State<>();

	processingTimeService =
			((StreamingRuntimeContext) getRuntimeContext()).getProcessingTimeService();

	long currentProcessingTime = processingTimeService.getCurrentProcessingTime();

	processingTimeService.registerTimer(currentProcessingTime + inactiveBucketCheckInterval, this);

	this.clock = new Clock() {
		@Override
		public long currentTimeMillis() {
			return processingTimeService.getCurrentProcessingTime();
		}
	};
}
 
Example #9
Source File: FlinkPulsarRowSource.java    From pulsar-flink with Apache License 2.0
@Override
protected PulsarFetcher<Row> createFetcher(
        SourceContext sourceContext,
        Map<String, MessageId> seedTopicsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<Row>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<Row>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        ClassLoader userCodeClassLoader,
        StreamingRuntimeContext streamingRuntime) throws Exception {

    return new PulsarRowFetcher(
            sourceContext,
            seedTopicsWithInitialOffsets,
            watermarksPeriodic,
            watermarksPunctuated,
            processingTimeProvider,
            autoWatermarkInterval,
            userCodeClassLoader,
            streamingRuntime,
            clientConfigurationData,
            readerConf,
            pollTimeoutMs,
            null,
            metadataReader);
}
 
Example #10
Source File: RocketMQSink.java    From flink-learning with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
    Validate.notEmpty(props, "Producer properties can not be empty");
    Validate.notNull(topicSelector, "TopicSelector can not be null");
    Validate.notNull(serializationSchema, "KeyValueSerializationSchema can not be null");

    producer = new DefaultMQProducer();
    producer.setInstanceName(String.valueOf(getRuntimeContext().getIndexOfThisSubtask()));
    RocketMQConfig.buildProducerConfigs(props, producer);

    batchList = new LinkedList<>();

    if (batchFlushOnCheckpoint && !((StreamingRuntimeContext) getRuntimeContext()).isCheckpointingEnabled()) {
        LOG.warn("Flushing on checkpoint is enabled, but checkpointing is not enabled. Disabling flushing.");
        batchFlushOnCheckpoint = false;
    }

    try {
        producer.start();
    } catch (MQClientException e) {
        throw new RuntimeException(e);
    }
}
 
Example #11
Source File: FlinkPulsarSinkBase.java    From pulsar-flink with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
    if (flushOnCheckpoint && !((StreamingRuntimeContext) this.getRuntimeContext()).isCheckpointingEnabled()) {
        log.warn("Flushing on checkpoint is enabled, but checkpointing is not enabled. Disabling flushing.");
        flushOnCheckpoint = false;
    }

    admin = PulsarAdminUtils.newAdminFromConf(adminUrl, clientConfigurationData);

    if (forcedTopic) {
        uploadSchema(defaultTopic);
        singleProducer = createProducer(clientConfigurationData, producerConf, defaultTopic, getPulsarSchema());
    } else {
        topic2Producer = new HashMap<>();
    }
}
 
Example #12
Source File: KafkaConsumer08Test.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testCreateSourceWithoutCluster() {
	try {
		Properties props = new Properties();
		props.setProperty("zookeeper.connect", "localhost:56794");
		props.setProperty("bootstrap.servers", "localhost:11111, localhost:22222");
		props.setProperty("group.id", "non-existent-group");
		props.setProperty(FlinkKafkaConsumer08.GET_PARTITIONS_RETRIES_KEY, "1");

		FlinkKafkaConsumer08<String> consumer = new FlinkKafkaConsumer08<>(
			Collections.singletonList("no op topic"), new SimpleStringSchema(), props);
		StreamingRuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
		Mockito.when(mockRuntimeContext.isCheckpointingEnabled()).thenReturn(true);
		consumer.setRuntimeContext(mockRuntimeContext);

		consumer.open(new Configuration());

		fail();
	}
	catch (Exception e) {
		assertTrue(e.getMessage().contains("Unable to retrieve any partitions"));
	}
}
 
Example #13
Source File: InputFormatSourceFunction.java    From flink with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
public void open(Configuration parameters) throws Exception {
	StreamingRuntimeContext context = (StreamingRuntimeContext) getRuntimeContext();

	if (format instanceof RichInputFormat) {
		((RichInputFormat) format).setRuntimeContext(context);
	}
	format.configure(parameters);

	provider = context.getInputSplitProvider();
	serializer = typeInfo.createSerializer(getRuntimeContext().getExecutionConfig());
	splitIterator = getInputSplits();
	isRunning = splitIterator.hasNext();
}
 
Example #14
Source File: KuduSink.java    From flink-learning with Apache License 2.0
/**
 * If the Flink checkpoint is disabled, data is written to Kudu synchronously.
 * <p>If the Flink checkpoint is enabled, data is written to Kudu asynchronously by default.
 *
 * <p>(Note: async mode may result in out-of-order writes to Kudu.
 * You can switch back to sync mode by explicitly calling {@link KuduSink#withSyncFlushMode()} when initializing the KuduSink.)
 *
 * @return flushMode
 */
private FlushMode getflushMode() {
    FlushMode flushMode = FlushMode.AUTO_FLUSH_SYNC;
    boolean enableCheckpoint = ((StreamingRuntimeContext) getRuntimeContext()).isCheckpointingEnabled();
    if (enableCheckpoint && this.flushMode == null) {
        flushMode = FlushMode.AUTO_FLUSH_BACKGROUND;
    }
    if (enableCheckpoint && this.flushMode != null) {
        flushMode = this.flushMode;
    }
    return flushMode;
}
 
Example #15
Source File: PulsarConsumerSource.java    From pulsar with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
    super.open(parameters);

    final RuntimeContext context = getRuntimeContext();
    if (context instanceof StreamingRuntimeContext) {
        isCheckpointingEnabled = ((StreamingRuntimeContext) context).isCheckpointingEnabled();
    }

    client = getClient();
    consumer = createConsumer(client);

    isRunning = true;
}
 
Example #16
Source File: SeedUrlSource.java    From flink-crawler with Apache License 2.0
@Override
public void open(Configuration parameters) throws Exception {
    super.open(parameters);

    StreamingRuntimeContext context = (StreamingRuntimeContext) getRuntimeContext();
    int parallelism = context.getNumberOfParallelSubtasks();
    if (parallelism != 1) {
        throw new IllegalStateException("SeedUrlSource only supports a parallelism of 1");
    }

    if (_terminator == null) {
        throw new IllegalStateException("Crawl terminator must be set for the seed URL source");
    }
    
    LOGGER.info("Opening seed URL source");

    // Open the terminator, so that it knows when we really started running.
    _terminator.open();
    
    _urlIndex = 0;
    
    if (useS3File()) {
        AmazonS3 s3Client = S3Utils.makeS3Client();
        S3Object object = s3Client
                .getObject(new GetObjectRequest(_seedUrlsS3Bucket, _seedUrlsS3Path));
        _s3FileStream = object.getObjectContent();
    }
}
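Since open() throws unless the source runs with a parallelism of 1, the job wiring has to pin it explicitly. A hypothetical snippet (the SeedUrlSource constructor arguments are invented for illustration):

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
env.addSource(new SeedUrlSource(terminator, seedUrlsS3Bucket, seedUrlsS3Path))
		.setParallelism(1)
		.print();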
 
Example #17
Source File: FlinkPulsarSourceTest.java    From pulsar-flink with Apache License 2.0
@Override
protected PulsarFetcher<T> createFetcher(
        SourceContext sourceContext,
        Map<String, MessageId> seedTopicsWithInitialOffsets,
        SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
        SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
        ProcessingTimeService processingTimeProvider,
        long autoWatermarkInterval,
        ClassLoader userCodeClassLoader,
        StreamingRuntimeContext streamingRuntime) throws Exception {
    return testFetcherSupplier.get();
}
 
Example #18
Source File: FlinkKafkaConsumer010.java    From flink with Apache License 2.0
@Override
protected AbstractFetcher<T, ?> createFetcher(
		SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
		SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
		SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
		StreamingRuntimeContext runtimeContext,
		OffsetCommitMode offsetCommitMode,
		MetricGroup consumerMetricGroup,
		boolean useMetrics) throws Exception {

	// make sure that auto commit is disabled when our offset commit mode is ON_CHECKPOINTS;
	// this overwrites whatever setting the user configured in the properties
	adjustAutoCommitConfig(properties, offsetCommitMode);

	FlinkConnectorRateLimiter rateLimiter = super.getRateLimiter();
	// If a rateLimiter is set, then call rateLimiter.open() with the runtime context.
	if (rateLimiter != null) {
		rateLimiter.open(runtimeContext);
	}

	return new Kafka010Fetcher<>(
			sourceContext,
			assignedPartitionsWithInitialOffsets,
			watermarksPeriodic,
			watermarksPunctuated,
			runtimeContext.getProcessingTimeService(),
			runtimeContext.getExecutionConfig().getAutoWatermarkInterval(),
			runtimeContext.getUserCodeClassLoader(),
			runtimeContext.getTaskNameWithSubtasks(),
			deserializer,
			properties,
			pollTimeout,
			runtimeContext.getMetricGroup(),
			consumerMetricGroup,
			useMetrics,
			rateLimiter);
}
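The rate limiter is opened with the runtime context so it can divide its budget among the parallel subtasks. A hedged sketch of the corresponding user-side wiring, assuming the Flink 1.8+ connector rate-limiting API (GuavaFlinkConnectorRateLimiter, setRateLimiter):

Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092");
props.setProperty("group.id", "rate-limited-group");

FlinkKafkaConsumer010<String> consumer =
		new FlinkKafkaConsumer010<>("topic", new SimpleStringSchema(), props);

// Cap consumption; the limiter is opened with the runtime context in createFetcher().
GuavaFlinkConnectorRateLimiter rateLimiter = new GuavaFlinkConnectorRateLimiter();
rateLimiter.setRate(1_000_000L); // bytes per second
consumer.setRateLimiter(rateLimiter);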
 
Example #19
Source File: AMQSource.java    From bahir-flink with Apache License 2.0
@Override
public void open(Configuration config) throws Exception {
    super.open(config);
    // Create a Connection
    connection = connectionFactory.createConnection();
    connection.start();

    exceptionListener = new AMQExceptionListener(LOG, logFailuresOnly);
    connection.setExceptionListener(exceptionListener);

    RuntimeContext runtimeContext = getRuntimeContext();
    int acknowledgeType;
    if (runtimeContext instanceof StreamingRuntimeContext
        && ((StreamingRuntimeContext) runtimeContext).isCheckpointingEnabled()) {
        autoAck = false;
        acknowledgeType = ActiveMQSession.INDIVIDUAL_ACKNOWLEDGE;
    } else {
        autoAck = true;
        acknowledgeType = ActiveMQSession.AUTO_ACKNOWLEDGE;
    }
    // Create a Session
    session = connection.createSession(false, acknowledgeType);

    // Create the destination (Topic or Queue)
    Destination destination = AMQUtil.getDestination(session, destinationType, destinationName);

    // Create a MessageConsumer from the Session to the Topic or
    // Queue
    consumer = session.createConsumer(destination);
    runningChecker.setIsRunning(true);
}
 
Example #20
Source File: NonBufferOverWindowOperatorTest.java    From flink with Apache License 2.0
private void test(boolean[] resetAccumulators, GenericRow[] expect) throws Exception {
	operator = new NonBufferOverWindowOperator(functions, comparator, resetAccumulators) {
		{
			output = new ConsumerOutput(new Consumer<BaseRow>() {
				@Override
				public void accept(BaseRow r) {
					collect.add(GenericRow.of(r.getInt(0), r.getLong(1),
							r.getLong(2), r.getLong(3), r.getLong(4)));
				}
			});
		}

		@Override
		public ClassLoader getUserCodeClassloader() {
			return Thread.currentThread().getContextClassLoader();
		}

		@Override
		public StreamConfig getOperatorConfig() {
			StreamConfig conf = mock(StreamConfig.class);
			when(conf.<BaseRow>getTypeSerializerIn1(getUserCodeClassloader()))
					.thenReturn(inputSer);
			return conf;
		}

		@Override
		public StreamingRuntimeContext getRuntimeContext() {
			return mock(StreamingRuntimeContext.class);
		}
	};
	operator.open();
	addRow(0, 1L, 4L);
	addRow(0, 1L, 1L);
	addRow(1, 5L, 2L);
	addRow(2, 5L, 4L);
	addRow(2, 6L, 2L);
	GenericRow[] outputs = this.collect.toArray(new GenericRow[0]);
	Assert.assertArrayEquals(expect, outputs);
}
 
Example #21
Source File: KafkaConsumer08Test.java    From flink with Apache License 2.0
@Test
public void testAllBoostrapServerHostsAreInvalid() {
	try {
		String unknownHost = "foobar:11111";

		URL unknownHostURL = NetUtils.getCorrectHostnamePort(unknownHost);

		PowerMockito.mockStatic(InetAddress.class);
		when(InetAddress.getByName(Matchers.eq(unknownHostURL.getHost()))).thenThrow(new UnknownHostException("Test exception"));

		String zookeeperConnect = "localhost:56794";
		String groupId = "non-existent-group";
		Properties props = createKafkaProps(zookeeperConnect, unknownHost, groupId);

		FlinkKafkaConsumer08<String> consumer = new FlinkKafkaConsumer08<>(
			Collections.singletonList("no op topic"), new SimpleStringSchema(), props);
		StreamingRuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
		Mockito.when(mockRuntimeContext.isCheckpointingEnabled()).thenReturn(true);
		consumer.setRuntimeContext(mockRuntimeContext);

		consumer.open(new Configuration());

		fail();
	} catch (Exception expected) {
		assertTrue("Exception should be thrown containing 'all bootstrap servers invalid' message!",
				expected.getMessage().contains("All the servers provided in: '" + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG
						+ "' config are invalid"));
	}
}
 
Example #22
Source File: FlinkKafkaConsumer08.java    From flink with Apache License 2.0
@Override
protected AbstractFetcher<T, ?> createFetcher(
		SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
		SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
		SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
		StreamingRuntimeContext runtimeContext,
		OffsetCommitMode offsetCommitMode,
		MetricGroup consumerMetricGroup,
		boolean useMetrics) throws Exception {

	long autoCommitInterval = (offsetCommitMode == OffsetCommitMode.KAFKA_PERIODIC)
			? PropertiesUtil.getLong(kafkaProperties, "auto.commit.interval.ms", 60000)
			: -1; // this disables the periodic offset committer thread in the fetcher

	return new Kafka08Fetcher<>(
			sourceContext,
			assignedPartitionsWithInitialOffsets,
			watermarksPeriodic,
			watermarksPunctuated,
			runtimeContext,
			deserializer,
			kafkaProperties,
			autoCommitInterval,
			consumerMetricGroup,
			useMetrics);
}
 
Example #23
Source File: Kafka08Fetcher.java    From flink with Apache License 2.0
public Kafka08Fetcher(
		SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> seedPartitionsWithInitialOffsets,
		SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
		SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
		StreamingRuntimeContext runtimeContext,
		KafkaDeserializationSchema<T> deserializer,
		Properties kafkaProperties,
		long autoCommitInterval,
		MetricGroup consumerMetricGroup,
		boolean useMetrics) throws Exception {
	super(
			sourceContext,
			seedPartitionsWithInitialOffsets,
			watermarksPeriodic,
			watermarksPunctuated,
			runtimeContext.getProcessingTimeService(),
			runtimeContext.getExecutionConfig().getAutoWatermarkInterval(),
			runtimeContext.getUserCodeClassLoader(),
			consumerMetricGroup,
			useMetrics);

	this.deserializer = checkNotNull(deserializer);
	this.kafkaConfig = checkNotNull(kafkaProperties);
	this.runtimeContext = runtimeContext;
	this.invalidOffsetBehavior = getInvalidOffsetBehavior(kafkaProperties);
	this.autoCommitInterval = autoCommitInterval;
}
 
Example #24
Source File: NonBufferOverWindowOperatorTest.java    From flink with Apache License 2.0
private void test(boolean[] resetAccumulators, GenericRowData[] expect) throws Exception {
	operator = new NonBufferOverWindowOperator(functions, comparator, resetAccumulators) {
		{
			output = new ConsumerOutput(new Consumer<RowData>() {
				@Override
				public void accept(RowData r) {
					collect.add(GenericRowData.of(r.getInt(0), r.getLong(1),
							r.getLong(2), r.getLong(3), r.getLong(4)));
				}
			});
		}

		@Override
		public ClassLoader getUserCodeClassloader() {
			return Thread.currentThread().getContextClassLoader();
		}

		@Override
		public StreamConfig getOperatorConfig() {
			StreamConfig conf = mock(StreamConfig.class);
			when(conf.<RowData>getTypeSerializerIn1(getUserCodeClassloader()))
					.thenReturn(inputSer);
			return conf;
		}

		@Override
		public StreamingRuntimeContext getRuntimeContext() {
			return mock(StreamingRuntimeContext.class);
		}
	};
	operator.open();
	addRow(0, 1L, 4L);
	addRow(0, 1L, 1L);
	addRow(1, 5L, 2L);
	addRow(2, 5L, 4L);
	addRow(2, 6L, 2L);
	GenericRowData[] outputs = this.collect.toArray(new GenericRowData[0]);
	Assert.assertArrayEquals(expect, outputs);
}
 
Example #25
Source File: UnboundedSourceWrapper.java    From flink-dataflow with Apache License 2.0
private void setNextWatermarkTimer(StreamingRuntimeContext runtime) {
	if (this.isRunning) {
	long watermarkInterval = runtime.getExecutionConfig().getAutoWatermarkInterval();
		long timeToNextWatermark = getTimeToNextWaternark(watermarkInterval);
		runtime.registerTimer(timeToNextWatermark, this);
	}
}
 
Example #26
Source File: FlinkKafkaConsumer09.java    From flink with Apache License 2.0
@Override
protected AbstractFetcher<T, ?> createFetcher(
		SourceContext<T> sourceContext,
		Map<KafkaTopicPartition, Long> assignedPartitionsWithInitialOffsets,
		SerializedValue<AssignerWithPeriodicWatermarks<T>> watermarksPeriodic,
		SerializedValue<AssignerWithPunctuatedWatermarks<T>> watermarksPunctuated,
		StreamingRuntimeContext runtimeContext,
		OffsetCommitMode offsetCommitMode,
		MetricGroup consumerMetricGroup,
		boolean useMetrics) throws Exception {

	// make sure that auto commit is disabled when our offset commit mode is ON_CHECKPOINTS;
	// this overwrites whatever setting the user configured in the properties
	adjustAutoCommitConfig(properties, offsetCommitMode);

	// If a rateLimiter is set, then call rateLimiter.open() with the runtime context.
	if (rateLimiter != null) {
		rateLimiter.open(runtimeContext);
	}

	return new Kafka09Fetcher<>(
			sourceContext,
			assignedPartitionsWithInitialOffsets,
			watermarksPeriodic,
			watermarksPunctuated,
			runtimeContext.getProcessingTimeService(),
			runtimeContext.getExecutionConfig().getAutoWatermarkInterval(),
			runtimeContext.getUserCodeClassLoader(),
			runtimeContext.getTaskNameWithSubtasks(),
			deserializer,
			properties,
			pollTimeout,
			runtimeContext.getMetricGroup(),
			consumerMetricGroup,
			useMetrics,
			rateLimiter);
}
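A closing note on the testing pattern that recurs above: because rich functions only see the RuntimeContext interface, a StreamingRuntimeContext can simply be mocked and injected. A condensed sketch using Mockito, with a hypothetical myRichFunction under test:

StreamingRuntimeContext mockContext = mock(StreamingRuntimeContext.class);
when(mockContext.isCheckpointingEnabled()).thenReturn(true);
when(mockContext.getIndexOfThisSubtask()).thenReturn(0);

myRichFunction.setRuntimeContext(mockContext);
myRichFunction.open(new Configuration());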