Java Code Examples for org.apache.flink.api.common.functions.RuntimeContext
The following examples show how to use
org.apache.flink.api.common.functions.RuntimeContext. These examples are extracted from open source projects.
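Before the extracted examples, here is a minimal self-contained sketch of the most common pattern: a rich function reads its subtask index and registers a metric through the RuntimeContext it receives once open() is called. The class name AnnotatingMapper and the metric name are illustrative, not taken from any project below.

import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.metrics.Counter;

public class AnnotatingMapper extends RichMapFunction<String, String> {

    private transient Counter mappedRecords;
    private transient int subtaskIndex;

    @Override
    public void open(Configuration parameters) throws Exception {
        // The RuntimeContext is only available once the function has been opened.
        RuntimeContext ctx = getRuntimeContext();
        subtaskIndex = ctx.getIndexOfThisSubtask();
        mappedRecords = ctx.getMetricGroup().counter("mappedRecords");
    }

    @Override
    public String map(String value) {
        mappedRecords.inc();
        return subtaskIndex + ": " + value;
    }
}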
Example 1
Source Project: flink | Source File: NiFiSinkTopologyExample.java | License: Apache License 2.0

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    SiteToSiteClientConfig clientConfig = new SiteToSiteClient.Builder()
            .url("http://localhost:8080/nifi")
            .portName("Data from Flink")
            .buildConfig();

    DataStreamSink<String> dataStream = env.fromElements("one", "two", "three", "four", "five", "q")
            .addSink(new NiFiSink<>(clientConfig, new NiFiDataPacketBuilder<String>() {
                @Override
                public NiFiDataPacket createNiFiDataPacket(String s, RuntimeContext ctx) {
                    return new StandardNiFiDataPacket(s.getBytes(ConfigConstants.DEFAULT_CHARSET),
                            new HashMap<String, String>());
                }
            }));

    env.execute();
}
Example 2
Source Project: flink | Source File: MapOperatorBase.java | License: Apache License 2.0

@Override
protected List<OUT> executeOnCollections(List<IN> inputData, RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception {
    MapFunction<IN, OUT> function = this.userFunction.getUserCodeObject();

    FunctionUtils.setFunctionRuntimeContext(function, ctx);
    FunctionUtils.openFunction(function, this.parameters);

    ArrayList<OUT> result = new ArrayList<OUT>(inputData.size());

    TypeSerializer<IN> inSerializer = getOperatorInfo().getInputType().createSerializer(executionConfig);
    TypeSerializer<OUT> outSerializer = getOperatorInfo().getOutputType().createSerializer(executionConfig);

    for (IN element : inputData) {
        IN inCopy = inSerializer.copy(element);
        OUT out = function.map(inCopy);
        result.add(outSerializer.copy(out));
    }

    FunctionUtils.closeFunction(function);

    return result;
}
Example 3
Source Project: Flink-CEPplus | Source File: KinesisDataFetcher.java | License: Apache License 2.0

/**
 * Creates a Kinesis Data Fetcher.
 *
 * @param streams the streams to subscribe to
 * @param sourceContext context of the source function
 * @param runtimeContext this subtask's runtime context
 * @param configProps the consumer configuration properties
 * @param deserializationSchema deserialization schema
 */
public KinesisDataFetcher(List<String> streams,
        SourceFunction.SourceContext<T> sourceContext,
        RuntimeContext runtimeContext,
        Properties configProps,
        KinesisDeserializationSchema<T> deserializationSchema,
        KinesisShardAssigner shardAssigner,
        AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
        WatermarkTracker watermarkTracker) {
    this(streams,
            sourceContext,
            sourceContext.getCheckpointLock(),
            runtimeContext,
            configProps,
            deserializationSchema,
            shardAssigner,
            periodicWatermarkAssigner,
            watermarkTracker,
            new AtomicReference<>(),
            new ArrayList<>(),
            createInitialSubscribedStreamsToLastDiscoveredShardsState(streams),
            KinesisProxy::create);
}
Example 4
Source Project: da-streamingledger | Source File: SerialTransactor.java | License: Apache License 2.0

@Override
public void open(Configuration parameters) throws Exception {
    super.open(parameters);
    final RuntimeContext runtimeContext = getRuntimeContext();
    SingleStreamSerialTransactor<Object, Object>[] transactors = newSingleStreamSerialTransactorArray(specs.size());

    // initialize the individual transactors
    for (int streamTag = 0; streamTag < specs.size(); streamTag++) {
        StreamingLedgerSpec<?, ?> aSpec = specs.get(streamTag);
        OutputTag<?> aTag = sideOutputs.get(streamTag);
        transactors[streamTag] = singleStreamSerialTransactorFromSpec(aSpec, aTag, collector, runtimeContext);
    }
    this.transactors = transactors;
}
Example 5
Source Project: flink | Source File: OperatorStateInputFormat.java | License: Apache License 2.0

private static OperatorStateBackend createOperatorStateBackend(
        RuntimeContext runtimeContext,
        Collection<OperatorStateHandle> stateHandles,
        CloseableRegistry cancelStreamRegistry) {

    try {
        return new DefaultOperatorStateBackendBuilder(
                runtimeContext.getUserCodeClassLoader(),
                runtimeContext.getExecutionConfig(),
                false,
                stateHandles,
                cancelStreamRegistry
        ).build();
    } catch (BackendBuildingException e) {
        throw new RuntimeException(e);
    }
}
Example 6
Source Project: flink | Source File: SavepointEnvironment.java | License: Apache License 2.0

private SavepointEnvironment(
        RuntimeContext ctx,
        Configuration configuration,
        int maxParallelism,
        int indexOfSubtask,
        PrioritizedOperatorSubtaskState prioritizedOperatorSubtaskState) {

    this.jobID = new JobID();
    this.vertexID = new JobVertexID();
    this.attemptID = new ExecutionAttemptID();
    this.ctx = Preconditions.checkNotNull(ctx);
    this.configuration = Preconditions.checkNotNull(configuration);

    Preconditions.checkArgument(maxParallelism > 0 && indexOfSubtask < maxParallelism);
    this.maxParallelism = maxParallelism;
    this.indexOfSubtask = indexOfSubtask;

    this.registry = new KvStateRegistry().createTaskRegistry(jobID, vertexID);
    this.taskStateManager = new SavepointTaskStateManager(prioritizedOperatorSubtaskState);
    this.ioManager = new IOManagerAsync();
    this.accumulatorRegistry = new AccumulatorRegistry(jobID, attemptID);
}
Example 7
Source Project: bahir-flink | Source File: FlumeSinkTest.java | License: Apache License 2.0

@Test
public void testSink() throws Exception {
    StreamExecutionEnvironment environment = StreamExecutionEnvironment.getExecutionEnvironment();

    FlumeEventBuilder<String> flumeEventBuilder = new FlumeEventBuilder<String>() {
        @Override
        public Event createFlumeEvent(String value, RuntimeContext ctx) {
            return EventBuilder.withBody(value, Charset.forName("UTF-8"));
        }
    };

    FlumeSink<String> flumeSink = new FlumeSink<>("default", "172.25.0.3", 44444, flumeEventBuilder, 1, 1, 1);
    environment.fromElements("string1", "string2").addSink(flumeSink);

    tryExecute(environment, "FlumeTest");
}
Example 8
Source Project: flink | Source File: DynamoDBStreamsDataFetcher.java | License: Apache License 2.0

/**
 * Constructor.
 *
 * @param streams list of streams to fetch data
 * @param sourceContext source context
 * @param runtimeContext runtime context
 * @param configProps config properties
 * @param deserializationSchema deserialization schema
 * @param shardAssigner shard assigner
 */
public DynamoDBStreamsDataFetcher(List<String> streams,
        SourceFunction.SourceContext<T> sourceContext,
        RuntimeContext runtimeContext,
        Properties configProps,
        KinesisDeserializationSchema<T> deserializationSchema,
        KinesisShardAssigner shardAssigner) {

    super(streams,
            sourceContext,
            sourceContext.getCheckpointLock(),
            runtimeContext,
            configProps,
            deserializationSchema,
            shardAssigner,
            null,
            null,
            new AtomicReference<>(),
            new ArrayList<>(),
            createInitialSubscribedStreamsToLastDiscoveredShardsState(streams),
            // use DynamoDBStreamsProxy
            DynamoDBStreamsProxy::create);
}
Example 9
Source Project: flink | Source File: JoinRecordStateViews.java | License: Apache License 2.0

/**
 * Creates a {@link JoinRecordStateView} depending on the {@link JoinInputSideSpec}.
 */
public static JoinRecordStateView create(
        RuntimeContext ctx,
        String stateName,
        JoinInputSideSpec inputSideSpec,
        RowDataTypeInfo recordType,
        long retentionTime) {
    StateTtlConfig ttlConfig = createTtlConfig(retentionTime);
    if (inputSideSpec.hasUniqueKey()) {
        if (inputSideSpec.joinKeyContainsUniqueKey()) {
            return new JoinKeyContainsUniqueKey(ctx, stateName, recordType, ttlConfig);
        } else {
            return new InputSideHasUniqueKey(
                    ctx,
                    stateName,
                    recordType,
                    inputSideSpec.getUniqueKeyType(),
                    inputSideSpec.getUniqueKeySelector(),
                    ttlConfig);
        }
    } else {
        return new InputSideHasNoUniqueKey(ctx, stateName, recordType, ttlConfig);
    }
}
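View classes like JoinKeyContainsUniqueKey above ultimately obtain their keyed state through the RuntimeContext. A hedged sketch of that underlying call; the helper class, descriptor name, and TTL policy here are illustrative, not the actual Flink internals:

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.api.common.state.StateTtlConfig;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.common.time.Time;

// Illustrative helper, not part of the Flink sources quoted above.
final class StateAccessSketch {

    static ValueState<Long> countState(RuntimeContext ctx, long retentionTimeMs) {
        ValueStateDescriptor<Long> descriptor = new ValueStateDescriptor<>("recordCount", Long.class);
        if (retentionTimeMs > 0) {
            // Expire entries that have not been written for the retention time.
            StateTtlConfig ttlConfig = StateTtlConfig.newBuilder(Time.milliseconds(retentionTimeMs)).build();
            descriptor.enableTimeToLive(ttlConfig);
        }
        // Only legal on a keyed stream and after the function has been opened.
        return ctx.getState(descriptor);
    }
}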
Example 10
Source Project: flink | Source File: SortPartitionOperatorBase.java | License: Apache License 2.0

@Override
protected List<IN> executeOnCollections(List<IN> inputData, RuntimeContext runtimeContext, ExecutionConfig executionConfig) {
    TypeInformation<IN> inputType = getInput().getOperatorInfo().getOutputType();

    int[] sortColumns = this.partitionOrdering.getFieldPositions();
    boolean[] sortOrderings = this.partitionOrdering.getFieldSortDirections();

    final TypeComparator<IN> sortComparator;
    if (inputType instanceof CompositeType) {
        sortComparator = ((CompositeType<IN>) inputType).createComparator(sortColumns, sortOrderings, 0, executionConfig);
    } else if (inputType instanceof AtomicType) {
        sortComparator = ((AtomicType) inputType).createComparator(sortOrderings[0], executionConfig);
    } else {
        throw new UnsupportedOperationException("Partition sorting does not support type " + inputType + " yet.");
    }

    Collections.sort(inputData, new Comparator<IN>() {
        @Override
        public int compare(IN o1, IN o2) {
            return sortComparator.compare(o1, o2);
        }
    });

    return inputData;
}
Example 11
Source Project: alibaba-flink-connectors | Source File: DatahubRecordReader.java | License: Apache License 2.0

@Override
public void open(InputSplit split, RuntimeContext context) throws IOException {
    this.currentSplit = (DatahubShardInputSplit) split;
    this.shardId = this.currentSplit.getShardId();
    for (ShardEntry shardEntry : initShardList) {
        if (shardId.equalsIgnoreCase(shardEntry.getShardId())) {
            shard = shardEntry;
            shardState = shard.getState();
            break;
        }
    }
    GetCursorResult cursorResult = this.clientProvider.getClient().getCursor(
            this.topic.getProjectName(), this.topic.getTopicName(), this.shardId,
            CursorType.SYSTEM_TIME, this.currentSplit.getStartTime().getTime());
    this.cursor = cursorResult.getCursor();
    this.sequence = cursorResult.getSequence();
    if (stopInMs != Long.MAX_VALUE) {
        cursorResult = this.clientProvider.getClient().getCursor(
                this.topic.getProjectName(), this.topic.getTopicName(), this.shardId,
                CursorType.SYSTEM_TIME, stopInMs);
        this.stopCursor = cursorResult.getCursor();
    }
    LOG.info("Open " + String.valueOf(split) + ": sequence-" + sequence
            + " using startInMs" + String.valueOf(this.currentSplit.getStartTime().getTime())
            + " Cursor:" + cursor);
    initPartitionNumsListener();
}
Example 12
Source Project: flink | Source File: ReplicatingInputFormat.java | License: Apache License 2.0

@Override
public RuntimeContext getRuntimeContext() {
    if (this.replicatedIF instanceof RichInputFormat) {
        return ((RichInputFormat) this.replicatedIF).getRuntimeContext();
    } else {
        throw new RuntimeException("The underlying input format to this ReplicatingInputFormat isn't context aware");
    }
}
Example 13
Source Project: alibaba-flink-connectors | Source File: ParallelReader.java | License: Apache License 2.0

public ParallelReader(
        RuntimeContext context,
        Configuration config,
        long watermarkInterval,
        boolean tracingMetricEnabled,
        int sampleInterval) {
    this.context = context;
    this.config = config;
    this.watermarkInterval = watermarkInterval;
    splitPipeLen = config.getInteger(SPLIT_PIPE_LEN_CONFIG, 10);
    idleInterval = config.getInteger(IDLE_INTERVAL_CONFIG, 10);
    LOG.info("idleInterval:" + idleInterval);
    LOG.info("splitPipeLen:" + splitPipeLen);

    context.getMetricGroup().gauge(MetricUtils.METRICS_DELAY, new DelayGauge(readerRunners, DelayKind.DELAY));
    context.getMetricGroup().gauge(MetricUtils.METRICS_FETCHED_DELAY, new DelayGauge(readerRunners, DelayKind.FETCHED_DELAY));
    context.getMetricGroup().gauge(MetricUtils.METRICS_NO_DATA_DELAY, new DelayGauge(readerRunners, DelayKind.NO_DATA_DELAY));
    outputCounter = context.getMetricGroup().counter(MetricUtils.METRICS_TPS + "_counter", new SimpleCounter());
    tpsMetric = context.getMetricGroup().meter(MetricUtils.METRICS_TPS, new MeterView(outputCounter, 60));

    this.tracingMetricEnabled = tracingMetricEnabled;
    this.sampleInterval = sampleInterval;
    if (this.tracingMetricEnabled) {
        partitionLatency = new SumAndCount(MetricUtils.METRICS_SOURCE_PARTITION_LATENCY, context.getMetricGroup());
        processLatency = new SumAndCount(MetricUtils.METRICS_SOURCE_PROCESS_LATENCY, context.getMetricGroup());
        partitionCount = context.getMetricGroup().gauge(MetricUtils.METRICS_SOURCE_PARTITION_COUNT, new Gauge<Integer>() {
            @Override
            public Integer getValue() {
                int count = 0;
                for (ReaderRunner runner : readerRunners) {
                    if (!runner.finished) {
                        count++;
                    }
                }
                return count;
            }
        });
    }
}
Example 14
Source Project: Flink-CEPplus | Source File: NFA.java | License: Apache License 2.0

/**
 * Initialization method for the NFA. It is called before any element is passed and thus suitable for one time setup work.
 *
 * @param cepRuntimeContext runtime context of the enclosing operator
 * @param conf The configuration containing the parameters attached to the contract.
 */
public void open(RuntimeContext cepRuntimeContext, Configuration conf) throws Exception {
    for (State<T> state : getStates()) {
        for (StateTransition<T> transition : state.getStateTransitions()) {
            IterativeCondition condition = transition.getCondition();
            FunctionUtils.setFunctionRuntimeContext(condition, cepRuntimeContext);
            FunctionUtils.openFunction(condition, conf);
        }
    }
}
Example 15
Source Project: alibaba-flink-connectors | Source File: MetricUtils.java | License: Apache License 2.0

public static Meter registerOutBps(RuntimeContext context, String connectorType) {
    Counter bpsCounter = context.getMetricGroup().addGroup(METRIC_GROUP_SINK)
            .counter(METRICS_SINK_OUT_BPS + "_counter", new SimpleCounter());
    String tag = "";
    if (!StringUtils.isNullOrWhitespaceOnly(connectorType)) {
        tag = ":" + METRICS_TAG_CONNECTOR_TYPE + "=" + connectorType;
    }
    return context.getMetricGroup().addGroup(METRIC_GROUP_SINK)
            .meter(METRICS_SINK_OUT_BPS + tag, new MeterView(bpsCounter, 60));
}
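A hedged usage sketch for the helper above, inside an illustrative sink: MetricUtils is the class from this example, while the sink name, the "datahub" connector tag, and the byte accounting are assumptions.

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.metrics.Meter;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;

// Illustrative sink that reports its output rate through registerOutBps.
public class BytesCountingSink extends RichSinkFunction<byte[]> {

    private transient Meter outBps;

    @Override
    public void open(Configuration parameters) throws Exception {
        RuntimeContext ctx = getRuntimeContext();
        outBps = MetricUtils.registerOutBps(ctx, "datahub");
    }

    @Override
    public void invoke(byte[] value, Context context) {
        // One markEvent per byte yields bytes-per-second through the MeterView.
        outBps.markEvent(value.length);
    }
}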
Example 16
Source Project: flink | Source File: SavepointEnvironment.java | License: Apache License 2.0

public Builder(RuntimeContext ctx, int maxParallelism) {
    this.ctx = Preconditions.checkNotNull(ctx);

    Preconditions.checkArgument(maxParallelism > 0);
    this.maxParallelism = maxParallelism;

    this.prioritizedOperatorSubtaskState = PrioritizedOperatorSubtaskState.emptyNotRestored();
    this.configuration = new Configuration();
    this.indexOfSubtask = ctx.getIndexOfThisSubtask();
}
Example 17
Source Project: flink | Source File: FlinkKafkaProducerBaseTest.java | License: Apache License 2.0

/**
 * Tests that partitions list is determinate and correctly provided to custom partitioner.
 */
@SuppressWarnings("unchecked")
@Test
public void testPartitionerInvokedWithDeterminatePartitionList() throws Exception {
    FlinkKafkaPartitioner<String> mockPartitioner = mock(FlinkKafkaPartitioner.class);

    RuntimeContext mockRuntimeContext = mock(StreamingRuntimeContext.class);
    when(mockRuntimeContext.getIndexOfThisSubtask()).thenReturn(0);
    when(mockRuntimeContext.getNumberOfParallelSubtasks()).thenReturn(1);

    // out-of-order list of 4 partitions
    List<PartitionInfo> mockPartitionsList = new ArrayList<>(4);
    mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 3, null, null, null));
    mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 1, null, null, null));
    mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 0, null, null, null));
    mockPartitionsList.add(new PartitionInfo(DummyFlinkKafkaProducer.DUMMY_TOPIC, 2, null, null, null));

    final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
            FakeStandardProducerConfig.get(),
            new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()),
            mockPartitioner);
    producer.setRuntimeContext(mockRuntimeContext);

    final KafkaProducer mockProducer = producer.getMockKafkaProducer();
    when(mockProducer.partitionsFor(anyString())).thenReturn(mockPartitionsList);
    when(mockProducer.metrics()).thenReturn(null);

    producer.open(new Configuration());
    verify(mockPartitioner, times(1)).open(0, 1);

    producer.invoke("foobar", SinkContextUtil.forTimestamp(0));
    verify(mockPartitioner, times(1)).partition(
            "foobar", null, "foobar".getBytes(), DummyFlinkKafkaProducer.DUMMY_TOPIC, new int[] {0, 1, 2, 3});
}
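The mocked-RuntimeContext pattern in this test generalizes to unit-testing any rich function without a cluster. A hedged sketch using Mockito and the illustrative AnnotatingMapper from the sketch at the top of this page; the UnregisteredMetricsGroup stub stands in for a real metric registry:

import static org.junit.Assert.assertEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.metrics.groups.UnregisteredMetricsGroup;
import org.apache.flink.streaming.api.operators.StreamingRuntimeContext;
import org.junit.Test;

public class RichFunctionHarnessTest {

    @Test
    public void testOpenWithMockedContext() throws Exception {
        RuntimeContext mockContext = mock(StreamingRuntimeContext.class);
        when(mockContext.getIndexOfThisSubtask()).thenReturn(0);
        when(mockContext.getNumberOfParallelSubtasks()).thenReturn(1);
        // Stub the metric group so counter registration in open() succeeds.
        when(mockContext.getMetricGroup()).thenReturn(new UnregisteredMetricsGroup());

        // AnnotatingMapper is the illustrative rich function sketched earlier.
        AnnotatingMapper mapper = new AnnotatingMapper();
        mapper.setRuntimeContext(mockContext);
        mapper.open(new Configuration());

        assertEquals("0: a", mapper.map("a"));
    }
}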
Example 18
Source Project: Flink-CEPplus | Source File: ContinuousFileProcessingTest.java | License: Apache License 2.0

/**
 * Create continuous monitoring function with 1 reader-parallelism and interval: {@link #INTERVAL}.
 */
private <OUT> ContinuousFileMonitoringFunction<OUT> createTestContinuousFileMonitoringFunction(FileInputFormat<OUT> format, FileProcessingMode fileProcessingMode) {
    ContinuousFileMonitoringFunction<OUT> monitoringFunction =
            new ContinuousFileMonitoringFunction<>(format, fileProcessingMode, 1, INTERVAL);
    monitoringFunction.setRuntimeContext(Mockito.mock(RuntimeContext.class));
    return monitoringFunction;
}
Example 19
Source Project: Flink-CEPplus | Source File: OutputFormatSinkFunction.java | License: Apache License 2.0

@Override
public void open(Configuration parameters) throws Exception {
    RuntimeContext context = getRuntimeContext();
    format.configure(parameters);
    int indexInSubtaskGroup = context.getIndexOfThisSubtask();
    int currentNumberOfSubtasks = context.getNumberOfParallelSubtasks();
    format.open(indexInSubtaskGroup, currentNumberOfSubtasks);
}
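The subtask index and parallelism read above are also the standard way to split work across parallel instances. A minimal hedged sketch with an illustrative source that round-robins ids over subtasks:

import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;

// Illustrative source: each subtask emits only the ids assigned to it.
public class RangeSource extends RichParallelSourceFunction<Long> {

    private final long maxId;
    private volatile boolean running = true;

    public RangeSource(long maxId) {
        this.maxId = maxId;
    }

    @Override
    public void run(SourceContext<Long> ctx) {
        int subtask = getRuntimeContext().getIndexOfThisSubtask();
        int parallelism = getRuntimeContext().getNumberOfParallelSubtasks();
        // Round-robin assignment: subtask i emits i, i + p, i + 2p, ...
        for (long id = subtask; id < maxId && running; id += parallelism) {
            ctx.collect(id);
        }
    }

    @Override
    public void cancel() {
        running = false;
    }
}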
Example 20
Source Project: alchemy | Source File: MetricFunction.java | License: Apache License 2.0

default Counter createOrGet(Counter numRecordsOut, RuntimeContext runtimeContext) {
    if (numRecordsOut == null) {
        MetricGroup metricGroup = runtimeContext.getMetricGroup().addGroup(metricGroupName());
        numRecordsOut = metricGroup.counter(MetricNames.IO_NUM_RECORDS_OUT);
        metricGroup.meter(MetricNames.IO_NUM_RECORDS_OUT_RATE, new MeterView(numRecordsOut, 60));
    }
    return numRecordsOut;
}
Example 21
Source Project: flink | Source File: KinesisDataFetcher.java | License: Apache License 2.0

@VisibleForTesting
protected KinesisDataFetcher(List<String> streams,
        SourceFunction.SourceContext<T> sourceContext,
        Object checkpointLock,
        RuntimeContext runtimeContext,
        Properties configProps,
        KinesisDeserializationSchema<T> deserializationSchema,
        KinesisShardAssigner shardAssigner,
        AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
        WatermarkTracker watermarkTracker,
        AtomicReference<Throwable> error,
        List<KinesisStreamShardState> subscribedShardsState,
        HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds,
        FlinkKinesisProxyFactory kinesisProxyFactory) {
    this.streams = checkNotNull(streams);
    this.configProps = checkNotNull(configProps);
    this.sourceContext = checkNotNull(sourceContext);
    this.checkpointLock = checkNotNull(checkpointLock);
    this.runtimeContext = checkNotNull(runtimeContext);
    this.totalNumberOfConsumerSubtasks = runtimeContext.getNumberOfParallelSubtasks();
    this.indexOfThisConsumerSubtask = runtimeContext.getIndexOfThisSubtask();
    this.deserializationSchema = checkNotNull(deserializationSchema);
    this.shardAssigner = checkNotNull(shardAssigner);
    this.periodicWatermarkAssigner = periodicWatermarkAssigner;
    this.watermarkTracker = watermarkTracker;
    this.kinesisProxyFactory = checkNotNull(kinesisProxyFactory);
    this.kinesis = kinesisProxyFactory.create(configProps);

    this.consumerMetricGroup = runtimeContext.getMetricGroup()
            .addGroup(KinesisConsumerMetricConstants.KINESIS_CONSUMER_METRICS_GROUP);

    this.error = checkNotNull(error);
    this.subscribedShardsState = checkNotNull(subscribedShardsState);
    this.subscribedStreamsToLastDiscoveredShardIds = checkNotNull(subscribedStreamsToLastDiscoveredShardIds);

    this.shardConsumersExecutor = createShardConsumersThreadPool(runtimeContext.getTaskNameWithSubtasks());
    this.recordEmitter = createRecordEmitter(configProps);
}
Example 22
Source Project: Flink-CEPplus | Source File: FlinkDynamoDBStreamsConsumer.java | License: Apache License 2.0

@Override
protected KinesisDataFetcher<T> createFetcher(
        List<String> streams,
        SourceFunction.SourceContext<T> sourceContext,
        RuntimeContext runtimeContext,
        Properties configProps,
        KinesisDeserializationSchema<T> deserializationSchema) {
    return new DynamoDBStreamsDataFetcher<T>(
            streams,
            sourceContext,
            runtimeContext,
            configProps,
            deserializationSchema,
            getShardAssigner());
}
Example 23
Source Project: Flink-CEPplus | Source File: WatermarkTracker.java | License: Apache License 2.0

public void open(RuntimeContext context) {
    if (context instanceof StreamingRuntimeContext) {
        this.subtaskId = ((StreamingRuntimeContext) context).getOperatorUniqueID()
                + "-" + context.getIndexOfThisSubtask();
    } else {
        this.subtaskId = context.getTaskNameWithSubtasks();
    }
}
Example 24
Source Project: flink | Source File: RichCompositeIterativeCondition.java | License: Apache License 2.0

@Override
public void setRuntimeContext(RuntimeContext t) {
    super.setRuntimeContext(t);
    for (IterativeCondition<T> nestedCondition : nestedConditions) {
        FunctionUtils.setFunctionRuntimeContext(nestedCondition, t);
    }
}
Example 25
Source Project: flink | Source File: FlinkKinesisConsumerMigrationTest.java | License: Apache License 2.0

@Override
protected KinesisDataFetcher<T> createFetcher(
        List<String> streams,
        SourceContext<T> sourceContext,
        RuntimeContext runtimeContext,
        Properties configProps,
        KinesisDeserializationSchema<T> deserializer) {
    return mockFetcher;
}
Example 26
Source Project: Flink-CEPplus | Source File: RMQSource.java | License: Apache License 2.0

@Override
public void open(Configuration config) throws Exception {
    super.open(config);
    ConnectionFactory factory = setupConnectionFactory();
    try {
        connection = factory.newConnection();
        channel = connection.createChannel();
        if (channel == null) {
            throw new RuntimeException("None of RabbitMQ channels are available");
        }
        setupQueue();
        consumer = new QueueingConsumer(channel);

        RuntimeContext runtimeContext = getRuntimeContext();
        if (runtimeContext instanceof StreamingRuntimeContext
                && ((StreamingRuntimeContext) runtimeContext).isCheckpointingEnabled()) {
            autoAck = false;
            // enables transaction mode
            channel.txSelect();
        } else {
            autoAck = true;
        }

        LOG.debug("Starting RabbitMQ source with autoAck status: " + autoAck);
        channel.basicConsume(queueName, autoAck, consumer);
    } catch (IOException e) {
        throw new RuntimeException("Cannot create RMQ connection with " + queueName + " at "
                + rmqConnectionConfig.getHost(), e);
    }
    running = true;
}
Example 27
Source Project: flink | Source File: RichAsyncFunction.java | License: Apache License 2.0

@Override
public void setRuntimeContext(RuntimeContext runtimeContext) {
    Preconditions.checkNotNull(runtimeContext);

    if (runtimeContext instanceof IterationRuntimeContext) {
        super.setRuntimeContext(
                new RichAsyncFunctionIterationRuntimeContext(
                        (IterationRuntimeContext) runtimeContext));
    } else {
        super.setRuntimeContext(new RichAsyncFunctionRuntimeContext(runtimeContext));
    }
}
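The wrapper installed above restricts what an async function may do with its context (keyed state access, for instance, is unsupported there), but plain reads work as usual. A minimal hedged sketch of a user-defined RichAsyncFunction; the enricher and its fake in-process lookup are illustrative:

import java.util.Collections;
import java.util.concurrent.CompletableFuture;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.async.ResultFuture;
import org.apache.flink.streaming.api.functions.async.RichAsyncFunction;

// Illustrative async enricher; a real implementation would call an external service.
public class AsyncEnricher extends RichAsyncFunction<String, String> {

    private transient String subtaskName;

    @Override
    public void open(Configuration parameters) throws Exception {
        // getRuntimeContext() here returns the restricted wrapper shown above.
        subtaskName = getRuntimeContext().getTaskNameWithSubtasks();
    }

    @Override
    public void asyncInvoke(String input, ResultFuture<String> resultFuture) {
        CompletableFuture
                .supplyAsync(() -> input + " @ " + subtaskName)
                .thenAccept(v -> resultFuture.complete(Collections.singleton(v)));
    }
}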
Example 28
Source Project: flink | Source File: JdbcFullTest.java | License: Apache License 2.0

@Test
public void testEnrichedClassCastException() {
    String expectedMsg = "field index: 3, field value: 11.11.";
    try {
        JdbcBatchingOutputFormat jdbcOutputFormat = JdbcBatchingOutputFormat.builder()
                .setOptions(JdbcOptions.builder()
                        .setDBUrl(getDbMetadata().getUrl())
                        .setTableName(OUTPUT_TABLE)
                        .build())
                .setFieldNames(new String[]{"id", "title", "author", "price", "qty"})
                .setFieldTypes(new int[]{Types.INTEGER, Types.VARCHAR, Types.VARCHAR, Types.DOUBLE, Types.INTEGER})
                .setKeyFields(null)
                .build();

        RuntimeContext context = Mockito.mock(RuntimeContext.class);
        ExecutionConfig config = Mockito.mock(ExecutionConfig.class);
        doReturn(config).when(context).getExecutionConfig();
        doReturn(true).when(config).isObjectReuseEnabled();
        jdbcOutputFormat.setRuntimeContext(context);
        jdbcOutputFormat.open(1, 1);

        Row inputRow = Row.of(1001, "Java public for dummies", "Tan Ah Teck", "11.11", 11);
        jdbcOutputFormat.writeRecord(Tuple2.of(true, inputRow));
        jdbcOutputFormat.close();
    } catch (Exception e) {
        assertTrue(findThrowable(e, ClassCastException.class).isPresent());
        assertTrue(findThrowableWithMessage(e, expectedMsg).isPresent());
    }
}