org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState Java Examples

The following examples show how to use org.apache.flink.streaming.connectors.kinesis.model.KinesisStreamShardState, drawn from open-source projects; the source file and project are noted above each example.
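Before working through the examples, here is a minimal sketch of how a KinesisStreamShardState is typically constructed and queried, assembled only from the calls that appear in the examples below; the stream name and shard order are placeholders. The state object bundles the shard's serializable StreamShardMetadata, the live StreamShardHandle, and the last processed SequenceNumber.

StreamShardHandle shard = new StreamShardHandle(
	"exampleStream",
	new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(0)));

KinesisStreamShardState shardState = new KinesisStreamShardState(
	KinesisDataFetcher.convertToStreamShardMetadata(shard),
	shard,
	SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get());

// the handle exposes the stream name and shard id (used below for metric groups and shard assignment)
String shardId = shardState.getStreamShardHandle().getShard().getShardId();
// the sequence number records consumption progress and is what gets snapshotted into Flink state
SequenceNumber lastProcessed = shardState.getLastProcessedSequenceNum();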
Example #1
Source File: KinesisDataFetcher.java    From Flink-CEPplus with Apache License 2.0
/**
 * Registers a metric group associated with the shard id of the provided {@link KinesisStreamShardState shardState}.
 *
 * @return a {@link ShardMetricsReporter} that can be used to update metric values
 */
private static ShardMetricsReporter registerShardMetrics(MetricGroup metricGroup, KinesisStreamShardState shardState) {
	ShardMetricsReporter shardMetrics = new ShardMetricsReporter();

	MetricGroup streamShardMetricGroup = metricGroup
		.addGroup(
			KinesisConsumerMetricConstants.STREAM_METRICS_GROUP,
			shardState.getStreamShardHandle().getStreamName())
		.addGroup(
			KinesisConsumerMetricConstants.SHARD_METRICS_GROUP,
			shardState.getStreamShardHandle().getShard().getShardId());

	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.MILLIS_BEHIND_LATEST_GAUGE, shardMetrics::getMillisBehindLatest);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.MAX_RECORDS_PER_FETCH, shardMetrics::getMaxNumberOfRecordsPerFetch);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.NUM_AGGREGATED_RECORDS_PER_FETCH, shardMetrics::getNumberOfAggregatedRecords);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.NUM_DEAGGREGATED_RECORDS_PER_FETCH, shardMetrics::getNumberOfDeaggregatedRecords);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.AVG_RECORD_SIZE_BYTES, shardMetrics::getAverageRecordSizeBytes);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.BYTES_PER_READ, shardMetrics::getBytesPerRead);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.RUNTIME_LOOP_NANOS, shardMetrics::getRunLoopTimeNanos);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.LOOP_FREQUENCY_HZ, shardMetrics::getLoopFrequencyHz);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.SLEEP_TIME_MILLIS, shardMetrics::getSleepTimeMillis);
	return shardMetrics;
}
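The ShardMetricsReporter returned above is a mutable value holder: the shard consumer writes the latest measurements into it, and the gauges registered here read them back whenever metrics are reported. A hedged sketch of that interaction follows; the setMillisBehindLatest setter is assumed to mirror the getMillisBehindLatest getter used above and does not appear in the examples on this page.

ShardMetricsReporter shardMetrics = registerShardMetrics(consumerMetricGroup, shardState);
// assumed setter: the consumer updates the reporter after each getRecords() call ...
shardMetrics.setMillisBehindLatest(500L);
// ... and the MILLIS_BEHIND_LATEST gauge registered above reports the current value
long reportedLag = shardMetrics.getMillisBehindLatest();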
 
Example #2
Source File: KinesisDataFetcher.java    From flink with Apache License 2.0
/**
 * Registers a metric group associated with the shard id of the provided {@link KinesisStreamShardState shardState}.
 *
 * @return a {@link ShardMetricsReporter} that can be used to update metric values
 */
private static ShardMetricsReporter registerShardMetrics(MetricGroup metricGroup, KinesisStreamShardState shardState) {
	ShardMetricsReporter shardMetrics = new ShardMetricsReporter();

	MetricGroup streamShardMetricGroup = metricGroup
		.addGroup(
			KinesisConsumerMetricConstants.STREAM_METRICS_GROUP,
			shardState.getStreamShardHandle().getStreamName())
		.addGroup(
			KinesisConsumerMetricConstants.SHARD_METRICS_GROUP,
			shardState.getStreamShardHandle().getShard().getShardId());

	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.MILLIS_BEHIND_LATEST_GAUGE, shardMetrics::getMillisBehindLatest);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.MAX_RECORDS_PER_FETCH, shardMetrics::getMaxNumberOfRecordsPerFetch);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.NUM_AGGREGATED_RECORDS_PER_FETCH, shardMetrics::getNumberOfAggregatedRecords);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.NUM_DEAGGREGATED_RECORDS_PER_FETCH, shardMetrics::getNumberOfDeaggregatedRecords);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.AVG_RECORD_SIZE_BYTES, shardMetrics::getAverageRecordSizeBytes);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.BYTES_PER_READ, shardMetrics::getBytesPerRead);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.RUNTIME_LOOP_NANOS, shardMetrics::getRunLoopTimeNanos);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.LOOP_FREQUENCY_HZ, shardMetrics::getLoopFrequencyHz);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.SLEEP_TIME_MILLIS, shardMetrics::getSleepTimeMillis);
	return shardMetrics;
}
 
Example #3
Source File: KinesisDataFetcher.java    From flink with Apache License 2.0
/**
 * Registers a metric group associated with the shard id of the provided {@link KinesisStreamShardState shardState}.
 *
 * @return a {@link ShardMetricsReporter} that can be used to update metric values
 */
private static ShardMetricsReporter registerShardMetrics(MetricGroup metricGroup, KinesisStreamShardState shardState) {
	ShardMetricsReporter shardMetrics = new ShardMetricsReporter();

	MetricGroup streamShardMetricGroup = metricGroup
		.addGroup(
			KinesisConsumerMetricConstants.STREAM_METRICS_GROUP,
			shardState.getStreamShardHandle().getStreamName())
		.addGroup(
			KinesisConsumerMetricConstants.SHARD_METRICS_GROUP,
			shardState.getStreamShardHandle().getShard().getShardId());

	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.MILLIS_BEHIND_LATEST_GAUGE, shardMetrics::getMillisBehindLatest);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.MAX_RECORDS_PER_FETCH, shardMetrics::getMaxNumberOfRecordsPerFetch);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.NUM_AGGREGATED_RECORDS_PER_FETCH, shardMetrics::getNumberOfAggregatedRecords);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.NUM_DEAGGREGATED_RECORDS_PER_FETCH, shardMetrics::getNumberOfDeaggregatedRecords);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.AVG_RECORD_SIZE_BYTES, shardMetrics::getAverageRecordSizeBytes);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.BYTES_PER_READ, shardMetrics::getBytesPerRead);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.RUNTIME_LOOP_NANOS, shardMetrics::getRunLoopTimeNanos);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.LOOP_FREQUENCY_HZ, shardMetrics::getLoopFrequencyHz);
	streamShardMetricGroup.gauge(KinesisConsumerMetricConstants.SLEEP_TIME_MILLIS, shardMetrics::getSleepTimeMillis);
	return shardMetrics;
}
 
Example #4
Source File: TestableKinesisDataFetcherForShardConsumerException.java    From flink with Apache License 2.0
public TestableKinesisDataFetcherForShardConsumerException(final List<String> fakeStreams,
		final SourceFunction.SourceContext<T> sourceContext,
		final Properties fakeConfiguration,
		final KinesisDeserializationSchema<T> deserializationSchema,
		final int fakeTotalCountOfSubtasks,
		final int fakeIndexOfThisSubtask,
		final AtomicReference<Throwable> thrownErrorUnderTest,
		final LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
		final HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
		final KinesisProxyInterface fakeKinesis) {
	super(fakeStreams, sourceContext, fakeConfiguration, deserializationSchema, fakeTotalCountOfSubtasks,
		fakeIndexOfThisSubtask, thrownErrorUnderTest, subscribedShardsStateUnderTest,
		subscribedStreamsToLastDiscoveredShardIdsStateUnderTest, fakeKinesis);
}
 
Example #5
Source File: ShardConsumerTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testMetricsReporting() {
	StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);

	LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
	subscribedShardsStateUnderTest.add(
		new KinesisStreamShardState(
			KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
			fakeToBeConsumedShard,
			new SequenceNumber("fakeStartingState")));

	TestSourceContext<String> sourceContext = new TestSourceContext<>();

	TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			Collections.singletonList("fakeStream"),
			sourceContext,
			new Properties(),
			new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
			10,
			2,
			new AtomicReference<>(),
			subscribedShardsStateUnderTest,
			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
			Mockito.mock(KinesisProxyInterface.class));

	ShardMetricsReporter shardMetricsReporter = new ShardMetricsReporter();
	long millisBehindLatest = 500L;
	new ShardConsumer<>(
		fetcher,
		0,
		subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
		FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9, millisBehindLatest),
		shardMetricsReporter).run();

	// the millisBehindLatest metric should have been reported
	assertEquals(millisBehindLatest, shardMetricsReporter.getMillisBehindLatest());
}
 
Example #6
Source File: ShardConsumerTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testCorrectNumOfCollectedRecordsAndUpdatedState() {
	StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);

	LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
	subscribedShardsStateUnderTest.add(
		new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
			fakeToBeConsumedShard, new SequenceNumber("fakeStartingState")));

	TestSourceContext<String> sourceContext = new TestSourceContext<>();

	TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			Collections.singletonList("fakeStream"),
			sourceContext,
			new Properties(),
			new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
			10,
			2,
			new AtomicReference<>(),
			subscribedShardsStateUnderTest,
			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
			Mockito.mock(KinesisProxyInterface.class));

	int shardIndex = fetcher.registerNewSubscribedShardState(subscribedShardsStateUnderTest.get(0));
	new ShardConsumer<>(
		fetcher,
		shardIndex,
		subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
		FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9, 500L),
		new ShardMetricsReporter()).run();

	assertEquals(1000, sourceContext.getCollectedOutputs().size());
	assertEquals(
		SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum());
}
 
Example #7
Source File: KinesisDataFetcher.java    From Flink-CEPplus with Apache License 2.0
@VisibleForTesting
protected KinesisDataFetcher(List<String> streams,
							SourceFunction.SourceContext<T> sourceContext,
							Object checkpointLock,
							RuntimeContext runtimeContext,
							Properties configProps,
							KinesisDeserializationSchema<T> deserializationSchema,
							KinesisShardAssigner shardAssigner,
							AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
							WatermarkTracker watermarkTracker,
							AtomicReference<Throwable> error,
							List<KinesisStreamShardState> subscribedShardsState,
							HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds,
							FlinkKinesisProxyFactory kinesisProxyFactory) {
	this.streams = checkNotNull(streams);
	this.configProps = checkNotNull(configProps);
	this.sourceContext = checkNotNull(sourceContext);
	this.checkpointLock = checkNotNull(checkpointLock);
	this.runtimeContext = checkNotNull(runtimeContext);
	this.totalNumberOfConsumerSubtasks = runtimeContext.getNumberOfParallelSubtasks();
	this.indexOfThisConsumerSubtask = runtimeContext.getIndexOfThisSubtask();
	this.deserializationSchema = checkNotNull(deserializationSchema);
	this.shardAssigner = checkNotNull(shardAssigner);
	this.periodicWatermarkAssigner = periodicWatermarkAssigner;
	this.watermarkTracker = watermarkTracker;
	this.kinesisProxyFactory = checkNotNull(kinesisProxyFactory);
	this.kinesis = kinesisProxyFactory.create(configProps);

	this.consumerMetricGroup = runtimeContext.getMetricGroup()
		.addGroup(KinesisConsumerMetricConstants.KINESIS_CONSUMER_METRICS_GROUP);

	this.error = checkNotNull(error);
	this.subscribedShardsState = checkNotNull(subscribedShardsState);
	this.subscribedStreamsToLastDiscoveredShardIds = checkNotNull(subscribedStreamsToLastDiscoveredShardIds);

	this.shardConsumersExecutor =
		createShardConsumersThreadPool(runtimeContext.getTaskNameWithSubtasks());
	this.recordEmitter = createRecordEmitter(configProps);
}
 
Example #8
Source File: TestableKinesisDataFetcher.java    From Flink-CEPplus with Apache License 2.0
public TestableKinesisDataFetcher(
		List<String> fakeStreams,
		SourceFunction.SourceContext<T> sourceContext,
		Properties fakeConfiguration,
		KinesisDeserializationSchema<T> deserializationSchema,
		int fakeTotalCountOfSubtasks,
		int fakeIndexOfThisSubtask,
		AtomicReference<Throwable> thrownErrorUnderTest,
		LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
		HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
		KinesisProxyInterface fakeKinesis) {
	super(
		fakeStreams,
		sourceContext,
		sourceContext.getCheckpointLock(),
		getMockedRuntimeContext(fakeTotalCountOfSubtasks, fakeIndexOfThisSubtask),
		fakeConfiguration,
		deserializationSchema,
		DEFAULT_SHARD_ASSIGNER,
		null,
		null,
		thrownErrorUnderTest,
		subscribedShardsStateUnderTest,
		subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
		(properties) -> fakeKinesis);

	this.runWaiter = new OneShotLatch();
	this.initialDiscoveryWaiter = new OneShotLatch();
	this.shutdownWaiter = new OneShotLatch();

	this.running = true;
}
 
Example #9
Source File: TestableKinesisDataFetcherForShardConsumerException.java    From Flink-CEPplus with Apache License 2.0
public TestableKinesisDataFetcherForShardConsumerException(final List<String> fakeStreams,
		final SourceFunction.SourceContext<T> sourceContext,
		final Properties fakeConfiguration,
		final KinesisDeserializationSchema<T> deserializationSchema,
		final int fakeTotalCountOfSubtasks,
		final int fakeIndexOfThisSubtask,
		final AtomicReference<Throwable> thrownErrorUnderTest,
		final LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
		final HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
		final KinesisProxyInterface fakeKinesis) {
	super(fakeStreams, sourceContext, fakeConfiguration, deserializationSchema, fakeTotalCountOfSubtasks,
		fakeIndexOfThisSubtask, thrownErrorUnderTest, subscribedShardsStateUnderTest,
		subscribedStreamsToLastDiscoveredShardIdsStateUnderTest, fakeKinesis);
}
 
Example #10
Source File: KinesisDataFetcher.java    From flink with Apache License 2.0
@VisibleForTesting
protected KinesisDataFetcher(List<String> streams,
							SourceFunction.SourceContext<T> sourceContext,
							Object checkpointLock,
							RuntimeContext runtimeContext,
							Properties configProps,
							KinesisDeserializationSchema<T> deserializationSchema,
							KinesisShardAssigner shardAssigner,
							AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
							WatermarkTracker watermarkTracker,
							AtomicReference<Throwable> error,
							List<KinesisStreamShardState> subscribedShardsState,
							HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds,
							FlinkKinesisProxyFactory kinesisProxyFactory) {
	this.streams = checkNotNull(streams);
	this.configProps = checkNotNull(configProps);
	this.sourceContext = checkNotNull(sourceContext);
	this.checkpointLock = checkNotNull(checkpointLock);
	this.runtimeContext = checkNotNull(runtimeContext);
	this.totalNumberOfConsumerSubtasks = runtimeContext.getNumberOfParallelSubtasks();
	this.indexOfThisConsumerSubtask = runtimeContext.getIndexOfThisSubtask();
	this.deserializationSchema = checkNotNull(deserializationSchema);
	this.shardAssigner = checkNotNull(shardAssigner);
	this.periodicWatermarkAssigner = periodicWatermarkAssigner;
	this.watermarkTracker = watermarkTracker;
	this.kinesisProxyFactory = checkNotNull(kinesisProxyFactory);
	this.kinesis = kinesisProxyFactory.create(configProps);

	this.consumerMetricGroup = runtimeContext.getMetricGroup()
		.addGroup(KinesisConsumerMetricConstants.KINESIS_CONSUMER_METRICS_GROUP);

	this.error = checkNotNull(error);
	this.subscribedShardsState = checkNotNull(subscribedShardsState);
	this.subscribedStreamsToLastDiscoveredShardIds = checkNotNull(subscribedStreamsToLastDiscoveredShardIds);

	this.shardConsumersExecutor =
		createShardConsumersThreadPool(runtimeContext.getTaskNameWithSubtasks());
	this.recordEmitter = createRecordEmitter(configProps);
}
 
Example #11
Source File: KinesisDataFetcher.java    From flink with Apache License 2.0
/**
 * Register a new subscribed shard state.
 *
 * @param newSubscribedShardState the new shard state that this fetcher is to be subscribed to
 */
public int registerNewSubscribedShardState(KinesisStreamShardState newSubscribedShardState) {
	synchronized (checkpointLock) {
		subscribedShardsState.add(newSubscribedShardState);

		// If the registered shard's initial state is not SENTINEL_SHARD_ENDING_SEQUENCE_NUM (which it would be
		// if the consumer had already finished reading the shard before we failed and restored), then this
		// subtask has a new active shard
		if (!newSubscribedShardState.getLastProcessedSequenceNum().equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
			this.numberOfActiveShards.incrementAndGet();
		}

		int shardStateIndex = subscribedShardsState.size() - 1;

		// track all discovered shards for watermark determination
		ShardWatermarkState sws = shardWatermarks.get(shardStateIndex);
		if (sws == null) {
			sws = new ShardWatermarkState();
			try {
				sws.periodicWatermarkAssigner = InstantiationUtil.clone(periodicWatermarkAssigner);
			} catch (Exception e) {
				throw new RuntimeException("Failed to instantiate new WatermarkAssigner", e);
			}
			sws.emitQueue = recordEmitter.getQueue(shardStateIndex);
			sws.lastUpdated = getCurrentTimeMillis();
			sws.lastRecordTimestamp = Long.MIN_VALUE;
			shardWatermarks.put(shardStateIndex, sws);
		}

		return shardStateIndex;
	}
}
 
Example #12
Source File: ShardConsumerTest.java    From flink with Apache License 2.0
@Test
public void testCorrectNumOfCollectedRecordsAndUpdatedState() {
	StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);

	LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
	subscribedShardsStateUnderTest.add(
		new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
			fakeToBeConsumedShard, new SequenceNumber("fakeStartingState")));

	TestSourceContext<String> sourceContext = new TestSourceContext<>();

	TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			Collections.singletonList("fakeStream"),
			sourceContext,
			new Properties(),
			new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
			10,
			2,
			new AtomicReference<>(),
			subscribedShardsStateUnderTest,
			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
			Mockito.mock(KinesisProxyInterface.class));

	int shardIndex = fetcher.registerNewSubscribedShardState(subscribedShardsStateUnderTest.get(0));
	new ShardConsumer<>(
		fetcher,
		shardIndex,
		subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
		FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9, 500L),
		new ShardMetricsReporter()).run();

	assertEquals(1000, sourceContext.getCollectedOutputs().size());
	assertEquals(
		SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum());
}
 
Example #13
Source File: TestableKinesisDataFetcher.java    From flink with Apache License 2.0
public TestableKinesisDataFetcher(
		List<String> fakeStreams,
		SourceFunction.SourceContext<T> sourceContext,
		Properties fakeConfiguration,
		KinesisDeserializationSchema<T> deserializationSchema,
		int fakeTotalCountOfSubtasks,
		int fakeIndexOfThisSubtask,
		AtomicReference<Throwable> thrownErrorUnderTest,
		LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
		HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
		KinesisProxyInterface fakeKinesis) {
	super(
		fakeStreams,
		sourceContext,
		sourceContext.getCheckpointLock(),
		getMockedRuntimeContext(fakeTotalCountOfSubtasks, fakeIndexOfThisSubtask),
		fakeConfiguration,
		deserializationSchema,
		DEFAULT_SHARD_ASSIGNER,
		null,
		null,
		thrownErrorUnderTest,
		subscribedShardsStateUnderTest,
		subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
		(properties) -> fakeKinesis);

	this.runWaiter = new OneShotLatch();
	this.initialDiscoveryWaiter = new OneShotLatch();
	this.shutdownWaiter = new OneShotLatch();

	this.running = true;
}
 
Example #14
Source File: TestableKinesisDataFetcherForShardConsumerException.java    From flink with Apache License 2.0
public TestableKinesisDataFetcherForShardConsumerException(final List<String> fakeStreams,
		final SourceFunction.SourceContext<T> sourceContext,
		final Properties fakeConfiguration,
		final KinesisDeserializationSchema<T> deserializationSchema,
		final int fakeTotalCountOfSubtasks,
		final int fakeIndexOfThisSubtask,
		final AtomicReference<Throwable> thrownErrorUnderTest,
		final LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
		final HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
		final KinesisProxyInterface fakeKinesis) {
	super(fakeStreams, sourceContext, fakeConfiguration, deserializationSchema, fakeTotalCountOfSubtasks,
		fakeIndexOfThisSubtask, thrownErrorUnderTest, subscribedShardsStateUnderTest,
		subscribedStreamsToLastDiscoveredShardIdsStateUnderTest, fakeKinesis);
}
 
Example #15
Source File: KinesisDataFetcher.java    From flink with Apache License 2.0
@VisibleForTesting
protected KinesisDataFetcher(List<String> streams,
							SourceFunction.SourceContext<T> sourceContext,
							Object checkpointLock,
							RuntimeContext runtimeContext,
							Properties configProps,
							KinesisDeserializationSchema<T> deserializationSchema,
							KinesisShardAssigner shardAssigner,
							AssignerWithPeriodicWatermarks<T> periodicWatermarkAssigner,
							WatermarkTracker watermarkTracker,
							AtomicReference<Throwable> error,
							List<KinesisStreamShardState> subscribedShardsState,
							HashMap<String, String> subscribedStreamsToLastDiscoveredShardIds,
							FlinkKinesisProxyFactory kinesisProxyFactory) {
	this.streams = checkNotNull(streams);
	this.configProps = checkNotNull(configProps);
	this.sourceContext = checkNotNull(sourceContext);
	this.checkpointLock = checkNotNull(checkpointLock);
	this.runtimeContext = checkNotNull(runtimeContext);
	this.totalNumberOfConsumerSubtasks = runtimeContext.getNumberOfParallelSubtasks();
	this.indexOfThisConsumerSubtask = runtimeContext.getIndexOfThisSubtask();
	this.deserializationSchema = checkNotNull(deserializationSchema);
	this.shardAssigner = checkNotNull(shardAssigner);
	this.periodicWatermarkAssigner = periodicWatermarkAssigner;
	this.watermarkTracker = watermarkTracker;
	this.kinesisProxyFactory = checkNotNull(kinesisProxyFactory);
	this.kinesis = kinesisProxyFactory.create(configProps);

	this.consumerMetricGroup = runtimeContext.getMetricGroup()
		.addGroup(KinesisConsumerMetricConstants.KINESIS_CONSUMER_METRICS_GROUP);

	this.error = checkNotNull(error);
	this.subscribedShardsState = checkNotNull(subscribedShardsState);
	this.subscribedStreamsToLastDiscoveredShardIds = checkNotNull(subscribedStreamsToLastDiscoveredShardIds);

	this.shardConsumersExecutor =
		createShardConsumersThreadPool(runtimeContext.getTaskNameWithSubtasks());
	this.recordEmitter = createRecordEmitter(configProps);
}
 
Example #16
Source File: ShardConsumerTest.java    From flink with Apache License 2.0
@Test
public void testMetricsReporting() {
	StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);

	LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
	subscribedShardsStateUnderTest.add(
		new KinesisStreamShardState(
			KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
			fakeToBeConsumedShard,
			new SequenceNumber("fakeStartingState")));

	TestSourceContext<String> sourceContext = new TestSourceContext<>();

	TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			Collections.singletonList("fakeStream"),
			sourceContext,
			new Properties(),
			new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
			10,
			2,
			new AtomicReference<>(),
			subscribedShardsStateUnderTest,
			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
			Mockito.mock(KinesisProxyInterface.class));

	ShardMetricsReporter shardMetricsReporter = new ShardMetricsReporter();
	long millisBehindLatest = 500L;
	new ShardConsumer<>(
		fetcher,
		0,
		subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
		FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9, millisBehindLatest),
		shardMetricsReporter).run();

	// the millisBehindLatest metric should have been reported
	assertEquals(millisBehindLatest, shardMetricsReporter.getMillisBehindLatest());
}
 
Example #17
Source File: KinesisDataFetcher.java    From flink with Apache License 2.0
/**
 * Register a new subscribed shard state.
 *
 * @param newSubscribedShardState the new shard state that this fetcher is to be subscribed to
 */
public int registerNewSubscribedShardState(KinesisStreamShardState newSubscribedShardState) {
	synchronized (checkpointLock) {
		subscribedShardsState.add(newSubscribedShardState);

		// If the registered shard's initial state is not SENTINEL_SHARD_ENDING_SEQUENCE_NUM (which it would be
		// if the consumer had already finished reading the shard before we failed and restored), then this
		// subtask has a new active shard
		if (!newSubscribedShardState.getLastProcessedSequenceNum().equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
			this.numberOfActiveShards.incrementAndGet();
		}

		int shardStateIndex = subscribedShardsState.size() - 1;

		// track all discovered shards for watermark determination
		ShardWatermarkState sws = shardWatermarks.get(shardStateIndex);
		if (sws == null) {
			sws = new ShardWatermarkState();
			try {
				sws.periodicWatermarkAssigner = InstantiationUtil.clone(periodicWatermarkAssigner);
			} catch (Exception e) {
				throw new RuntimeException("Failed to instantiate new WatermarkAssigner", e);
			}
			sws.emitQueue = recordEmitter.getQueue(shardStateIndex);
			sws.lastUpdated = getCurrentTimeMillis();
			sws.lastRecordTimestamp = Long.MIN_VALUE;
			shardWatermarks.put(shardStateIndex, sws);
		}

		return shardStateIndex;
	}
}
 
Example #18
Source File: TestableKinesisDataFetcher.java    From flink with Apache License 2.0
public TestableKinesisDataFetcher(
		List<String> fakeStreams,
		SourceFunction.SourceContext<T> sourceContext,
		Properties fakeConfiguration,
		KinesisDeserializationSchema<T> deserializationSchema,
		int fakeTotalCountOfSubtasks,
		int fakeIndexOfThisSubtask,
		AtomicReference<Throwable> thrownErrorUnderTest,
		LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest,
		HashMap<String, String> subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
		KinesisProxyInterface fakeKinesis) {
	super(
		fakeStreams,
		sourceContext,
		sourceContext.getCheckpointLock(),
		getMockedRuntimeContext(fakeTotalCountOfSubtasks, fakeIndexOfThisSubtask),
		fakeConfiguration,
		deserializationSchema,
		DEFAULT_SHARD_ASSIGNER,
		null,
		null,
		thrownErrorUnderTest,
		subscribedShardsStateUnderTest,
		subscribedStreamsToLastDiscoveredShardIdsStateUnderTest,
		(properties) -> fakeKinesis);

	this.runWaiter = new OneShotLatch();
	this.initialDiscoveryWaiter = new OneShotLatch();
	this.shutdownWaiter = new OneShotLatch();

	this.running = true;
}
 
Example #19
Source File: KinesisDataFetcher.java    From Flink-CEPplus with Apache License 2.0
/**
 * Register a new subscribed shard state.
 *
 * @param newSubscribedShardState the new shard state that this fetcher is to be subscribed to
 */
public int registerNewSubscribedShardState(KinesisStreamShardState newSubscribedShardState) {
	synchronized (checkpointLock) {
		subscribedShardsState.add(newSubscribedShardState);

		// If the registered shard's initial state is not SENTINEL_SHARD_ENDING_SEQUENCE_NUM (which it would be
		// if the consumer had already finished reading the shard before we failed and restored), then this
		// subtask has a new active shard
		if (!newSubscribedShardState.getLastProcessedSequenceNum().equals(SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get())) {
			this.numberOfActiveShards.incrementAndGet();
		}

		int shardStateIndex = subscribedShardsState.size() - 1;

		// track all discovered shards for watermark determination
		ShardWatermarkState sws = shardWatermarks.get(shardStateIndex);
		if (sws == null) {
			sws = new ShardWatermarkState();
			try {
				sws.periodicWatermarkAssigner = InstantiationUtil.clone(periodicWatermarkAssigner);
			} catch (Exception e) {
				throw new RuntimeException("Failed to instantiate new WatermarkAssigner", e);
			}
			sws.emitQueue = recordEmitter.getQueue(shardStateIndex);
			sws.lastUpdated = getCurrentTimeMillis();
			sws.lastRecordTimestamp = Long.MIN_VALUE;
			shardWatermarks.put(shardStateIndex, sws);
		}

		return shardStateIndex;
	}
}
 
Example #20
Source File: FlinkKinesisConsumer.java    From Flink-CEPplus with Apache License 2.0
@Override
public void run(SourceContext<T> sourceContext) throws Exception {

	// all subtasks will run a fetcher, regardless of whether or not the subtask will initially have
	// shards to subscribe to; fetchers will continuously poll for changes in the shard list, so all subtasks
	// can potentially have new shards to subscribe to later on
	KinesisDataFetcher<T> fetcher = createFetcher(streams, sourceContext, getRuntimeContext(), configProps, deserializer);

	// initial discovery
	List<StreamShardHandle> allShards = fetcher.discoverNewShardsToSubscribe();

	for (StreamShardHandle shard : allShards) {
		StreamShardMetadata.EquivalenceWrapper kinesisStreamShard =
			new StreamShardMetadata.EquivalenceWrapper(KinesisDataFetcher.convertToStreamShardMetadata(shard));

		if (sequenceNumsToRestore != null) {

			if (sequenceNumsToRestore.containsKey(kinesisStreamShard)) {
				// if the shard was already seen and is contained in the state,
				// just use the sequence number stored in the state
				fetcher.registerNewSubscribedShardState(
					new KinesisStreamShardState(kinesisStreamShard.getShardMetadata(), shard, sequenceNumsToRestore.get(kinesisStreamShard)));

				if (LOG.isInfoEnabled()) {
					LOG.info("Subtask {} is seeding the fetcher with restored shard {}," +
							" starting state set to the restored sequence number {}",
						getRuntimeContext().getIndexOfThisSubtask(), shard.toString(), sequenceNumsToRestore.get(kinesisStreamShard));
				}
			} else {
				// the shard wasn't discovered in the previous run, therefore should be consumed from the beginning
				fetcher.registerNewSubscribedShardState(
					new KinesisStreamShardState(kinesisStreamShard.getShardMetadata(), shard, SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get()));

				if (LOG.isInfoEnabled()) {
					LOG.info("Subtask {} is seeding the fetcher with new discovered shard {}," +
							" starting state set to the SENTINEL_EARLIEST_SEQUENCE_NUM",
						getRuntimeContext().getIndexOfThisSubtask(), shard.toString());
				}
			}
		} else {
			// we're starting fresh; use the configured start position as initial state
			SentinelSequenceNumber startingSeqNum =
				InitialPosition.valueOf(configProps.getProperty(
					ConsumerConfigConstants.STREAM_INITIAL_POSITION,
					ConsumerConfigConstants.DEFAULT_STREAM_INITIAL_POSITION)).toSentinelSequenceNumber();

			fetcher.registerNewSubscribedShardState(
				new KinesisStreamShardState(kinesisStreamShard.getShardMetadata(), shard, startingSeqNum.get()));

			if (LOG.isInfoEnabled()) {
				LOG.info("Subtask {} will be seeded with initial shard {}, starting state set as sequence number {}",
					getRuntimeContext().getIndexOfThisSubtask(), shard.toString(), startingSeqNum.get());
			}
		}
	}

	// check that we are running before starting the fetcher
	if (!running) {
		return;
	}

	// expose the fetcher from this point, so that state
	// snapshots can be taken from the fetcher's state holders
	this.fetcher = fetcher;

	// start the fetcher loop. The fetcher will stop running only when cancel() or
	// close() is called, or an error is thrown by threads created by the fetcher
	fetcher.runFetcher();

	// check that the fetcher has terminated before fully closing
	fetcher.awaitTermination();
	sourceContext.close();
}
 
Example #21
Source File: KinesisDataFetcher.java    From flink with Apache License 2.0
@VisibleForTesting
public List<KinesisStreamShardState> getSubscribedShardsState() {
	return subscribedShardsState;
}
 
Example #22
Source File: ShardConsumerTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testCorrectNumOfCollectedRecordsAndUpdatedStateWithAdaptiveReads() {
	Properties consumerProperties = new Properties();
	consumerProperties.put("flink.shard.adaptivereads", "true");

	StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);

	LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
	subscribedShardsStateUnderTest.add(
		new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
			fakeToBeConsumedShard, new SequenceNumber("fakeStartingState")));

	TestSourceContext<String> sourceContext = new TestSourceContext<>();

	TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			Collections.singletonList("fakeStream"),
			sourceContext,
			consumerProperties,
			new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
			10,
			2,
			new AtomicReference<>(),
			subscribedShardsStateUnderTest,
			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
			Mockito.mock(KinesisProxyInterface.class));

	int shardIndex = fetcher.registerNewSubscribedShardState(subscribedShardsStateUnderTest.get(0));
	new ShardConsumer<>(
		fetcher,
		shardIndex,
		subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
		// Initial number of records to fetch --> 10
		FakeKinesisBehavioursFactory.initialNumOfRecordsAfterNumOfGetRecordsCallsWithAdaptiveReads(10, 2, 500L),
		new ShardMetricsReporter()).run();

	// Avg record size for first batch --> 10 records * 10 Kb / 10 = 10 Kb
	// Number of records fetched in second batch --> 2 Mb / 5 / 10 Kb = 40
	// (the 2 Mb/s per-shard read limit applied over the default 200 ms fetch interval, at 10 Kb per record)
	// Total number of records = 10 + 40 = 50
	assertEquals(50, sourceContext.getCollectedOutputs().size());
	assertEquals(
		SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum());
}
 
Example #23
Source File: FlinkKinesisConsumerTest.java    From flink with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testFetcherShouldBeCorrectlySeededWithNewDiscoveredKinesisStreamShard() throws Exception {

	// ----------------------------------------------------------------------
	// setup initial state
	// ----------------------------------------------------------------------

	HashMap<StreamShardHandle, SequenceNumber> fakeRestoredState = getFakeRestoredStore("all");

	// ----------------------------------------------------------------------
	// mock operator state backend and initial state for initializeState()
	// ----------------------------------------------------------------------

	TestingListState<Tuple2<StreamShardMetadata, SequenceNumber>> listState = new TestingListState<>();
	for (Map.Entry<StreamShardHandle, SequenceNumber> state : fakeRestoredState.entrySet()) {
		listState.add(Tuple2.of(KinesisDataFetcher.convertToStreamShardMetadata(state.getKey()), state.getValue()));
	}

	OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
	when(operatorStateStore.getUnionListState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState);

	StateInitializationContext initializationContext = mock(StateInitializationContext.class);
	when(initializationContext.getOperatorStateStore()).thenReturn(operatorStateStore);
	when(initializationContext.isRestored()).thenReturn(true);

	// ----------------------------------------------------------------------
	// mock fetcher
	// ----------------------------------------------------------------------

	KinesisDataFetcher mockedFetcher = mockKinesisDataFetcher();
	List<StreamShardHandle> shards = new ArrayList<>();
	shards.addAll(fakeRestoredState.keySet());
	shards.add(new StreamShardHandle("fakeStream2",
		new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(2))));
	when(mockedFetcher.discoverNewShardsToSubscribe()).thenReturn(shards);

	// assume the given config is correct
	PowerMockito.mockStatic(KinesisConfigUtil.class);
	PowerMockito.doNothing().when(KinesisConfigUtil.class);

	// ----------------------------------------------------------------------
	// start to test fetcher's initial state seeding
	// ----------------------------------------------------------------------

	TestableFlinkKinesisConsumer consumer = new TestableFlinkKinesisConsumer(
		"fakeStream", new Properties(), 10, 2);
	consumer.initializeState(initializationContext);
	consumer.open(new Configuration());
	consumer.run(Mockito.mock(SourceFunction.SourceContext.class));

	fakeRestoredState.put(new StreamShardHandle("fakeStream2",
			new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(2))),
		SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get());
	for (Map.Entry<StreamShardHandle, SequenceNumber> restoredShard : fakeRestoredState.entrySet()) {
		Mockito.verify(mockedFetcher).registerNewSubscribedShardState(
			new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(restoredShard.getKey()),
				restoredShard.getKey(), restoredShard.getValue()));
	}
}
 
Example #24
Source File: ShardConsumerTest.java    From Flink-CEPplus with Apache License 2.0 4 votes vote down vote up
@Test
public void testCorrectNumOfCollectedRecordsAndUpdatedStateWithUnexpectedExpiredIterator() {
	StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);

	LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
	subscribedShardsStateUnderTest.add(
		new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
			fakeToBeConsumedShard, new SequenceNumber("fakeStartingState")));

	TestSourceContext<String> sourceContext = new TestSourceContext<>();

	TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			Collections.singletonList("fakeStream"),
			sourceContext,
			new Properties(),
			new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
			10,
			2,
			new AtomicReference<>(),
			subscribedShardsStateUnderTest,
			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
			Mockito.mock(KinesisProxyInterface.class));

	int shardIndex = fetcher.registerNewSubscribedShardState(subscribedShardsStateUnderTest.get(0));
	new ShardConsumer<>(
		fetcher,
		shardIndex,
		subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
		// Get a total of 1000 records with 9 getRecords() calls,
		// and the 7th getRecords() call will encounter an unexpected expired shard iterator
		FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCallsWithUnexpectedExpiredIterator(
			1000, 9, 7, 500L),
		new ShardMetricsReporter()).run();

	assertEquals(1000, sourceContext.getCollectedOutputs().size());
	assertEquals(
		SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum());
}
 
Example #25
Source File: FlinkKinesisConsumer.java    From flink with Apache License 2.0
@Override
public void run(SourceContext<T> sourceContext) throws Exception {

	// all subtasks will run a fetcher, regardless of whether or not the subtask will initially have
	// shards to subscribe to; fetchers will continuously poll for changes in the shard list, so all subtasks
	// can potentially have new shards to subscribe to later on
	KinesisDataFetcher<T> fetcher = createFetcher(streams, sourceContext, getRuntimeContext(), configProps, deserializer);

	// initial discovery
	List<StreamShardHandle> allShards = fetcher.discoverNewShardsToSubscribe();

	for (StreamShardHandle shard : allShards) {
		StreamShardMetadata.EquivalenceWrapper kinesisStreamShard =
			new StreamShardMetadata.EquivalenceWrapper(KinesisDataFetcher.convertToStreamShardMetadata(shard));

		if (sequenceNumsToRestore != null) {

			if (sequenceNumsToRestore.containsKey(kinesisStreamShard)) {
				// if the shard was already seen and is contained in the state,
				// just use the sequence number stored in the state
				fetcher.registerNewSubscribedShardState(
					new KinesisStreamShardState(kinesisStreamShard.getShardMetadata(), shard, sequenceNumsToRestore.get(kinesisStreamShard)));

				if (LOG.isInfoEnabled()) {
					LOG.info("Subtask {} is seeding the fetcher with restored shard {}," +
							" starting state set to the restored sequence number {}",
						getRuntimeContext().getIndexOfThisSubtask(), shard.toString(), sequenceNumsToRestore.get(kinesisStreamShard));
				}
			} else {
				// the shard wasn't discovered in the previous run, therefore should be consumed from the beginning
				fetcher.registerNewSubscribedShardState(
					new KinesisStreamShardState(kinesisStreamShard.getShardMetadata(), shard, SentinelSequenceNumber.SENTINEL_EARLIEST_SEQUENCE_NUM.get()));

				if (LOG.isInfoEnabled()) {
					LOG.info("Subtask {} is seeding the fetcher with new discovered shard {}," +
							" starting state set to the SENTINEL_EARLIEST_SEQUENCE_NUM",
						getRuntimeContext().getIndexOfThisSubtask(), shard.toString());
				}
			}
		} else {
			// we're starting fresh; use the configured start position as initial state
			SentinelSequenceNumber startingSeqNum =
				InitialPosition.valueOf(configProps.getProperty(
					ConsumerConfigConstants.STREAM_INITIAL_POSITION,
					ConsumerConfigConstants.DEFAULT_STREAM_INITIAL_POSITION)).toSentinelSequenceNumber();

			fetcher.registerNewSubscribedShardState(
				new KinesisStreamShardState(kinesisStreamShard.getShardMetadata(), shard, startingSeqNum.get()));

			if (LOG.isInfoEnabled()) {
				LOG.info("Subtask {} will be seeded with initial shard {}, starting state set as sequence number {}",
					getRuntimeContext().getIndexOfThisSubtask(), shard.toString(), startingSeqNum.get());
			}
		}
	}

	// check that we are running before starting the fetcher
	if (!running) {
		return;
	}

	// expose the fetcher from this point, so that state
	// snapshots can be taken from the fetcher's state holders
	this.fetcher = fetcher;

	// start the fetcher loop. The fetcher will stop running only when cancel() or
	// close() is called, or an error is thrown by threads created by the fetcher
	fetcher.runFetcher();

	// check that the fetcher has terminated before fully closing
	fetcher.awaitTermination();
	sourceContext.close();
}
 
Example #26
Source File: ShardConsumerTest.java    From flink with Apache License 2.0
@Test
public void testMetricsReporting() {
	StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);

	LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
	subscribedShardsStateUnderTest.add(
		new KinesisStreamShardState(
			KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
			fakeToBeConsumedShard,
			new SequenceNumber("fakeStartingState")));

	TestSourceContext<String> sourceContext = new TestSourceContext<>();

	KinesisDeserializationSchemaWrapper<String> deserializationSchema = new KinesisDeserializationSchemaWrapper<>(
		new SimpleStringSchema());
	TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			Collections.singletonList("fakeStream"),
			sourceContext,
			new Properties(),
			deserializationSchema,
			10,
			2,
			new AtomicReference<>(),
			subscribedShardsStateUnderTest,
			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
			Mockito.mock(KinesisProxyInterface.class));

	ShardMetricsReporter shardMetricsReporter = new ShardMetricsReporter();
	long millisBehindLatest = 500L;
	new ShardConsumer<>(
		fetcher,
		0,
		subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
		FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9, millisBehindLatest),
		shardMetricsReporter,
		deserializationSchema)
		.run();

	// the millisBehindLatest metric should have been reported
	assertEquals(millisBehindLatest, shardMetricsReporter.getMillisBehindLatest());
}
 
Example #27
Source File: ShardConsumerTest.java    From flink with Apache License 2.0
@Test
public void testCorrectNumOfCollectedRecordsAndUpdatedState() {
	StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);

	LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
	subscribedShardsStateUnderTest.add(
		new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
			fakeToBeConsumedShard, new SequenceNumber("fakeStartingState")));

	TestSourceContext<String> sourceContext = new TestSourceContext<>();

	KinesisDeserializationSchemaWrapper<String> deserializationSchema = new KinesisDeserializationSchemaWrapper<>(
		new SimpleStringSchema());
	TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			Collections.singletonList("fakeStream"),
			sourceContext,
			new Properties(),
			deserializationSchema,
			10,
			2,
			new AtomicReference<>(),
			subscribedShardsStateUnderTest,
			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
			Mockito.mock(KinesisProxyInterface.class));

	int shardIndex = fetcher.registerNewSubscribedShardState(subscribedShardsStateUnderTest.get(0));
	new ShardConsumer<>(
		fetcher,
		shardIndex,
		subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
		FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCalls(1000, 9, 500L),
		new ShardMetricsReporter(),
		deserializationSchema)
		.run();

	assertEquals(1000, sourceContext.getCollectedOutputs().size());
	assertEquals(
		SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum());
}
 
Example #28
Source File: ShardConsumerTest.java    From flink with Apache License 2.0 4 votes vote down vote up
@Test
public void testCorrectNumOfCollectedRecordsAndUpdatedStateWithUnexpectedExpiredIterator() {
	StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);

	LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
	subscribedShardsStateUnderTest.add(
		new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
			fakeToBeConsumedShard, new SequenceNumber("fakeStartingState")));

	TestSourceContext<String> sourceContext = new TestSourceContext<>();

	KinesisDeserializationSchemaWrapper<String> deserializationSchema = new KinesisDeserializationSchemaWrapper<>(
		new SimpleStringSchema());
	TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			Collections.singletonList("fakeStream"),
			sourceContext,
			new Properties(),
			deserializationSchema,
			10,
			2,
			new AtomicReference<>(),
			subscribedShardsStateUnderTest,
			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
			Mockito.mock(KinesisProxyInterface.class));

	int shardIndex = fetcher.registerNewSubscribedShardState(subscribedShardsStateUnderTest.get(0));
	new ShardConsumer<>(
		fetcher,
		shardIndex,
		subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
		// Get a total of 1000 records with 9 getRecords() calls,
		// and the 7th getRecords() call will encounter an unexpected expired shard iterator
		FakeKinesisBehavioursFactory.totalNumOfRecordsAfterNumOfGetRecordsCallsWithUnexpectedExpiredIterator(
			1000, 9, 7, 500L),
		new ShardMetricsReporter(),
		deserializationSchema)
		.run();

	assertEquals(1000, sourceContext.getCollectedOutputs().size());
	assertEquals(
		SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum());
}
 
Example #29
Source File: FlinkKinesisConsumerTest.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * FLINK-8484: ensure that a state change in the StreamShardMetadata other than {@link StreamShardMetadata#getShardId()} or
 * {@link StreamShardMetadata#getStreamName()} does not prevent the shard from being restored.
 * This handles the corner case where the stored shard metadata is open (no ending sequence number), but after the
 * job restore the shard has been closed (ending sequence number set) due to re-sharding, so we can no longer rely on
 * {@link StreamShardMetadata#equals(Object)} to look up the sequence number in the collection of restored shard metadata.
 * <p></p>
 * Therefore, we synchronize the snapshot's state with the Kinesis shard before attempting to look up
 * the sequence number to restore.
 */
@Test
public void testFindSequenceNumberToRestoreFromIfTheShardHasBeenClosedSinceTheStateWasStored() throws Exception {
	// ----------------------------------------------------------------------
	// setup initial state
	// ----------------------------------------------------------------------

	HashMap<StreamShardHandle, SequenceNumber> fakeRestoredState = getFakeRestoredStore("all");

	// ----------------------------------------------------------------------
	// mock operator state backend and initial state for initializeState()
	// ----------------------------------------------------------------------

	TestingListState<Tuple2<StreamShardMetadata, SequenceNumber>> listState = new TestingListState<>();
	for (Map.Entry<StreamShardHandle, SequenceNumber> state : fakeRestoredState.entrySet()) {
		listState.add(Tuple2.of(KinesisDataFetcher.convertToStreamShardMetadata(state.getKey()), state.getValue()));
	}

	OperatorStateStore operatorStateStore = mock(OperatorStateStore.class);
	when(operatorStateStore.getUnionListState(Matchers.any(ListStateDescriptor.class))).thenReturn(listState);

	StateInitializationContext initializationContext = mock(StateInitializationContext.class);
	when(initializationContext.getOperatorStateStore()).thenReturn(operatorStateStore);
	when(initializationContext.isRestored()).thenReturn(true);

	// ----------------------------------------------------------------------
	// mock fetcher
	// ----------------------------------------------------------------------

	KinesisDataFetcher mockedFetcher = mockKinesisDataFetcher();
	List<StreamShardHandle> shards = new ArrayList<>();

	// create a fake stream shard handle based on the first entry in the restored state
	final StreamShardHandle originalStreamShardHandle = fakeRestoredState.keySet().iterator().next();
	final StreamShardHandle closedStreamShardHandle = new StreamShardHandle(originalStreamShardHandle.getStreamName(), originalStreamShardHandle.getShard());
	// close the shard handle by setting an ending sequence number
	final SequenceNumberRange sequenceNumberRange = new SequenceNumberRange();
	sequenceNumberRange.setEndingSequenceNumber("1293844");
	closedStreamShardHandle.getShard().setSequenceNumberRange(sequenceNumberRange);

	shards.add(closedStreamShardHandle);

	when(mockedFetcher.discoverNewShardsToSubscribe()).thenReturn(shards);

	// assume the given config is correct
	PowerMockito.mockStatic(KinesisConfigUtil.class);
	PowerMockito.doNothing().when(KinesisConfigUtil.class);

	// ----------------------------------------------------------------------
	// start to test fetcher's initial state seeding
	// ----------------------------------------------------------------------

	TestableFlinkKinesisConsumer consumer = new TestableFlinkKinesisConsumer(
		"fakeStream", new Properties(), 10, 2);
	consumer.initializeState(initializationContext);
	consumer.open(new Configuration());
	consumer.run(Mockito.mock(SourceFunction.SourceContext.class));

	Mockito.verify(mockedFetcher).registerNewSubscribedShardState(
		new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(closedStreamShardHandle),
			closedStreamShardHandle, fakeRestoredState.get(closedStreamShardHandle)));
}
 
Example #30
Source File: ShardConsumerTest.java    From flink with Apache License 2.0 4 votes vote down vote up
@Test
public void testCorrectNumOfCollectedRecordsAndUpdatedStateWithAdaptiveReads() {
	Properties consumerProperties = new Properties();
	consumerProperties.put("flink.shard.adaptivereads", "true");

	StreamShardHandle fakeToBeConsumedShard = getMockStreamShard("fakeStream", 0);

	LinkedList<KinesisStreamShardState> subscribedShardsStateUnderTest = new LinkedList<>();
	subscribedShardsStateUnderTest.add(
		new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(fakeToBeConsumedShard),
			fakeToBeConsumedShard, new SequenceNumber("fakeStartingState")));

	TestSourceContext<String> sourceContext = new TestSourceContext<>();

	KinesisDeserializationSchemaWrapper<String> deserializationSchema = new KinesisDeserializationSchemaWrapper<>(
		new SimpleStringSchema());
	TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			Collections.singletonList("fakeStream"),
			sourceContext,
			consumerProperties,
			deserializationSchema,
			10,
			2,
			new AtomicReference<>(),
			subscribedShardsStateUnderTest,
			KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(Collections.singletonList("fakeStream")),
			Mockito.mock(KinesisProxyInterface.class));

	int shardIndex = fetcher.registerNewSubscribedShardState(subscribedShardsStateUnderTest.get(0));
	new ShardConsumer<>(
		fetcher,
		shardIndex,
		subscribedShardsStateUnderTest.get(0).getStreamShardHandle(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum(),
		// Initial number of records to fetch --> 10
		FakeKinesisBehavioursFactory.initialNumOfRecordsAfterNumOfGetRecordsCallsWithAdaptiveReads(10, 2, 500L),
		new ShardMetricsReporter(),
		deserializationSchema)
		.run();

	// Avg record size for first batch --> 10 records * 10 Kb / 10 = 10 Kb
	// Number of records fetched in second batch --> 2 Mb / 5 / 10 Kb = 40
	// (the 2 Mb/s per-shard read limit applied over the default 200 ms fetch interval, at 10 Kb per record)
	// Total number of records = 10 + 40 = 50
	assertEquals(50, sourceContext.getCollectedOutputs().size());
	assertEquals(
		SentinelSequenceNumber.SENTINEL_SHARD_ENDING_SEQUENCE_NUM.get(),
		subscribedShardsStateUnderTest.get(0).getLastProcessedSequenceNum());
}