Java Code Examples for org.apache.flink.core.testutils.CheckedThread#start()

The following examples show how to use org.apache.flink.core.testutils.CheckedThread#start(). They are taken from open source projects; the source file, project, and license are noted above each example.
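Every example below follows the same basic pattern, shown here as a minimal, self-contained sketch (the class and thread names are illustrative): subclass CheckedThread, put the work in go(), start the thread with the inherited Thread#start(), and later call sync(), which joins the thread and rethrows any exception thrown from go().

import org.apache.flink.core.testutils.CheckedThread;

public class CheckedThreadSketch {

	public static void main(String[] args) throws Exception {
		CheckedThread worker = new CheckedThread("worker") {
			@Override
			public void go() throws Exception {
				// any exception thrown here is captured by the CheckedThread
				System.out.println("doing checked work");
			}
		};

		worker.start(); // plain Thread#start(); go() runs asynchronously
		worker.sync();  // joins the thread and rethrows a captured exception
	}
}
 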
Example 1
Source File: NetworkBufferPoolTest.java    From flink with Apache License 2.0
/**
 * Tests {@link NetworkBufferPool#requestMemorySegments()}, verifying it may be aborted and
 * remains in a defined state even if the waiting is interrupted.
 */
@Test
public void testRequestMemorySegmentsInterruptable2() throws Exception {
	final int numBuffers = 10;

	NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128, 10);
	MemorySegment segment = globalPool.requestMemorySegment();
	assertNotNull(segment);

	final OneShotLatch isRunning = new OneShotLatch();
	CheckedThread asyncRequest = new CheckedThread() {
		@Override
		public void go() throws Exception {
			isRunning.trigger();
			globalPool.requestMemorySegments();
		}
	};
	asyncRequest.start();

	// We want the interrupt to happen inside the blocking part of the
	// globalPool.requestMemorySegments() call above. We cannot guarantee
	// this, but we make it highly probable:
	isRunning.await();
	Thread.sleep(10);
	asyncRequest.interrupt();

	globalPool.recycle(segment);

	try {
		asyncRequest.sync();
	} catch (IOException e) {
		assertThat(e, hasProperty("cause", instanceOf(InterruptedException.class)));

		// test indirectly for NetworkBufferPool#numTotalRequiredBuffers being correct:
		// -> creating a new buffer pool should not fail
		globalPool.createBufferPool(10, 10);
	} finally {
		globalPool.destroy();
	}
}
 
Example 2
Source File: NetworkBufferPoolTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests {@link NetworkBufferPool#requestMemorySegments(int)}, verifying it may be aborted and
 * remains in a defined state even if the waiting is interrupted.
 */
@Test
public void testRequestMemorySegmentsInterruptable2() throws Exception {
	final int numBuffers = 10;

	NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128);
	MemorySegment segment = globalPool.requestMemorySegment();
	assertNotNull(segment);

	final OneShotLatch isRunning = new OneShotLatch();
	CheckedThread asyncRequest = new CheckedThread() {
		@Override
		public void go() throws Exception {
			isRunning.trigger();
			globalPool.requestMemorySegments(10);
		}
	};
	asyncRequest.start();

	// We want the interrupt to happen inside the blocking part of the
	// globalPool.requestMemorySegments() call above. We cannot guarantee
	// this, but we make it highly probable:
	isRunning.await();
	Thread.sleep(10);
	asyncRequest.interrupt();

	globalPool.recycle(segment);

	try {
		asyncRequest.sync();
	} catch (IOException e) {
		assertThat(e, hasProperty("cause", instanceOf(InterruptedException.class)));

		// test indirectly for NetworkBufferPool#numTotalRequiredBuffers being correct:
		// -> creating a new buffer pool should not fail
		globalPool.createBufferPool(10, 10);
	} finally {
		globalPool.destroy();
	}
}
 
Example 3
Source File: AvroSerializerConcurrencyTest.java    From flink with Apache License 2.0
@Test
public void testConcurrentUseOfSerializer() throws Exception {
	final AvroSerializer<String> serializer = new AvroSerializer<>(String.class);

	final BlockerSync sync = new BlockerSync();

	final DataOutputView regularOut = new DataOutputSerializer(32);
	final DataOutputView lockingOut = new LockingView(sync);

	// this thread serializes and gets stuck there
	final CheckedThread thread = new CheckedThread("serializer") {
		@Override
		public void go() throws Exception {
			serializer.serialize("a value", lockingOut);
		}
	};

	thread.start();
	sync.awaitBlocker();

	// this should fail with an exception
	try {
		serializer.serialize("value", regularOut);
		fail("should have failed with an exception");
	}
	catch (IllegalStateException e) {
		// expected
	}
	finally {
		// release the thread that serializes
		sync.releaseBlocker();
	}

	// this propagates exceptions from the spawned thread
	thread.sync();
}
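The BlockerSync and LockingView helpers used above are not part of the snippet. BlockerSync is a Flink test utility; LockingView is a private helper in the test class, so the following is only a sketch of what it plausibly looks like: a DataOutputView whose write path parks on the BlockerSync, so the serializing thread gets stuck inside serialize() until releaseBlocker() is called.

// a sketch only; the real LockingView in the test file may differ in which
// write method it intercepts
private static class LockingView extends DataOutputSerializer {

	private final BlockerSync blocker;

	LockingView(BlockerSync blocker) {
		super(32);
		this.blocker = blocker;
	}

	@Override
	public void write(int b) throws IOException {
		// wakes awaitBlocker() and parks until releaseBlocker() is called
		blocker.blockNonInterruptible();
		super.write(b);
	}
}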
 
Example 4
Source File: NetworkBufferPoolTest.java    From flink with Apache License 2.0
/**
 * Tests {@link NetworkBufferPool#requestMemorySegments()}, verifying it may be aborted in
 * case of a concurrent {@link NetworkBufferPool#destroy()} call.
 */
@Test
public void testRequestMemorySegmentsInterruptable() throws Exception {
	final int numBuffers = 10;

	NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128, 10);
	MemorySegment segment = globalPool.requestMemorySegment();
	assertNotNull(segment);

	final OneShotLatch isRunning = new OneShotLatch();
	CheckedThread asyncRequest = new CheckedThread() {
		@Override
		public void go() throws Exception {
			isRunning.trigger();
			globalPool.requestMemorySegments();
		}
	};
	asyncRequest.start();

	// We want the destroy call to happen inside the blocking part of the
	// globalPool.requestMemorySegments() call above. We cannot guarantee
	// this, but we make it highly probable:
	isRunning.await();
	Thread.sleep(10);
	globalPool.destroy();

	segment.free();

	expectedException.expect(IllegalStateException.class);
	expectedException.expectMessage("destroyed");
	try {
		asyncRequest.sync();
	} finally {
		globalPool.destroy();
	}
}
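This example and Example 6 rely on an expectedException field that the snippets do not declare; it is presumably JUnit 4's ExpectedException rule on the test class, along these lines:

@Rule
public final ExpectedException expectedException = ExpectedException.none();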
 
Example 5
Source File: JobManagerMetricsITCase.java    From Flink-CEPplus with Apache License 2.0
@Before
public void setUp() throws Exception {
	jobExecuteThread = new CheckedThread() {

		@Override
		public void go() throws Exception {
			StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
			env.addSource(new SourceFunction<String>() {

				@Override
				public void run(SourceContext<String> ctx) throws Exception {
					sync.block();
				}

				@Override
				public void cancel() {
					sync.releaseBlocker();
				}

			}).addSink(new PrintSinkFunction());

			env.execute();
		}

	};

	jobExecuteThread.start();
	sync.awaitBlocker();
}
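The setUp above leaves the job blocked on the BlockerSync. A matching tearDown is not part of the snippet; as a sketch (assuming the sync and jobExecuteThread fields from above), it would release the blocker so the source's run() returns, then call sync() to surface any failure from the job thread.

@After
public void tearDown() throws Exception {
	// let the source's sync.block() return so that env.execute() can finish
	sync.releaseBlocker();
	// joins the job thread and rethrows anything env.execute() threw
	jobExecuteThread.sync();
}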
 
Example 6
Source File: NetworkBufferPoolTest.java    From flink with Apache License 2.0
/**
 * Tests {@link NetworkBufferPool#requestMemorySegments()}, verifying that it ends exceptionally
 * when it fails to acquire all of the segments within the specified timeout.
 */
@Test
public void testRequestMemorySegmentsTimeout() throws Exception {
	final int numBuffers = 10;
	final int numberOfSegmentsToRequest = 2;
	final Duration requestSegmentsTimeout = Duration.ofMillis(50L);

	NetworkBufferPool globalPool = new NetworkBufferPool(
			numBuffers,
			128,
			numberOfSegmentsToRequest,
			requestSegmentsTimeout);

	BufferPool localBufferPool = globalPool.createBufferPool(0, numBuffers);
	for (int i = 0; i < numBuffers; ++i) {
		localBufferPool.requestBuffer();
	}

	assertEquals(0, globalPool.getNumberOfAvailableMemorySegments());

	CheckedThread asyncRequest = new CheckedThread() {
		@Override
		public void go() throws Exception {
			globalPool.requestMemorySegments();
		}
	};

	asyncRequest.start();

	expectedException.expect(IOException.class);
	expectedException.expectMessage("Timeout");

	try {
		asyncRequest.sync();
	} finally {
		globalPool.destroy();
	}
}
 
Example 7
Source File: KryoSerializerConcurrencyTest.java    From flink with Apache License 2.0
@Test
public void testConcurrentUseOfSerializer() throws Exception {
	final KryoSerializer<String> serializer = new KryoSerializer<>(String.class, new ExecutionConfig());

	final BlockerSync sync = new BlockerSync();

	final DataOutputView regularOut = new DataOutputSerializer(32);
	final DataOutputView lockingOut = new LockingView(sync);

	// this thread serializes and gets stuck there
	final CheckedThread thread = new CheckedThread("serializer") {
		@Override
		public void go() throws Exception {
			serializer.serialize("a value", lockingOut);
		}
	};

	thread.start();
	sync.awaitBlocker();

	// this should fail with an exception
	try {
		serializer.serialize("value", regularOut);
		fail("should have failed with an exception");
	}
	catch (IllegalStateException e) {
		// expected
	}
	finally {
		// release the thread that serializes
		sync.releaseBlocker();
	}

	// this propagates exceptions from the spawned thread
	thread.sync();
}
 
Example 8
Source File: KinesisDataFetcherTest.java    From Flink-CEPplus with Apache License 2.0
@Test
public void testStreamToLastSeenShardStateIsCorrectlySetWhenNoNewShardsSinceRestoredCheckpointAndSomeStreamsDoNotExist() throws Exception {
	List<String> fakeStreams = new LinkedList<>();
	fakeStreams.add("fakeStream1");
	fakeStreams.add("fakeStream2");
	fakeStreams.add("fakeStream3"); // fakeStream3 will not have any shards
	fakeStreams.add("fakeStream4"); // fakeStream4 will not have any shards

	Map<StreamShardHandle, String> restoredStateUnderTest = new HashMap<>();

	// fakeStream1 has 3 shards before restore
	restoredStateUnderTest.put(
		new StreamShardHandle(
			"fakeStream1",
			new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(0))),
		UUID.randomUUID().toString());
	restoredStateUnderTest.put(
		new StreamShardHandle(
			"fakeStream1",
			new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(1))),
		UUID.randomUUID().toString());
	restoredStateUnderTest.put(
		new StreamShardHandle(
			"fakeStream1",
			new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(2))),
		UUID.randomUUID().toString());

	// fakeStream2 has 2 shards before restore
	restoredStateUnderTest.put(
		new StreamShardHandle(
			"fakeStream2",
			new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(0))),
		UUID.randomUUID().toString());
	restoredStateUnderTest.put(
		new StreamShardHandle(
			"fakeStream2",
			new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(1))),
		UUID.randomUUID().toString());

	Map<String, Integer> streamToShardCount = new HashMap<>();
	streamToShardCount.put("fakeStream1", 3); // fakeStream1 has fixed 3 shards
	streamToShardCount.put("fakeStream2", 2); // fakeStream2 has fixed 2 shards
	streamToShardCount.put("fakeStream3", 0); // no shards can be found for fakeStream3
	streamToShardCount.put("fakeStream4", 0); // no shards can be found for fakeStream4

	HashMap<String, String> subscribedStreamsToLastSeenShardIdsUnderTest =
		KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(fakeStreams);

	// use the non-resharded streams behaviour to represent that Kinesis is not resharded AFTER the restore
	final TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			fakeStreams,
			new TestSourceContext<>(),
			TestUtils.getStandardProperties(),
			new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
			10,
			2,
			new AtomicReference<>(),
			new LinkedList<>(),
			subscribedStreamsToLastSeenShardIdsUnderTest,
			FakeKinesisBehavioursFactory.nonReshardedStreamsBehaviour(streamToShardCount));

	for (Map.Entry<StreamShardHandle, String> restoredState : restoredStateUnderTest.entrySet()) {
		fetcher.advanceLastDiscoveredShardOfStream(restoredState.getKey().getStreamName(), restoredState.getKey().getShard().getShardId());
		fetcher.registerNewSubscribedShardState(
			new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(restoredState.getKey()),
				restoredState.getKey(), new SequenceNumber(restoredState.getValue())));
	}

	CheckedThread runFetcherThread = new CheckedThread() {
		@Override
		public void go() throws Exception {
			fetcher.runFetcher();
		}
	};
	runFetcherThread.start();

	fetcher.waitUntilInitialDiscovery();
	fetcher.shutdownFetcher();
	runFetcherThread.sync();

	// assert that the streams tracked in the state are identical to the subscribed streams
	Set<String> streamsInState = subscribedStreamsToLastSeenShardIdsUnderTest.keySet();
	assertEquals(fakeStreams.size(), streamsInState.size());
	assertTrue(streamsInState.containsAll(fakeStreams));

	// assert that the last seen shards in state are correctly set
	assertEquals(
		KinesisShardIdGenerator.generateFromShardOrder(2),
		subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream1"));
	assertEquals(
		KinesisShardIdGenerator.generateFromShardOrder(1),
		subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream2"));
	assertNull(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream3"));
	assertNull(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream4"));
}
 
Example 9
Source File: FlinkPulsarSinkTest.java    From pulsar-flink with Apache License 2.0
/**
 * Test ensuring that the producer is not dropping buffered records;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 10000)
public void testAtLeastOnceProducer() throws Throwable {
    final DummyFlinkPulsarSink<String> sink = new DummyFlinkPulsarSink<>(dummyClientConf(), dummyProperties(), mock(TopicKeyExtractor.class), null);

    final Producer mockProducer = sink.getProducer("tp");

    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
            new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

    testHarness.open();

    testHarness.processElement(new StreamRecord<>("msg-1"));
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));

    verify(mockProducer, times(3));
    Assert.assertEquals(3, sink.getPendingSize());

    // start a thread to perform checkpointing
    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            // this should block until all records are flushed;
            // if the snapshot implementation returns before pending records are
            // flushed, the assertions below will catch it
            testHarness.snapshot(123L, 123L);
        }
    };
    snapshotThread.start();

    // before proceeding, make sure that flushing has started and that the snapshot is still blocked;
    // this would block forever if the snapshot didn't perform a flush
    sink.waitUntilFlushStarted();
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());

    // now, complete the callbacks
    sink.getPendingCallbacks().get(0).accept(null, null);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());
    Assert.assertEquals(2, sink.getPendingSize());

    sink.getPendingCallbacks().get(1).accept(null, null);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());
    Assert.assertEquals(1, sink.getPendingSize());

    sink.getPendingCallbacks().get(2).accept(null, null);
    Assert.assertEquals(0, sink.getPendingSize());

    // this would fail with an exception if flushing wasn't completed before the snapshot method returned
    snapshotThread.sync();

    testHarness.close();
}
 
Example 10
Source File: KinesisDataFetcherTest.java    From flink with Apache License 2.0
@Test
public void testSkipCorruptedRecord() throws Exception {
	final String stream = "fakeStream";
	final int numShards = 3;

	final LinkedList<KinesisStreamShardState> testShardStates = new LinkedList<>();
	final TestSourceContext<String> sourceContext = new TestSourceContext<>();

	final TestableKinesisDataFetcher<String> fetcher = new TestableKinesisDataFetcher<>(
		Collections.singletonList(stream),
		sourceContext,
		TestUtils.getStandardProperties(),
		new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
		1,
		0,
		new AtomicReference<>(),
		testShardStates,
		new HashMap<>(),
		FakeKinesisBehavioursFactory.nonReshardedStreamsBehaviour(Collections.singletonMap(stream, numShards)));

	// FlinkKinesisConsumer is responsible for setting up the fetcher before it can be run;
	// run the consumer until it reaches the point where the fetcher starts to run
	final DummyFlinkKinesisConsumer<String> consumer = new DummyFlinkKinesisConsumer<>(TestUtils.getStandardProperties(), fetcher, 1, 0);

	CheckedThread consumerThread = new CheckedThread() {
		@Override
		public void go() throws Exception {
			consumer.run(new TestSourceContext<>());
		}
	};
	consumerThread.start();

	fetcher.waitUntilRun();
	consumer.cancel();
	consumerThread.sync();

	assertEquals(numShards, testShardStates.size());

	for (int i = 0; i < numShards; i++) {
		fetcher.emitRecordAndUpdateState("record-" + i, 10L, i, new SequenceNumber("seq-num-1"));
		assertEquals(new SequenceNumber("seq-num-1"), testShardStates.get(i).getLastProcessedSequenceNum());
		assertEquals(new StreamRecord<>("record-" + i, 10L), sourceContext.removeLatestOutput());
	}

	// emitting a null (i.e., a corrupt record) should not produce any output, but should still update the shard state
	fetcher.emitRecordAndUpdateState(null, 10L, 1, new SequenceNumber("seq-num-2"));
	assertEquals(new SequenceNumber("seq-num-2"), testShardStates.get(1).getLastProcessedSequenceNum());
	assertEquals(null, sourceContext.removeLatestOutput()); // no output should have been collected
}
 
Example 11
Source File: FlinkKinesisProducerTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Test ensuring that the producer blocks if the queue limit is exceeded,
 * until the queue length drops below the limit;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@Test(timeout = 10000)
public void testBackpressure() throws Throwable {
	final DummyFlinkKinesisProducer<String> producer = new DummyFlinkKinesisProducer<>(new SimpleStringSchema());
	producer.setQueueLimit(1);

	OneInputStreamOperatorTestHarness<String, Object> testHarness =
			new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

	testHarness.open();

	UserRecordResult result = mock(UserRecordResult.class);
	when(result.isSuccessful()).thenReturn(true);

	CheckedThread msg1 = new CheckedThread() {
		@Override
		public void go() throws Exception {
			testHarness.processElement(new StreamRecord<>("msg-1"));
		}
	};
	msg1.start();
	msg1.trySync(100);
	assertFalse("Flush triggered before reaching queue limit", msg1.isAlive());

	// consume msg-1 so that queue is empty again
	producer.getPendingRecordFutures().get(0).set(result);

	CheckedThread msg2 = new CheckedThread() {
		@Override
		public void go() throws Exception {
			testHarness.processElement(new StreamRecord<>("msg-2"));
		}
	};
	msg2.start();
	msg2.trySync(100);
	assertFalse("Flush triggered before reaching queue limit", msg2.isAlive());

	CheckedThread moreElementsThread = new CheckedThread() {
		@Override
		public void go() throws Exception {
			// this should block until msg-2 is consumed
			testHarness.processElement(new StreamRecord<>("msg-3"));
			// this should block until msg-3 is consumed
			testHarness.processElement(new StreamRecord<>("msg-4"));
		}
	};
	moreElementsThread.start();

	moreElementsThread.trySync(100);
	assertTrue("Producer should still block, but doesn't", moreElementsThread.isAlive());

	// consume msg-2 from the queue, leaving msg-3 in the queue and msg-4 blocked
	producer.getPendingRecordFutures().get(1).set(result);

	moreElementsThread.trySync(100);
	assertTrue("Producer should still block, but doesn't", moreElementsThread.isAlive());

	// consume msg-3; the blocked msg-4 can now be inserted into the queue, releasing the block
	producer.getPendingRecordFutures().get(2).set(result);

	moreElementsThread.trySync(100);

	assertFalse("Prodcuer still blocks although the queue is flushed", moreElementsThread.isAlive());

	producer.getPendingRecordFutures().get(3).set(result);

	testHarness.close();
}
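Unlike sync(), the trySync(timeout) calls above wait at most the given time and return normally even if the thread is still running (errors are only rethrown once the thread has finished), which is why each call is paired with an isAlive() assertion. A minimal sketch of that pattern, with illustrative names:

@Test
public void trySyncSketch() throws Exception {
	CheckedThread blocked = new CheckedThread() {
		@Override
		public void go() throws Exception {
			Thread.sleep(Long.MAX_VALUE); // stands in for a blocking call
		}
	};
	blocked.start();

	blocked.trySync(100);                  // waits at most 100 ms, then returns
	assertTrue("thread should still be blocked", blocked.isAlive());

	blocked.interrupt();                   // unblock the sleep
	try {
		blocked.sync();                    // rethrows the captured InterruptedException
		fail("should have rethrown the interruption");
	} catch (InterruptedException expected) {
		// expected
	}
}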
 
Example 12
Source File: AsyncWaitOperatorTest.java    From flink with Apache License 2.0
/**
 * Tests that the AsyncWaitOperator can restart if the checkpointed queue was full.
 *
 * <p>See FLINK-7949
 */
@Test(timeout = 10000)
public void testRestartWithFullQueue() throws Exception {
	int capacity = 10;

	// 1. create the snapshot which contains capacity + 1 elements
	final CompletableFuture<Void> trigger = new CompletableFuture<>();
	final ControllableAsyncFunction<Integer> controllableAsyncFunction = new ControllableAsyncFunction<>(trigger);

	final OneInputStreamOperatorTestHarness<Integer, Integer> snapshotHarness = new OneInputStreamOperatorTestHarness<>(
		new AsyncWaitOperator<>(
			controllableAsyncFunction, // a blocking function: completes only when 'trigger' is completed
			1000L,
			capacity,
			AsyncDataStream.OutputMode.ORDERED),
		IntSerializer.INSTANCE);

	snapshotHarness.open();

	final OperatorSubtaskState snapshot;

	final ArrayList<Integer> expectedOutput = new ArrayList<>(capacity + 1);

	try {
		synchronized (snapshotHarness.getCheckpointLock()) {
			for (int i = 0; i < capacity; i++) {
				snapshotHarness.processElement(i, 0L);
				expectedOutput.add(i);
			}
		}

		expectedOutput.add(capacity);

		final OneShotLatch lastElement = new OneShotLatch();

		final CheckedThread lastElementWriter = new CheckedThread() {
			@Override
			public void go() throws Exception {
				synchronized (snapshotHarness.getCheckpointLock()) {
					lastElement.trigger();
					snapshotHarness.processElement(capacity, 0L);
				}
			}
		};

		lastElementWriter.start();

		lastElement.await();

		synchronized (snapshotHarness.getCheckpointLock()) {
			// execute the snapshot within the checkpoint lock, because then it is guaranteed
			// that the lastElementWriter has written the exceeding element
			snapshot = snapshotHarness.snapshot(0L, 0L);
		}

		// trigger the computation to make the close call finish
		trigger.complete(null);
	} finally {
		synchronized (snapshotHarness.getCheckpointLock()) {
			snapshotHarness.close();
		}
	}

	// 2. restore the snapshot and check that we complete
	final OneInputStreamOperatorTestHarness<Integer, Integer> recoverHarness = new OneInputStreamOperatorTestHarness<>(
		new AsyncWaitOperator<>(
			new ControllableAsyncFunction<>(CompletableFuture.completedFuture(null)),
			1000L,
			capacity,
			AsyncDataStream.OutputMode.ORDERED),
		IntSerializer.INSTANCE);

	recoverHarness.initializeState(snapshot);

	synchronized (recoverHarness.getCheckpointLock()) {
		recoverHarness.open();
	}

	synchronized (recoverHarness.getCheckpointLock()) {
		recoverHarness.close();
	}

	final ConcurrentLinkedQueue<Object> output = recoverHarness.getOutput();

	assertThat(output.size(), Matchers.equalTo(capacity + 1));

	final ArrayList<Integer> outputElements = new ArrayList<>(capacity + 1);

	for (int i = 0; i < capacity + 1; i++) {
		StreamRecord<Integer> streamRecord = ((StreamRecord<Integer>) output.poll());
		outputElements.add(streamRecord.getValue());
	}

	assertThat(outputElements, Matchers.equalTo(expectedOutput));
}
 
Example 13
Source File: ElasticsearchSinkBaseTest.java    From flink with Apache License 2.0
/**
 * Tests that any bulk failure in the listener callbacks due to flushing on an immediately following checkpoint
 * is rethrown; we set a timeout because the test will not finish if the logic is broken.
 */
@Test(timeout = 5000)
public void testBulkFailureRethrownOnOnCheckpointAfterFlush() throws Throwable {
	final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
		new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.open();

	// setup the next bulk request, and let bulk request succeed
	sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList((Exception) null));
	testHarness.processElement(new StreamRecord<>("msg-1"));
	verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

	// manually execute the next bulk request
	sink.manualBulkRequestWithAllPendingRequests();

	// setup the requests to be flushed in the snapshot
	testHarness.processElement(new StreamRecord<>("msg-2"));
	testHarness.processElement(new StreamRecord<>("msg-3"));
	verify(sink.getMockBulkProcessor(), times(3)).add(any(IndexRequest.class));

	CheckedThread snapshotThread = new CheckedThread() {
		@Override
		public void go() throws Exception {
			testHarness.snapshot(1L, 1000L);
		}
	};
	snapshotThread.start();

	// the snapshot should eventually be blocked before snapshot triggers flushing
	while (snapshotThread.getState() != Thread.State.WAITING) {
		Thread.sleep(10);
	}

	// for the snapshot-triggered flush, we let the bulk request fail completely
	sink.setFailNextBulkRequestCompletely(new Exception("artificial failure for bulk request"));

	// let the snapshot-triggered flush continue (bulk request should fail completely)
	sink.continueFlush();

	try {
		snapshotThread.sync();
	} catch (Exception e) {
		// the snapshot should have failed with the bulk request failure
		Assert.assertTrue(e.getCause().getCause().getMessage().contains("artificial failure for bulk request"));

		// test succeeded
		return;
	}

	Assert.fail();
}
 
Example 14
Source File: AsyncWaitOperatorTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that the AsyncWaitOperator can restart if the checkpointed queue was full.
 *
 * <p>See FLINK-7949
 */
@Test(timeout = 10000)
public void testRestartWithFullQueue() throws Exception {
	int capacity = 10;

	// 1. create the snapshot which contains capacity + 1 elements
	final CompletableFuture<Void> trigger = new CompletableFuture<>();
	final ControllableAsyncFunction<Integer> controllableAsyncFunction = new ControllableAsyncFunction<>(trigger);

	final OneInputStreamOperatorTestHarness<Integer, Integer> snapshotHarness = new OneInputStreamOperatorTestHarness<>(
		new AsyncWaitOperator<>(
			controllableAsyncFunction, // a blocking function: completes only when 'trigger' is completed
			1000L,
			capacity,
			AsyncDataStream.OutputMode.ORDERED),
		IntSerializer.INSTANCE);

	snapshotHarness.open();

	final OperatorSubtaskState snapshot;

	final ArrayList<Integer> expectedOutput = new ArrayList<>(capacity + 1);

	try {
		synchronized (snapshotHarness.getCheckpointLock()) {
			for (int i = 0; i < capacity; i++) {
				snapshotHarness.processElement(i, 0L);
				expectedOutput.add(i);
			}
		}

		expectedOutput.add(capacity);

		final OneShotLatch lastElement = new OneShotLatch();

		final CheckedThread lastElementWriter = new CheckedThread() {
			@Override
			public void go() throws Exception {
				synchronized (snapshotHarness.getCheckpointLock()) {
					lastElement.trigger();
					snapshotHarness.processElement(capacity, 0L);
				}
			}
		};

		lastElementWriter.start();

		lastElement.await();

		synchronized (snapshotHarness.getCheckpointLock()) {
			// execute the snapshot within the checkpoint lock, because then it is guaranteed
			// that the lastElementWriter has written the exceeding element
			snapshot = snapshotHarness.snapshot(0L, 0L);
		}

		// trigger the computation to make the close call finish
		trigger.complete(null);
	} finally {
		synchronized (snapshotHarness.getCheckpointLock()) {
			snapshotHarness.close();
		}
	}

	// 2. restore the snapshot and check that we complete
	final OneInputStreamOperatorTestHarness<Integer, Integer> recoverHarness = new OneInputStreamOperatorTestHarness<>(
		new AsyncWaitOperator<>(
			new ControllableAsyncFunction<>(CompletableFuture.completedFuture(null)),
			1000L,
			capacity,
			AsyncDataStream.OutputMode.ORDERED),
		IntSerializer.INSTANCE);

	recoverHarness.initializeState(snapshot);

	synchronized (recoverHarness.getCheckpointLock()) {
		recoverHarness.open();
	}

	synchronized (recoverHarness.getCheckpointLock()) {
		recoverHarness.close();
	}

	final ConcurrentLinkedQueue<Object> output = recoverHarness.getOutput();

	assertThat(output.size(), Matchers.equalTo(capacity + 1));

	final ArrayList<Integer> outputElements = new ArrayList<>(capacity + 1);

	for (int i = 0; i < capacity + 1; i++) {
		StreamRecord<Integer> streamRecord = ((StreamRecord<Integer>) output.poll());
		outputElements.add(streamRecord.getValue());
	}

	assertThat(outputElements, Matchers.equalTo(expectedOutput));
}
 
Example 15
Source File: FlinkKinesisProducerTest.java    From flink with Apache License 2.0
/**
 * Test ensuring that the producer is not dropping buffered records;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@SuppressWarnings({"unchecked", "ResultOfMethodCallIgnored"})
@Test(timeout = 10000)
public void testAtLeastOnceProducer() throws Throwable {
	final DummyFlinkKinesisProducer<String> producer = new DummyFlinkKinesisProducer<>(new SimpleStringSchema());

	OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

	testHarness.open();

	testHarness.processElement(new StreamRecord<>("msg-1"));
	testHarness.processElement(new StreamRecord<>("msg-2"));
	testHarness.processElement(new StreamRecord<>("msg-3"));

	// start a thread to perform checkpointing
	CheckedThread snapshotThread = new CheckedThread() {
		@Override
		public void go() throws Exception {
			// this should block until all records are flushed;
			// if the snapshot implementation returns before pending records are
			// flushed, the assertions below will catch it
			testHarness.snapshot(123L, 123L);
		}
	};
	snapshotThread.start();

	// before proceeding, make sure that flushing has started and that the snapshot is still blocked;
	// this would block forever if the snapshot didn't perform a flush
	producer.waitUntilFlushStarted();
	Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());

	// now, complete the callbacks
	UserRecordResult result = mock(UserRecordResult.class);
	when(result.isSuccessful()).thenReturn(true);

	producer.getPendingRecordFutures().get(0).set(result);
	Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());

	producer.getPendingRecordFutures().get(1).set(result);
	Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());

	producer.getPendingRecordFutures().get(2).set(result);

	// this would fail with an exception if flushing wasn't completed before the snapshot method returned
	snapshotThread.sync();

	testHarness.close();
}
 
Example 16
Source File: JobRetrievalITCase.java    From flink with Apache License 2.0
@Test
public void testJobRetrieval() throws Exception {
	final JobID jobID = new JobID();

	final JobVertex imalock = new JobVertex("imalock");
	imalock.setInvokableClass(SemaphoreInvokable.class);

	final JobGraph jobGraph = new JobGraph(jobID, "testjob", imalock);

	// acquire the lock to make sure that the job cannot complete until the job client
	// has been attached in resumingThread
	lock.acquire();

	client.setDetached(true);
	client.submitJob(jobGraph, JobRetrievalITCase.class.getClassLoader());

	final CheckedThread resumingThread = new CheckedThread("Flink-Job-Retriever") {
		@Override
		public void go() throws Exception {
			assertNotNull(client.requestJobResult(jobID).get());
		}
	};

	// wait until the job is running
	while (client.listJobs().get().isEmpty()) {
		Thread.sleep(50);
	}

	// kick off resuming
	resumingThread.start();

	// wait for client to connect
	while (resumingThread.getState() != Thread.State.WAITING) {
		Thread.sleep(10);
	}

	// client has connected, we can release the lock
	lock.release();

	resumingThread.sync();
}
 
Example 17
Source File: FlinkKafkaProducerBaseTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Test ensuring that if an async exception is caught for one of the flushed requests on checkpoint,
 * it should be rethrown; we set a timeout because the test will not finish if the logic is broken.
 *
 * <p>Note that this test does not test the snapshot method is blocked correctly when there are pending records.
 * The test for that is covered in testAtLeastOnceProducer.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 5000)
public void testAsyncErrorRethrownOnCheckpointAfterFlush() throws Throwable {
	final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
		FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
	producer.setFlushOnCheckpoint(true);

	final KafkaProducer<?, ?> mockProducer = producer.getMockKafkaProducer();

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

	testHarness.open();

	testHarness.processElement(new StreamRecord<>("msg-1"));
	testHarness.processElement(new StreamRecord<>("msg-2"));
	testHarness.processElement(new StreamRecord<>("msg-3"));

	verify(mockProducer, times(3)).send(any(ProducerRecord.class), any(Callback.class));

	// only let the first callback succeed for now
	producer.getPendingCallbacks().get(0).onCompletion(null, null);

	CheckedThread snapshotThread = new CheckedThread() {
		@Override
		public void go() throws Exception {
		// this should block at first, since there are still two pending records that need to be flushed
			testHarness.snapshot(123L, 123L);
		}
	};
	snapshotThread.start();

	// let the 2nd message fail with an async exception
	producer.getPendingCallbacks().get(1).onCompletion(null, new Exception("artificial async failure for 2nd message"));
	producer.getPendingCallbacks().get(2).onCompletion(null, null);

	try {
		snapshotThread.sync();
	} catch (Exception e) {
		// the snapshot should have failed with the async exception
		Assert.assertTrue(e.getCause().getMessage().contains("artificial async failure for 2nd message"));

		// test succeeded
		return;
	}

	Assert.fail();
}
 
Example 18
Source File: KinesisDataFetcherTest.java    From flink with Apache License 2.0
@Test
public void testStreamToLastSeenShardStateIsCorrectlySetWhenNewShardsFoundSinceRestoredCheckpoint() throws Exception {
	List<String> fakeStreams = new LinkedList<>();
	fakeStreams.add("fakeStream1");
	fakeStreams.add("fakeStream2");

	Map<StreamShardHandle, String> restoredStateUnderTest = new HashMap<>();

	// fakeStream1 has 3 shards before restore
	restoredStateUnderTest.put(
		new StreamShardHandle(
			"fakeStream1",
			new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(0))),
		UUID.randomUUID().toString());
	restoredStateUnderTest.put(
		new StreamShardHandle(
			"fakeStream1",
			new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(1))),
		UUID.randomUUID().toString());
	restoredStateUnderTest.put(
		new StreamShardHandle(
			"fakeStream1",
			new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(2))),
		UUID.randomUUID().toString());

	// fakeStream2 has 2 shards before restore
	restoredStateUnderTest.put(
		new StreamShardHandle(
			"fakeStream2",
			new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(0))),
		UUID.randomUUID().toString());
	restoredStateUnderTest.put(
		new StreamShardHandle(
			"fakeStream2",
			new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(1))),
		UUID.randomUUID().toString());

	Map<String, Integer> streamToShardCount = new HashMap<>();
	streamToShardCount.put("fakeStream1", 3 + 1); // fakeStream1 had 3 shards before & 1 new shard after restore
	streamToShardCount.put("fakeStream2", 2 + 3); // fakeStream2 had 2 shards before & 3 new shard after restore

	HashMap<String, String> subscribedStreamsToLastSeenShardIdsUnderTest =
		KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(fakeStreams);

	// use the non-resharded streams behaviour to represent that Kinesis is not resharded AFTER the restore
	final TestableKinesisDataFetcher<String> fetcher =
		new TestableKinesisDataFetcher<>(
			fakeStreams,
			new TestSourceContext<>(),
			TestUtils.getStandardProperties(),
			new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
			10,
			2,
			new AtomicReference<>(),
			new LinkedList<>(),
			subscribedStreamsToLastSeenShardIdsUnderTest,
			FakeKinesisBehavioursFactory.nonReshardedStreamsBehaviour(streamToShardCount));

	for (Map.Entry<StreamShardHandle, String> restoredState : restoredStateUnderTest.entrySet()) {
		fetcher.advanceLastDiscoveredShardOfStream(restoredState.getKey().getStreamName(), restoredState.getKey().getShard().getShardId());
		fetcher.registerNewSubscribedShardState(
			new KinesisStreamShardState(KinesisDataFetcher.convertToStreamShardMetadata(restoredState.getKey()),
				restoredState.getKey(), new SequenceNumber(restoredState.getValue())));
	}

	CheckedThread runFetcherThread = new CheckedThread() {
		@Override
		public void go() throws Exception {
			fetcher.runFetcher();
		}
	};
	runFetcherThread.start();

	fetcher.waitUntilInitialDiscovery();
	fetcher.shutdownFetcher();
	runFetcherThread.sync();

	// assert that the streams tracked in the state are identical to the subscribed streams
	Set<String> streamsInState = subscribedStreamsToLastSeenShardIdsUnderTest.keySet();
	assertEquals(fakeStreams.size(), streamsInState.size());
	assertTrue(streamsInState.containsAll(fakeStreams));

	// assert that the last seen shards in state are correctly set
	for (Map.Entry<String, String> streamToLastSeenShard : subscribedStreamsToLastSeenShardIdsUnderTest.entrySet()) {
		assertEquals(
			KinesisShardIdGenerator.generateFromShardOrder(streamToShardCount.get(streamToLastSeenShard.getKey()) - 1),
			streamToLastSeenShard.getValue());
	}
}
 
Example 19
Source File: ElasticsearchSinkBaseTest.java    From Flink-CEPplus with Apache License 2.0
/**
 * Tests that any bulk failure in the listener callbacks due to flushing on an immediately following checkpoint
 * is rethrown; we set a timeout because the test will not finish if the logic is broken.
 */
@Test(timeout = 5000)
public void testBulkFailureRethrownOnOnCheckpointAfterFlush() throws Throwable {
	final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
		new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());

	final OneInputStreamOperatorTestHarness<String, Object> testHarness =
		new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

	testHarness.open();

	// setup the next bulk request, and let bulk request succeed
	sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList((Exception) null));
	testHarness.processElement(new StreamRecord<>("msg-1"));
	verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

	// manually execute the next bulk request
	sink.manualBulkRequestWithAllPendingRequests();

	// setup the requests to be flushed in the snapshot
	testHarness.processElement(new StreamRecord<>("msg-2"));
	testHarness.processElement(new StreamRecord<>("msg-3"));
	verify(sink.getMockBulkProcessor(), times(3)).add(any(IndexRequest.class));

	CheckedThread snapshotThread = new CheckedThread() {
		@Override
		public void go() throws Exception {
			testHarness.snapshot(1L, 1000L);
		}
	};
	snapshotThread.start();

	// the snapshot should eventually be blocked before snapshot triggers flushing
	while (snapshotThread.getState() != Thread.State.WAITING) {
		Thread.sleep(10);
	}

	// for the snapshot-triggered flush, we let the bulk request fail completely
	sink.setFailNextBulkRequestCompletely(new Exception("artificial failure for bulk request"));

	// let the snapshot-triggered flush continue (bulk request should fail completely)
	sink.continueFlush();

	try {
		snapshotThread.sync();
	} catch (Exception e) {
		// the snapshot should have failed with the bulk request failure
		Assert.assertTrue(e.getCause().getCause().getMessage().contains("artificial failure for bulk request"));

		// test succeeded
		return;
	}

	Assert.fail();
}