Java Code Examples for org.apache.flink.core.testutils.CheckedThread#sync()
The following examples show how to use org.apache.flink.core.testutils.CheckedThread#sync().
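All of the examples share one pattern: subclass CheckedThread, put the concurrent work in go(), start() the thread, and later call sync(), which joins the thread and rethrows any exception that go() threw. Below is a minimal sketch of that pattern; the class name and the illustrative failure are placeholders, not code from any of the projects that follow.

import org.apache.flink.core.testutils.CheckedThread;

public class CheckedThreadSyncSketch {

    public static void main(String[] args) throws Exception {
        final CheckedThread worker = new CheckedThread("worker") {
            @Override
            public void go() throws Exception {
                // any exception thrown here is captured by the CheckedThread
                // rather than being lost on the spawned thread
                if (2 + 2 != 4) {
                    throw new IllegalStateException("illustrative failure");
                }
            }
        };

        worker.start();

        // sync() joins the spawned thread and rethrows whatever go() threw,
        // so failures surface in the calling (test) thread
        worker.sync();
    }
}

Because sync() rethrows, the tests below can assert on failures that happen in a spawned thread without maintaining any shared error-holding state of their own.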
Example 1
Source File: FileUtilsTest.java From flink with Apache License 2.0

@Ignore
@Test
public void testDeleteDirectoryConcurrently() throws Exception {
    final File parent = tmp.newFolder();
    generateRandomDirs(parent, 20, 5, 3);

    // start three concurrent threads that delete the contents
    CheckedThread t1 = new Deleter(parent);
    CheckedThread t2 = new Deleter(parent);
    CheckedThread t3 = new Deleter(parent);
    t1.start();
    t2.start();
    t3.start();
    t1.sync();
    t2.sync();
    t3.sync();

    // assert that the directory no longer exists
    assertFalse(parent.exists());
}
Example 2
Source File: AvroSerializerConcurrencyTest.java From flink with Apache License 2.0

@Test
public void testConcurrentUseOfSerializer() throws Exception {
    final AvroSerializer<String> serializer = new AvroSerializer<>(String.class);

    final BlockerSync sync = new BlockerSync();

    final DataOutputView regularOut = new DataOutputSerializer(32);
    final DataOutputView lockingOut = new LockingView(sync);

    // this thread serializes and gets stuck there
    final CheckedThread thread = new CheckedThread("serializer") {
        @Override
        public void go() throws Exception {
            serializer.serialize("a value", lockingOut);
        }
    };

    thread.start();
    sync.awaitBlocker();

    // this should fail with an exception
    try {
        serializer.serialize("value", regularOut);
        fail("should have failed with an exception");
    }
    catch (IllegalStateException e) {
        // expected
    }
    finally {
        // release the thread that serializes
        sync.releaseBlocker();
    }

    // this propagates exceptions from the spawned thread
    thread.sync();
}
Example 3
Source File: NetworkBufferPoolTest.java From flink with Apache License 2.0

/**
 * Tests {@link NetworkBufferPool#requestMemorySegments()} and verifies that it fails
 * exceptionally when not all segments can be acquired within the specified timeout.
 */
@Test
public void testRequestMemorySegmentsTimeout() throws Exception {
    final int numBuffers = 10;
    final int numberOfSegmentsToRequest = 2;
    final Duration requestSegmentsTimeout = Duration.ofMillis(50L);

    NetworkBufferPool globalPool = new NetworkBufferPool(
        numBuffers,
        128,
        numberOfSegmentsToRequest,
        requestSegmentsTimeout);

    BufferPool localBufferPool = globalPool.createBufferPool(0, numBuffers);
    for (int i = 0; i < numBuffers; ++i) {
        localBufferPool.requestBuffer();
    }
    assertEquals(0, globalPool.getNumberOfAvailableMemorySegments());

    CheckedThread asyncRequest = new CheckedThread() {
        @Override
        public void go() throws Exception {
            globalPool.requestMemorySegments();
        }
    };
    asyncRequest.start();

    expectedException.expect(IOException.class);
    expectedException.expectMessage("Timeout");

    try {
        asyncRequest.sync();
    } finally {
        globalPool.destroy();
    }
}
Example 4
Source File: KryoSerializerConcurrencyTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void testConcurrentUseOfSerializer() throws Exception {
    final KryoSerializer<String> serializer = new KryoSerializer<>(String.class, new ExecutionConfig());

    final BlockerSync sync = new BlockerSync();

    final DataOutputView regularOut = new DataOutputSerializer(32);
    final DataOutputView lockingOut = new LockingView(sync);

    // this thread serializes and gets stuck there
    final CheckedThread thread = new CheckedThread("serializer") {
        @Override
        public void go() throws Exception {
            serializer.serialize("a value", lockingOut);
        }
    };

    thread.start();
    sync.awaitBlocker();

    // this should fail with an exception
    try {
        serializer.serialize("value", regularOut);
        fail("should have failed with an exception");
    }
    catch (IllegalStateException e) {
        // expected
    }
    finally {
        // release the thread that serializes
        sync.releaseBlocker();
    }

    // this propagates exceptions from the spawned thread
    thread.sync();
}
Example 5
Source File: NetworkBufferPoolTest.java From Flink-CEPplus with Apache License 2.0

/**
 * Tests {@link NetworkBufferPool#requestMemorySegments(int)}, verifying it may be aborted in
 * case of a concurrent {@link NetworkBufferPool#destroy()} call.
 */
@Test
public void testRequestMemorySegmentsInterruptable() throws Exception {
    final int numBuffers = 10;

    NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128);
    MemorySegment segment = globalPool.requestMemorySegment();
    assertNotNull(segment);

    final OneShotLatch isRunning = new OneShotLatch();
    CheckedThread asyncRequest = new CheckedThread() {
        @Override
        public void go() throws Exception {
            isRunning.trigger();
            globalPool.requestMemorySegments(10);
        }
    };
    asyncRequest.start();

    // We want the destroy call inside the blocking part of the globalPool.requestMemorySegments()
    // call above. We cannot guarantee this, but we can make it highly probable:
    isRunning.await();
    Thread.sleep(10);
    globalPool.destroy();

    segment.free();

    expectedException.expect(IllegalStateException.class);
    expectedException.expectMessage("destroyed");
    try {
        asyncRequest.sync();
    } finally {
        globalPool.destroy();
    }
}
Example 6
Source File: NetworkBufferPoolTest.java From flink with Apache License 2.0

/**
 * Tests {@link NetworkBufferPool#requestMemorySegments()}, verifying that it may be aborted and
 * remains in a defined state even if the waiting is interrupted.
 */
@Test
public void testRequestMemorySegmentsInterruptable2() throws Exception {
    final int numBuffers = 10;

    NetworkBufferPool globalPool = new NetworkBufferPool(numBuffers, 128, 10);
    MemorySegment segment = globalPool.requestMemorySegment();
    assertNotNull(segment);

    final OneShotLatch isRunning = new OneShotLatch();
    CheckedThread asyncRequest = new CheckedThread() {
        @Override
        public void go() throws Exception {
            isRunning.trigger();
            globalPool.requestMemorySegments();
        }
    };
    asyncRequest.start();

    // We want the interrupt to happen inside the blocking part of the
    // globalPool.requestMemorySegments() call above. We cannot guarantee this,
    // but we can make it highly probable:
    isRunning.await();
    Thread.sleep(10);

    asyncRequest.interrupt();

    globalPool.recycle(segment);

    try {
        asyncRequest.sync();
    } catch (IOException e) {
        assertThat(e, hasProperty("cause", instanceOf(InterruptedException.class)));

        // test indirectly for NetworkBufferPool#numTotalRequiredBuffers being correct:
        // -> creating a new buffer pool should not fail
        globalPool.createBufferPool(10, 10);
    } finally {
        globalPool.destroy();
    }
}
Example 7
Source File: KinesisDataFetcherTest.java From flink with Apache License 2.0

@Test
public void testStreamToLastSeenShardStateIsCorrectlySetWhenNoNewShardsSinceRestoredCheckpointAndSomeStreamsDoNotExist() throws Exception {
    List<String> fakeStreams = new LinkedList<>();
    fakeStreams.add("fakeStream1");
    fakeStreams.add("fakeStream2");
    fakeStreams.add("fakeStream3"); // fakeStream3 will not have any shards
    fakeStreams.add("fakeStream4"); // fakeStream4 will not have any shards

    Map<StreamShardHandle, String> restoredStateUnderTest = new HashMap<>();

    // fakeStream1 has 3 shards before restore
    restoredStateUnderTest.put(
        new StreamShardHandle(
            "fakeStream1",
            new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(0))),
        UUID.randomUUID().toString());
    restoredStateUnderTest.put(
        new StreamShardHandle(
            "fakeStream1",
            new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(1))),
        UUID.randomUUID().toString());
    restoredStateUnderTest.put(
        new StreamShardHandle(
            "fakeStream1",
            new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(2))),
        UUID.randomUUID().toString());

    // fakeStream2 has 2 shards before restore
    restoredStateUnderTest.put(
        new StreamShardHandle(
            "fakeStream2",
            new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(0))),
        UUID.randomUUID().toString());
    restoredStateUnderTest.put(
        new StreamShardHandle(
            "fakeStream2",
            new Shard().withShardId(KinesisShardIdGenerator.generateFromShardOrder(1))),
        UUID.randomUUID().toString());

    Map<String, Integer> streamToShardCount = new HashMap<>();
    streamToShardCount.put("fakeStream1", 3); // fakeStream1 has a fixed 3 shards
    streamToShardCount.put("fakeStream2", 2); // fakeStream2 has a fixed 2 shards
    streamToShardCount.put("fakeStream3", 0); // no shards can be found for fakeStream3
    streamToShardCount.put("fakeStream4", 0); // no shards can be found for fakeStream4

    HashMap<String, String> subscribedStreamsToLastSeenShardIdsUnderTest =
        KinesisDataFetcher.createInitialSubscribedStreamsToLastDiscoveredShardsState(fakeStreams);

    // use a non-resharded-streams Kinesis behaviour to represent that Kinesis is not resharded AFTER the restore
    final TestableKinesisDataFetcher<String> fetcher =
        new TestableKinesisDataFetcher<>(
            fakeStreams,
            new TestSourceContext<>(),
            TestUtils.getStandardProperties(),
            new KinesisDeserializationSchemaWrapper<>(new SimpleStringSchema()),
            10,
            2,
            new AtomicReference<>(),
            new LinkedList<>(),
            subscribedStreamsToLastSeenShardIdsUnderTest,
            FakeKinesisBehavioursFactory.nonReshardedStreamsBehaviour(streamToShardCount));

    for (Map.Entry<StreamShardHandle, String> restoredState : restoredStateUnderTest.entrySet()) {
        fetcher.advanceLastDiscoveredShardOfStream(
            restoredState.getKey().getStreamName(), restoredState.getKey().getShard().getShardId());
        fetcher.registerNewSubscribedShardState(
            new KinesisStreamShardState(
                KinesisDataFetcher.convertToStreamShardMetadata(restoredState.getKey()),
                restoredState.getKey(),
                new SequenceNumber(restoredState.getValue())));
    }

    CheckedThread runFetcherThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            fetcher.runFetcher();
        }
    };
    runFetcherThread.start();

    fetcher.waitUntilInitialDiscovery();
    fetcher.shutdownFetcher();
    runFetcherThread.sync();

    // assert that the streams tracked in the state are identical to the subscribed streams
    Set<String> streamsInState = subscribedStreamsToLastSeenShardIdsUnderTest.keySet();
    assertEquals(fakeStreams.size(), streamsInState.size());
    assertTrue(streamsInState.containsAll(fakeStreams));

    // assert that the last seen shards in state are correctly set
    assertEquals(
        KinesisShardIdGenerator.generateFromShardOrder(2),
        subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream1"));
    assertEquals(
        KinesisShardIdGenerator.generateFromShardOrder(1),
        subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream2"));
    assertNull(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream3"));
    assertNull(subscribedStreamsToLastSeenShardIdsUnderTest.get("fakeStream4"));
}
Example 8
Source File: ElasticsearchSinkBaseTest.java From flink with Apache License 2.0

/**
 * Tests that any bulk failure in the listener callbacks due to flushing on an immediately
 * following checkpoint is rethrown; we set a timeout because the test will not finish if the
 * logic is broken.
 */
@Test(timeout = 5000)
public void testBulkFailureRethrownOnOnCheckpointAfterFlush() throws Throwable {
    final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
        new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());

    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

    testHarness.open();

    // setup the next bulk request, and let bulk request succeed
    sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList((Exception) null));
    testHarness.processElement(new StreamRecord<>("msg-1"));
    verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

    // manually execute the next bulk request
    sink.manualBulkRequestWithAllPendingRequests();

    // setup the requests to be flushed in the snapshot
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));
    verify(sink.getMockBulkProcessor(), times(3)).add(any(IndexRequest.class));

    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            testHarness.snapshot(1L, 1000L);
        }
    };
    snapshotThread.start();

    // the snapshot should eventually be blocked before snapshot triggers flushing
    while (snapshotThread.getState() != Thread.State.WAITING) {
        Thread.sleep(10);
    }

    // for the snapshot-triggered flush, we let the bulk request fail completely
    sink.setFailNextBulkRequestCompletely(new Exception("artificial failure for bulk request"));

    // let the snapshot-triggered flush continue (bulk request should fail completely)
    sink.continueFlush();

    try {
        snapshotThread.sync();
    } catch (Exception e) {
        // the snapshot should have failed with the bulk request failure
        Assert.assertTrue(e.getCause().getCause().getMessage().contains("artificial failure for bulk request"));

        // test succeeded
        return;
    }

    Assert.fail();
}
Example 9
Source File: AccumulatorLiveITCase.java From Flink-CEPplus with Apache License 2.0

private static void submitJobAndVerifyResults(JobGraph jobGraph) throws Exception {
    Deadline deadline = Deadline.now().plus(Duration.ofSeconds(30));

    final ClusterClient<?> client = MINI_CLUSTER_RESOURCE.getClusterClient();

    final CheckedThread submissionThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            client.submitJob(jobGraph, AccumulatorLiveITCase.class.getClassLoader());
        }
    };

    submissionThread.start();

    try {
        NotifyingMapper.notifyLatch.await();

        FutureUtils.retrySuccessfulWithDelay(
            () -> {
                try {
                    return CompletableFuture.completedFuture(client.getAccumulators(jobGraph.getJobID()));
                } catch (Exception e) {
                    return FutureUtils.completedExceptionally(e);
                }
            },
            Time.milliseconds(20),
            deadline,
            accumulators -> accumulators.size() == 1
                && accumulators.containsKey(ACCUMULATOR_NAME)
                && (int) accumulators.get(ACCUMULATOR_NAME).getUnchecked() == NUM_ITERATIONS,
            TestingUtils.defaultScheduledExecutor()
        ).get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);

        NotifyingMapper.shutdownLatch.trigger();
    } finally {
        NotifyingMapper.shutdownLatch.trigger();

        // wait for the job to have terminated
        submissionThread.sync();
    }
}
Example 10
Source File: ElasticsearchSinkBaseTest.java From flink with Apache License 2.0

/**
 * Tests that any item failure in the listener callbacks due to flushing on an immediately
 * following checkpoint is rethrown; we set a timeout because the test will not finish if the
 * logic is broken.
 */
@Test(timeout = 5000)
public void testItemFailureRethrownOnCheckpointAfterFlush() throws Throwable {
    final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
        new HashMap<String, String>(), new SimpleSinkFunction<String>(), new NoOpFailureHandler());

    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

    testHarness.open();

    // setup the next bulk request, and its mock item failures
    List<Exception> mockResponsesList = new ArrayList<>(2);
    mockResponsesList.add(null); // the first request in a bulk will succeed
    mockResponsesList.add(new Exception("artificial failure for record")); // the second request in a bulk will fail
    sink.setMockItemFailuresListForNextBulkItemResponses(mockResponsesList);

    testHarness.processElement(new StreamRecord<>("msg-1"));
    verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

    // manually execute the next bulk request (1 request only, thus should succeed)
    sink.manualBulkRequestWithAllPendingRequests();

    // setup the requests to be flushed in the snapshot
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));
    verify(sink.getMockBulkProcessor(), times(3)).add(any(IndexRequest.class));

    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            testHarness.snapshot(1L, 1000L);
        }
    };
    snapshotThread.start();

    // the snapshot should eventually be blocked before snapshot triggers flushing
    while (snapshotThread.getState() != Thread.State.WAITING) {
        Thread.sleep(10);
    }

    // let the snapshot-triggered flush continue (2 records in the bulk, so the 2nd one should fail)
    sink.continueFlush();

    try {
        snapshotThread.sync();
    } catch (Exception e) {
        // the snapshot should have failed with the failure from the 2nd request
        Assert.assertTrue(e.getCause().getCause().getMessage().contains("artificial failure for record"));

        // test succeeded
        return;
    }

    Assert.fail();
}
Example 11
Source File: FlinkKinesisProducerTest.java From Flink-CEPplus with Apache License 2.0

/**
 * Test ensuring that if an async exception is caught for one of the flushed requests on checkpoint,
 * it should be rethrown; we set a timeout because the test will not finish if the logic is broken.
 *
 * <p>Note that this test does not test that the snapshot method blocks correctly when there are pending records.
 * The test for that is covered in testAtLeastOnceProducer.
 */
@SuppressWarnings("ResultOfMethodCallIgnored")
@Test(timeout = 10000)
public void testAsyncErrorRethrownAfterFlush() throws Throwable {
    final DummyFlinkKinesisProducer<String> producer = new DummyFlinkKinesisProducer<>(new SimpleStringSchema());

    OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

    testHarness.open();

    testHarness.processElement(new StreamRecord<>("msg-1"));
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));

    // only let the first record succeed for now
    UserRecordResult result = mock(UserRecordResult.class);
    when(result.isSuccessful()).thenReturn(true);
    producer.getPendingRecordFutures().get(0).set(result);

    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            // this should block at first, since there are still two pending records that need to be flushed
            testHarness.snapshot(123L, 123L);
        }
    };
    snapshotThread.start();

    // let the 2nd message fail with an async exception
    producer.getPendingRecordFutures().get(1).setException(new Exception("artificial async failure for 2nd message"));
    producer.getPendingRecordFutures().get(2).set(mock(UserRecordResult.class));

    try {
        snapshotThread.sync();
    } catch (Exception e) {
        // after the flush, the async exception should have been rethrown
        Assert.assertTrue(ExceptionUtils.findThrowableWithMessage(e, "artificial async failure for 2nd message").isPresent());

        // test succeeded
        return;
    }

    Assert.fail();
}
Example 12
Source File: FlinkKinesisProducerTest.java From flink with Apache License 2.0

/**
 * Test ensuring that the producer is not dropping buffered records;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@SuppressWarnings({"unchecked", "ResultOfMethodCallIgnored"})
@Test(timeout = 10000)
public void testAtLeastOnceProducer() throws Throwable {
    final DummyFlinkKinesisProducer<String> producer = new DummyFlinkKinesisProducer<>(new SimpleStringSchema());

    OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

    testHarness.open();

    testHarness.processElement(new StreamRecord<>("msg-1"));
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));

    // start a thread to perform checkpointing
    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            // this should block until all records are flushed;
            // if the snapshot implementation returns before pending records are flushed,
            // the liveness assertions below will fail
            testHarness.snapshot(123L, 123L);
        }
    };
    snapshotThread.start();

    // before proceeding, make sure that flushing has started and that the snapshot is still blocked;
    // this would block forever if the snapshot didn't perform a flush
    producer.waitUntilFlushStarted();
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());

    // now, complete the callbacks
    UserRecordResult result = mock(UserRecordResult.class);
    when(result.isSuccessful()).thenReturn(true);

    producer.getPendingRecordFutures().get(0).set(result);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());

    producer.getPendingRecordFutures().get(1).set(result);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());

    producer.getPendingRecordFutures().get(2).set(result);

    // this would fail with an exception if flushing wasn't completed before the snapshot method returned
    snapshotThread.sync();

    testHarness.close();
}
Example 13
Source File: FlinkKafkaProducerBaseTest.java From flink with Apache License 2.0

/**
 * Test ensuring that the producer is not dropping buffered records;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 10000)
public void testAtLeastOnceProducer() throws Throwable {
    final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
        FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
    producer.setFlushOnCheckpoint(true);

    final KafkaProducer<?, ?> mockProducer = producer.getMockKafkaProducer();

    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

    testHarness.open();

    testHarness.processElement(new StreamRecord<>("msg-1"));
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));

    verify(mockProducer, times(3)).send(any(ProducerRecord.class), any(Callback.class));
    Assert.assertEquals(3, producer.getPendingSize());

    // start a thread to perform checkpointing
    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            // this should block until all records are flushed;
            // if the snapshot implementation returns before pending records are flushed,
            // the liveness assertions below will fail
            testHarness.snapshot(123L, 123L);
        }
    };
    snapshotThread.start();

    // before proceeding, make sure that flushing has started and that the snapshot is still blocked;
    // this would block forever if the snapshot didn't perform a flush
    producer.waitUntilFlushStarted();
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());

    // now, complete the callbacks
    producer.getPendingCallbacks().get(0).onCompletion(null, null);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());
    Assert.assertEquals(2, producer.getPendingSize());

    producer.getPendingCallbacks().get(1).onCompletion(null, null);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());
    Assert.assertEquals(1, producer.getPendingSize());

    producer.getPendingCallbacks().get(2).onCompletion(null, null);
    Assert.assertEquals(0, producer.getPendingSize());

    // this would fail with an exception if flushing wasn't completed before the snapshot method returned
    snapshotThread.sync();

    testHarness.close();
}
Example 14
Source File: ElasticsearchSinkBaseTest.java From flink with Apache License 2.0

/**
 * Tests that the sink correctly waits for pending requests (including re-added requests) on checkpoints;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@Test(timeout = 5000)
public void testAtLeastOnceSink() throws Throwable {
    final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
        new HashMap<String, String>(),
        new SimpleSinkFunction<String>(),
        new DummyRetryFailureHandler()); // use a failure handler that simply re-adds requests

    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

    testHarness.open();

    // setup the next bulk request, and its mock item failures;
    // it contains 1 request, which will fail and be re-added to the next bulk request
    sink.setMockItemFailuresListForNextBulkItemResponses(
        Collections.singletonList(new Exception("artificial failure for record")));
    testHarness.processElement(new StreamRecord<>("msg"));
    verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            testHarness.snapshot(1L, 1000L);
        }
    };
    snapshotThread.start();

    // the snapshot should eventually be blocked before snapshot triggers flushing
    while (snapshotThread.getState() != Thread.State.WAITING) {
        Thread.sleep(10);
    }

    sink.continueFlush();

    // since the previous flush should have resulted in a request re-add from the failure handler,
    // we should have flushed again, and eventually be blocked before snapshot triggers the 2nd flush
    while (snapshotThread.getState() != Thread.State.WAITING) {
        Thread.sleep(10);
    }

    // the current number of pending requests should be 1 due to the re-add
    Assert.assertEquals(1, sink.getNumPendingRequests());

    // this time, let the bulk request succeed, so no more requests are re-added
    sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList((Exception) null));

    sink.continueFlush();

    // the snapshot should finish with no exceptions
    snapshotThread.sync();

    testHarness.close();
}
Example 15
Source File: FlinkPulsarSinkTest.java From pulsar-flink with Apache License 2.0

/**
 * Test ensuring that if an async exception is caught for one of the flushed requests on checkpoint,
 * it should be rethrown; we set a timeout because the test will not finish if the logic is broken.
 *
 * <p>Note that this test does not test that the snapshot method blocks correctly when there are pending records.
 * The test for that is covered in testAtLeastOnceProducer.
 */
@SuppressWarnings("unchecked")
@Test//(timeout = 5000)
public void testAsyncErrorRethrownOnCheckpointAfterFlush() throws Throwable {
    final DummyFlinkPulsarSink<String> sink = new DummyFlinkPulsarSink<>(
        dummyClientConf(), dummyProperties(), mock(TopicKeyExtractor.class), null);

    Producer mockProducer = sink.getProducer("tp");

    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

    testHarness.open();

    testHarness.processElement(new StreamRecord<>("msg-1"));
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));

    verify(mockProducer, times(3)).newMessage();

    // only let the first callback succeed for now
    sink.getPendingCallbacks().get(0).accept(null, null);

    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            // this should block at first, since there are still two pending records that need to be flushed
            testHarness.snapshot(123L, 123L);
        }
    };
    snapshotThread.start();

    // let the 2nd message fail with an async exception
    sink.getPendingCallbacks().get(1).accept(null, new Exception("artificial async failure for 2nd message"));
    sink.getPendingCallbacks().get(2).accept(null, null);

    try {
        snapshotThread.sync();
    } catch (Exception e) {
        // the snapshot should have failed with the async exception
        Assert.assertTrue(e.getCause().getMessage().contains("artificial async failure for 2nd message"));

        // test succeeded
        return;
    }

    Assert.fail();
}
Example 16
Source File: FlinkKafkaProducerBaseTest.java From Flink-CEPplus with Apache License 2.0

/**
 * Test ensuring that the producer is not dropping buffered records;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 10000)
public void testAtLeastOnceProducer() throws Throwable {
    final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
        FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
    producer.setFlushOnCheckpoint(true);

    final KafkaProducer<?, ?> mockProducer = producer.getMockKafkaProducer();

    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

    testHarness.open();

    testHarness.processElement(new StreamRecord<>("msg-1"));
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));

    verify(mockProducer, times(3)).send(any(ProducerRecord.class), any(Callback.class));
    Assert.assertEquals(3, producer.getPendingSize());

    // start a thread to perform checkpointing
    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            // this should block until all records are flushed;
            // if the snapshot implementation returns before pending records are flushed,
            // the liveness assertions below will fail
            testHarness.snapshot(123L, 123L);
        }
    };
    snapshotThread.start();

    // before proceeding, make sure that flushing has started and that the snapshot is still blocked;
    // this would block forever if the snapshot didn't perform a flush
    producer.waitUntilFlushStarted();
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());

    // now, complete the callbacks
    producer.getPendingCallbacks().get(0).onCompletion(null, null);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());
    Assert.assertEquals(2, producer.getPendingSize());

    producer.getPendingCallbacks().get(1).onCompletion(null, null);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());
    Assert.assertEquals(1, producer.getPendingSize());

    producer.getPendingCallbacks().get(2).onCompletion(null, null);
    Assert.assertEquals(0, producer.getPendingSize());

    // this would fail with an exception if flushing wasn't completed before the snapshot method returned
    snapshotThread.sync();

    testHarness.close();
}
Example 17
Source File: FlinkPulsarSinkTest.java From pulsar-flink with Apache License 2.0

/**
 * Test ensuring that the producer is not dropping buffered records;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 10000)
public void testAtLeastOnceProducer() throws Throwable {
    final DummyFlinkPulsarSink<String> sink = new DummyFlinkPulsarSink<>(
        dummyClientConf(), dummyProperties(), mock(TopicKeyExtractor.class), null);

    final Producer mockProducer = sink.getProducer("tp");

    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

    testHarness.open();

    testHarness.processElement(new StreamRecord<>("msg-1"));
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));

    // as in the previous example from this file, verify the three message builds
    verify(mockProducer, times(3)).newMessage();
    Assert.assertEquals(3, sink.getPendingSize());

    // start a thread to perform checkpointing
    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            // this should block until all records are flushed;
            // if the snapshot implementation returns before pending records are flushed,
            // the liveness assertions below will fail
            testHarness.snapshot(123L, 123L);
        }
    };
    snapshotThread.start();

    // before proceeding, make sure that flushing has started and that the snapshot is still blocked;
    // this would block forever if the snapshot didn't perform a flush
    sink.waitUntilFlushStarted();
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());

    // now, complete the callbacks
    sink.getPendingCallbacks().get(0).accept(null, null);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());
    Assert.assertEquals(2, sink.getPendingSize());

    sink.getPendingCallbacks().get(1).accept(null, null);
    Assert.assertTrue("Snapshot returned before all records were flushed", snapshotThread.isAlive());
    Assert.assertEquals(1, sink.getPendingSize());

    sink.getPendingCallbacks().get(2).accept(null, null);
    Assert.assertEquals(0, sink.getPendingSize());

    // this would fail with an exception if flushing wasn't completed before the snapshot method returned
    snapshotThread.sync();

    testHarness.close();
}
Example 18
Source File: ElasticsearchSinkBaseTest.java From Flink-CEPplus with Apache License 2.0

/**
 * Tests that the sink correctly waits for pending requests (including re-added requests) on checkpoints;
 * we set a timeout because the test will not finish if the logic is broken.
 */
@Test(timeout = 5000)
public void testAtLeastOnceSink() throws Throwable {
    final DummyElasticsearchSink<String> sink = new DummyElasticsearchSink<>(
        new HashMap<String, String>(),
        new SimpleSinkFunction<String>(),
        new DummyRetryFailureHandler()); // use a failure handler that simply re-adds requests

    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(sink));

    testHarness.open();

    // setup the next bulk request, and its mock item failures;
    // it contains 1 request, which will fail and be re-added to the next bulk request
    sink.setMockItemFailuresListForNextBulkItemResponses(
        Collections.singletonList(new Exception("artificial failure for record")));
    testHarness.processElement(new StreamRecord<>("msg"));
    verify(sink.getMockBulkProcessor(), times(1)).add(any(IndexRequest.class));

    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            testHarness.snapshot(1L, 1000L);
        }
    };
    snapshotThread.start();

    // the snapshot should eventually be blocked before snapshot triggers flushing
    while (snapshotThread.getState() != Thread.State.WAITING) {
        Thread.sleep(10);
    }

    sink.continueFlush();

    // since the previous flush should have resulted in a request re-add from the failure handler,
    // we should have flushed again, and eventually be blocked before snapshot triggers the 2nd flush
    while (snapshotThread.getState() != Thread.State.WAITING) {
        Thread.sleep(10);
    }

    // the current number of pending requests should be 1 due to the re-add
    Assert.assertEquals(1, sink.getNumPendingRequests());

    // this time, let the bulk request succeed, so no more requests are re-added
    sink.setMockItemFailuresListForNextBulkItemResponses(Collections.singletonList((Exception) null));

    sink.continueFlush();

    // the snapshot should finish with no exceptions
    snapshotThread.sync();

    testHarness.close();
}
Example 19
Source File: FlinkKafkaProducerBaseTest.java From flink with Apache License 2.0

/**
 * Test ensuring that if an async exception is caught for one of the flushed requests on checkpoint,
 * it should be rethrown; we set a timeout because the test will not finish if the logic is broken.
 *
 * <p>Note that this test does not test that the snapshot method blocks correctly when there are pending records.
 * The test for that is covered in testAtLeastOnceProducer.
 */
@SuppressWarnings("unchecked")
@Test(timeout = 5000)
public void testAsyncErrorRethrownOnCheckpointAfterFlush() throws Throwable {
    final DummyFlinkKafkaProducer<String> producer = new DummyFlinkKafkaProducer<>(
        FakeStandardProducerConfig.get(), new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()), null);
    producer.setFlushOnCheckpoint(true);

    final KafkaProducer<?, ?> mockProducer = producer.getMockKafkaProducer();

    final OneInputStreamOperatorTestHarness<String, Object> testHarness =
        new OneInputStreamOperatorTestHarness<>(new StreamSink<>(producer));

    testHarness.open();

    testHarness.processElement(new StreamRecord<>("msg-1"));
    testHarness.processElement(new StreamRecord<>("msg-2"));
    testHarness.processElement(new StreamRecord<>("msg-3"));

    verify(mockProducer, times(3)).send(any(ProducerRecord.class), any(Callback.class));

    // only let the first callback succeed for now
    producer.getPendingCallbacks().get(0).onCompletion(null, null);

    CheckedThread snapshotThread = new CheckedThread() {
        @Override
        public void go() throws Exception {
            // this should block at first, since there are still two pending records that need to be flushed
            testHarness.snapshot(123L, 123L);
        }
    };
    snapshotThread.start();

    // let the 2nd message fail with an async exception
    producer.getPendingCallbacks().get(1).onCompletion(null, new Exception("artificial async failure for 2nd message"));
    producer.getPendingCallbacks().get(2).onCompletion(null, null);

    try {
        snapshotThread.sync();
    } catch (Exception e) {
        // the snapshot should have failed with the async exception
        Assert.assertTrue(e.getCause().getMessage().contains("artificial async failure for 2nd message"));

        // test succeeded
        return;
    }

    Assert.fail();
}