Java Code Examples for org.apache.flink.streaming.api.functions.source.ContinuousFileMonitoringFunction#cancel()
The following examples show how to use org.apache.flink.streaming.api.functions.source.ContinuousFileMonitoringFunction#cancel().
The original project and source file are noted above each example.
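Before the full test examples, here is a minimal, self-contained sketch of the run()/cancel() lifecycle that the tests below rely on. It is an illustration, not the tests' own setup: the class name, the temporary directory, the 100 ms scan interval, and the no-op SourceContext are placeholder assumptions, and open() is called directly here, whereas the tests let Flink's AbstractStreamOperatorTestHarness manage the operator lifecycle. Because run() blocks in its monitoring loop, the function is started on a separate thread and stopped by calling cancel() from another thread.

import java.nio.file.Files;

import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.functions.source.ContinuousFileMonitoringFunction;
import org.apache.flink.streaming.api.functions.source.FileProcessingMode;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.api.functions.source.TimestampedFileInputSplit;
import org.apache.flink.streaming.api.watermark.Watermark;

// Hypothetical illustration class; not part of the Flink test sources.
public class CancelMonitoringFunctionSketch {

    public static void main(String[] args) throws Exception {
        // Monitor an (empty) temporary directory; the path is a placeholder.
        String dir = Files.createTempDirectory("monitored").toString();
        TextInputFormat format = new TextInputFormat(new Path(dir));

        final ContinuousFileMonitoringFunction<String> monitor =
            new ContinuousFileMonitoringFunction<>(
                format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, 100L);

        // The tests below let the operator test harness perform this initialization.
        monitor.open(new Configuration());

        final Object lock = new Object();

        // run() blocks in its monitoring loop, so it is driven from its own thread.
        // The tests use a DummySourceContext helper instead of this inline no-op context.
        Thread runner = new Thread(() -> {
            try {
                monitor.run(new SourceFunction.SourceContext<TimestampedFileInputSplit>() {
                    @Override public void collect(TimestampedFileInputSplit split) {}
                    @Override public void collectWithTimestamp(TimestampedFileInputSplit split, long ts) {}
                    @Override public void emitWatermark(Watermark mark) {}
                    @Override public void markAsTemporarilyIdle() {}
                    @Override public Object getCheckpointLock() { return lock; }
                    @Override public void close() {}
                });
            } catch (Exception e) {
                e.printStackTrace();
            }
        });
        runner.start();

        Thread.sleep(500); // give the function time for at least one directory scan

        // cancel() clears the function's running flag; the monitoring loop exits
        // on its next pass, run() returns, and join() completes.
        monitor.cancel();
        runner.join();
    }
}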
Example 1
Source File: ContinuousFileProcessingMigrationTest.java, from the Flink-CEPplus project (Apache License 2.0)
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeMonitoringSourceSnapshot() throws Exception {

    File testFolder = tempFolder.newFolder();

    long fileModTime = Long.MIN_VALUE;
    for (int i = 0; i < 1; i++) {
        Tuple2<File, String> file = createFileAndFillWithData(testFolder, "file", i, "This is test line.");
        fileModTime = file.f0.lastModified();
    }

    TextInputFormat format = new TextInputFormat(new Path(testFolder.getAbsolutePath()));

    final ContinuousFileMonitoringFunction<String> monitoringFunction =
        new ContinuousFileMonitoringFunction<>(format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);

    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
        new StreamSource<>(monitoringFunction);

    final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
        new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);

    testHarness.open();

    final Throwable[] error = new Throwable[1];
    final OneShotLatch latch = new OneShotLatch();

    // run the source asynchronously
    Thread runner = new Thread() {
        @Override
        public void run() {
            try {
                monitoringFunction.run(new DummySourceContext() {
                    @Override
                    public void collect(TimestampedFileInputSplit element) {
                        latch.trigger();
                    }

                    @Override
                    public void markAsTemporarilyIdle() {
                    }
                });
            } catch (Throwable t) {
                t.printStackTrace();
                error[0] = t;
            }
        }
    };
    runner.start();

    if (!latch.isTriggered()) {
        latch.await();
    }

    final OperatorSubtaskState snapshot;
    synchronized (testHarness.getCheckpointLock()) {
        snapshot = testHarness.snapshot(0L, 0L);
    }

    OperatorSnapshotUtil.writeStateHandle(
        snapshot,
        "src/test/resources/monitoring-function-migration-test-" +
            fileModTime + "-flink" + flinkGenerateSavepointVersion + "-snapshot");

    monitoringFunction.cancel();
    runner.join();

    testHarness.close();
}
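In this migration test, cancel() is what lets the test terminate cleanly: once the snapshot has been written, cancel() clears the function's running flag, the monitoring loop inside run() exits, and runner.join() returns so the harness can be closed.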
Example 2
Source File: ContinuousFileProcessingTest.java, from the Flink-CEPplus project (Apache License 2.0)
@Test
public void testFunctionRestore() throws Exception {
    String testBasePath = hdfsURI + "/" + UUID.randomUUID() + "/";

    org.apache.hadoop.fs.Path path = null;
    long fileModTime = Long.MIN_VALUE;
    for (int i = 0; i < 1; i++) {
        Tuple2<org.apache.hadoop.fs.Path, String> file =
            createFileAndFillWithData(testBasePath, "file", i, "This is test line.");
        path = file.f0;
        fileModTime = hdfs.getFileStatus(file.f0).getModificationTime();
    }

    TextInputFormat format = new TextInputFormat(new Path(testBasePath));

    final ContinuousFileMonitoringFunction<String> monitoringFunction =
        createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_CONTINUOUSLY);

    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
        new StreamSource<>(monitoringFunction);

    final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
        new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
    testHarness.open();

    final Throwable[] error = new Throwable[1];
    final OneShotLatch latch = new OneShotLatch();

    final DummySourceContext sourceContext = new DummySourceContext() {
        @Override
        public void collect(TimestampedFileInputSplit element) {
            latch.trigger();
        }
    };

    // run the source asynchronously
    Thread runner = new Thread() {
        @Override
        public void run() {
            try {
                monitoringFunction.run(sourceContext);
            } catch (Throwable t) {
                t.printStackTrace();
                error[0] = t;
            }
        }
    };
    runner.start();

    // first condition for the source to have updated its state: emit at least one element
    if (!latch.isTriggered()) {
        latch.await();
    }

    // second condition for the source to have updated its state: it's not on the lock anymore,
    // this means it has processed all the splits and updated its state.
    synchronized (sourceContext.getCheckpointLock()) {}

    OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
    monitoringFunction.cancel();
    runner.join();

    testHarness.close();

    final ContinuousFileMonitoringFunction<String> monitoringFunctionCopy =
        createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_CONTINUOUSLY);

    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> srcCopy =
        new StreamSource<>(monitoringFunctionCopy);

    AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarnessCopy =
        new AbstractStreamOperatorTestHarness<>(srcCopy, 1, 1, 0);
    testHarnessCopy.initializeState(snapshot);
    testHarnessCopy.open();

    Assert.assertNull(error[0]);
    Assert.assertEquals(fileModTime, monitoringFunctionCopy.getGlobalModificationTime());

    hdfs.delete(path, false);
}
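Here cancel() is part of a snapshot-and-restore round trip: the first function is cancelled after its state is snapshotted, a fresh copy is initialized from that snapshot, and the final assertion on getGlobalModificationTime() verifies that the monitoring state survived the restore.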
Example 3
Source File: ContinuousFileProcessingMigrationTest.java, from the flink project (Apache License 2.0)
/**
 * Manually run this to write binary snapshot data. Remove @Ignore to run.
 */
@Ignore
@Test
public void writeMonitoringSourceSnapshot() throws Exception {

    File testFolder = tempFolder.newFolder();

    long fileModTime = Long.MIN_VALUE;
    for (int i = 0; i < 1; i++) {
        Tuple2<File, String> file = createFileAndFillWithData(testFolder, "file", i, "This is test line.");
        fileModTime = file.f0.lastModified();
    }

    TextInputFormat format = new TextInputFormat(new Path(testFolder.getAbsolutePath()));

    final ContinuousFileMonitoringFunction<String> monitoringFunction =
        new ContinuousFileMonitoringFunction<>(format, FileProcessingMode.PROCESS_CONTINUOUSLY, 1, INTERVAL);

    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
        new StreamSource<>(monitoringFunction);

    final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
        new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);

    testHarness.open();

    final Throwable[] error = new Throwable[1];
    final OneShotLatch latch = new OneShotLatch();

    // run the source asynchronously
    Thread runner = new Thread() {
        @Override
        public void run() {
            try {
                monitoringFunction.run(new DummySourceContext() {
                    @Override
                    public void collect(TimestampedFileInputSplit element) {
                        latch.trigger();
                    }

                    @Override
                    public void markAsTemporarilyIdle() {
                    }
                });
            } catch (Throwable t) {
                t.printStackTrace();
                error[0] = t;
            }
        }
    };
    runner.start();

    if (!latch.isTriggered()) {
        latch.await();
    }

    final OperatorSubtaskState snapshot;
    synchronized (testHarness.getCheckpointLock()) {
        snapshot = testHarness.snapshot(0L, 0L);
    }

    OperatorSnapshotUtil.writeStateHandle(
        snapshot,
        "src/test/resources/monitoring-function-migration-test-" +
            fileModTime + "-flink" + flinkGenerateSavepointVersion + "-snapshot");

    monitoringFunction.cancel();
    runner.join();

    testHarness.close();
}
Example 4
Source File: ContinuousFileProcessingTest.java, from the flink project (Apache License 2.0)
@Test
public void testFunctionRestore() throws Exception {
    String testBasePath = hdfsURI + "/" + UUID.randomUUID() + "/";

    org.apache.hadoop.fs.Path path = null;
    long fileModTime = Long.MIN_VALUE;
    for (int i = 0; i < 1; i++) {
        Tuple2<org.apache.hadoop.fs.Path, String> file =
            createFileAndFillWithData(testBasePath, "file", i, "This is test line.");
        path = file.f0;
        fileModTime = hdfs.getFileStatus(file.f0).getModificationTime();
    }

    TextInputFormat format = new TextInputFormat(new Path(testBasePath));

    final ContinuousFileMonitoringFunction<String> monitoringFunction =
        createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_CONTINUOUSLY);

    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> src =
        new StreamSource<>(monitoringFunction);

    final AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarness =
        new AbstractStreamOperatorTestHarness<>(src, 1, 1, 0);
    testHarness.open();

    final Throwable[] error = new Throwable[1];
    final OneShotLatch latch = new OneShotLatch();

    final DummySourceContext sourceContext = new DummySourceContext() {
        @Override
        public void collect(TimestampedFileInputSplit element) {
            latch.trigger();
        }
    };

    // run the source asynchronously
    Thread runner = new Thread() {
        @Override
        public void run() {
            try {
                monitoringFunction.run(sourceContext);
            } catch (Throwable t) {
                t.printStackTrace();
                error[0] = t;
            }
        }
    };
    runner.start();

    // first condition for the source to have updated its state: emit at least one element
    if (!latch.isTriggered()) {
        latch.await();
    }

    // second condition for the source to have updated its state: it's not on the lock anymore,
    // this means it has processed all the splits and updated its state.
    synchronized (sourceContext.getCheckpointLock()) {}

    OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
    monitoringFunction.cancel();
    runner.join();

    testHarness.close();

    final ContinuousFileMonitoringFunction<String> monitoringFunctionCopy =
        createTestContinuousFileMonitoringFunction(format, FileProcessingMode.PROCESS_CONTINUOUSLY);

    StreamSource<TimestampedFileInputSplit, ContinuousFileMonitoringFunction<String>> srcCopy =
        new StreamSource<>(monitoringFunctionCopy);

    AbstractStreamOperatorTestHarness<TimestampedFileInputSplit> testHarnessCopy =
        new AbstractStreamOperatorTestHarness<>(srcCopy, 1, 1, 0);
    testHarnessCopy.initializeState(snapshot);
    testHarnessCopy.open();

    Assert.assertNull(error[0]);
    Assert.assertEquals(fileModTime, monitoringFunctionCopy.getGlobalModificationTime());

    hdfs.delete(path, false);
}