Java Code Examples for org.apache.flink.util.FileUtils

The following examples show how to use org.apache.flink.util.FileUtils. These examples are extracted from open source projects; the source project and file are noted above each example.
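For orientation, the snippet below is a minimal, self-contained sketch of the round trip most of these examples build on: writing a string to a file as UTF-8 and reading it back. It is an illustration written for this page, not taken from any of the projects below.

// Minimal sketch: UTF-8 write/read round trip with FileUtils.
// Assumes imports: java.io.File, java.io.IOException, org.apache.flink.util.FileUtils
public static String roundTrip(File file, String contents) throws IOException {
	FileUtils.writeFileUtf8(file, contents); // write the string to the file as UTF-8
	return FileUtils.readFileUtf8(file);     // read the whole file back as a UTF-8 string
}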
Example 1
Source Project: Flink-CEPplus   Source File: PageRankITCase.java    License: Apache License 2.0
@Before
public void before() throws Exception {
	File resultFile = tempFolder.newFile();
	// Delete file because the Scala API does not respect WriteMode set by the configuration
	resultFile.delete();
	resultPath = resultFile.toURI().toString();

	File verticesFile = tempFolder.newFile();
	FileUtils.writeFileUtf8(verticesFile, PageRankData.VERTICES);

	File edgesFile = tempFolder.newFile();
	FileUtils.writeFileUtf8(edgesFile, PageRankData.EDGES);

	verticesPath = verticesFile.toURI().toString();
	edgesPath = edgesFile.toURI().toString();
}
 
Example 2
Source Project: flink   Source File: PageRankITCase.java    License: Apache License 2.0
@Before
public void before() throws Exception {
	File resultFile = tempFolder.newFile();
	// Delete file because the Scala API does not respect WriteMode set by the configuration
	resultFile.delete();
	resultPath = resultFile.toURI().toString();

	File verticesFile = tempFolder.newFile();
	FileUtils.writeFileUtf8(verticesFile, PageRankData.VERTICES);

	File edgesFile = tempFolder.newFile();
	FileUtils.writeFileUtf8(edgesFile, PageRankData.EDGES);

	verticesPath = verticesFile.toURI().toString();
	edgesPath = edgesFile.toURI().toString();
}
 
Example 3
Source Project: flink   Source File: IncrementalSSSPITCase.java    License: Apache License 2.0
@Before
public void before() throws Exception {
	resultPath = tempFolder.newFile().toURI().toString();
	File verticesFile = tempFolder.newFile();
	FileUtils.writeFileUtf8(verticesFile, IncrementalSSSPData.VERTICES);

	File edgesFile = tempFolder.newFile();
	FileUtils.writeFileUtf8(edgesFile, IncrementalSSSPData.EDGES);

	File edgesInSSSPFile = tempFolder.newFile();
	FileUtils.writeFileUtf8(edgesInSSSPFile, IncrementalSSSPData.EDGES_IN_SSSP);

	verticesPath = verticesFile.toURI().toString();
	edgesPath = edgesFile.toURI().toString();
	edgesInSSSPPath = edgesInSSSPFile.toURI().toString();
}
 
Example 4
Source Project: flink   Source File: SpilledBufferOrEventSequenceTest.java    License: Apache License 2.0
@Test
public void testCleanup() {
	try {
		ByteBuffer data = ByteBuffer.allocate(157);
		data.order(ByteOrder.LITTLE_ENDIAN);

		FileUtils.writeCompletely(fileChannel, data);
		fileChannel.position(54);

		SpilledBufferOrEventSequence seq = new SpilledBufferOrEventSequence(tempFile, fileChannel, buffer, pageSize);
		seq.open();
		seq.cleanup();

		assertFalse(fileChannel.isOpen());
		assertFalse(tempFile.exists());
	}
	catch (Exception e) {
		e.printStackTrace();
		fail(e.getMessage());
	}
}
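FileUtils.writeCompletely is used above because a single FileChannel.write call may write fewer bytes than the buffer holds; the utility keeps writing until the buffer is drained. A minimal sketch, with the target path supplied by the caller:

// Minimal sketch: write an entire buffer to a file channel in one call.
// Assumes imports: java.io.IOException, java.nio.ByteBuffer, java.nio.channels.FileChannel,
// java.nio.file.StandardOpenOption, org.apache.flink.util.FileUtils
public static void writeAll(java.nio.file.Path target, byte[] bytes) throws IOException {
	try (FileChannel channel = FileChannel.open(
			target, StandardOpenOption.CREATE, StandardOpenOption.WRITE)) {
		FileUtils.writeCompletely(channel, ByteBuffer.wrap(bytes)); // loops until nothing remains
	}
}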
 
Example 5
Source Project: Flink-CEPplus   Source File: AbstractBlobCache.java    License: Apache License 2.0
@Override
public void close() throws IOException {
	cancelCleanupTask();

	if (shutdownRequested.compareAndSet(false, true)) {
		log.info("Shutting down BLOB cache");

		// Clean up the storage directory
		try {
			FileUtils.deleteDirectory(storageDir);
		} finally {
			// Remove shutdown hook to prevent resource leaks
			ShutdownHookUtil.removeShutdownHook(shutdownHook, getClass().getSimpleName(), log);
		}
	}
}
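FileUtils.deleteDirectory removes the directory and everything beneath it, and tolerates a directory that is already gone, so close() can call it unconditionally. A minimal sketch of a best-effort variant that logs instead of propagating (the LOG field is assumed):

// Minimal sketch: best-effort recursive cleanup during shutdown.
// Assumes a LOG field; deleteDirectory is a no-op for a non-existing directory.
private void cleanupStorageDir(File storageDir) {
	try {
		FileUtils.deleteDirectory(storageDir);
	} catch (IOException e) {
		LOG.warn("Could not delete storage directory " + storageDir.getAbsolutePath(), e);
	}
}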
 
Example 6
Source Project: Flink-CEPplus   Source File: IOManager.java    License: Apache License 2.0
/**
 * Close method; marks the I/O manager as closed
 * and removes all temporary files.
 */
public void shutdown() {
	// remove all of our temp directories
	for (File path : paths) {
		try {
			if (path != null) {
				if (path.exists()) {
					FileUtils.deleteDirectory(path);
					LOG.info("I/O manager removed spill file directory {}", path.getAbsolutePath());
				}
			}
		} catch (Throwable t) {
			LOG.error("IOManager failed to properly clean up temp file directory: " + path, t);
		}
	}
}
 
Example 7
Source Project: flink   Source File: AbstractTaskManagerFileHandlerTest.java    License: Apache License 2.0
/**
 * Tests that the {@link AbstractTaskManagerFileHandler} serves the requested file.
 */
@Test
public void testFileServing() throws Exception {
	final Time cacheEntryDuration = Time.milliseconds(1000L);

	final Queue<CompletableFuture<TransientBlobKey>> requestFileUploads = new ArrayDeque<>(1);

	requestFileUploads.add(CompletableFuture.completedFuture(transientBlobKey1));

	final TestTaskManagerFileHandler testTaskManagerFileHandler = createTestTaskManagerFileHandler(cacheEntryDuration, requestFileUploads, EXPECTED_TASK_MANAGER_ID);

	final File outputFile = temporaryFolder.newFile();
	final TestingChannelHandlerContext testingContext = new TestingChannelHandlerContext(outputFile);

	testTaskManagerFileHandler.respondToRequest(
		testingContext,
		HTTP_REQUEST,
		handlerRequest,
		null);

	assertThat(outputFile.length(), is(greaterThan(0L)));
	assertThat(FileUtils.readFileUtf8(outputFile), is(equalTo(fileContent1)));
}
 
Example 8
Source Project: flink   Source File: AbstractBlobCache.java    License: Apache License 2.0
@Override
public void close() throws IOException {
	cancelCleanupTask();

	if (shutdownRequested.compareAndSet(false, true)) {
		log.info("Shutting down BLOB cache");

		// Clean up the storage directory
		try {
			FileUtils.deleteDirectory(storageDir);
		} finally {
			// Remove shutdown hook to prevent resource leaks
			ShutdownHookUtil.removeShutdownHook(shutdownHook, getClass().getSimpleName(), log);
		}
	}
}
 
Example 9
Source Project: flink   Source File: JsonRowSchemaConverterTest.java    License: Apache License 2.0
@Test
public void testComplexSchema() throws Exception {
	final URL url = getClass().getClassLoader().getResource("complex-schema.json");
	Objects.requireNonNull(url);
	final String schema = FileUtils.readFileUtf8(new File(url.getFile()));
	final TypeInformation<?> result = JsonRowSchemaConverter.convert(schema);

	final TypeInformation<?> expected = Types.ROW_NAMED(
		new String[] {"fn", "familyName", "additionalName", "tuples", "honorificPrefix", "url",
			"email", "tel", "sound", "org"},
		Types.STRING, Types.STRING, Types.BOOLEAN, Types.ROW(Types.BIG_DEC, Types.STRING, Types.STRING, Types.STRING),
		Types.OBJECT_ARRAY(Types.STRING), Types.STRING, Types.ROW_NAMED(new String[] {"type", "value"}, Types.STRING, Types.STRING),
		Types.ROW_NAMED(new String[] {"type", "value"}, Types.BIG_DEC, Types.STRING), Types.VOID,
		Types.ROW_NAMED(new String[] {"organizationUnit"}, Types.ROW()));

	assertEquals(expected, result);
}
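The test loads a classpath resource as a UTF-8 string, a pattern that recurs in the examples below. A condensed sketch of just that pattern (the resource name and class loader choice are illustrative):

// Minimal sketch: read a classpath resource into a UTF-8 string.
// Assumes imports: java.io.File, java.io.IOException, java.net.URL, java.util.Objects,
// org.apache.flink.util.FileUtils
public static String readResourceUtf8(String resourceName) throws IOException {
	final URL url = Thread.currentThread().getContextClassLoader().getResource(resourceName);
	Objects.requireNonNull(url, "resource not found: " + resourceName);
	return FileUtils.readFileUtf8(new File(url.getFile()));
}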
 
Example 10
Source Project: flink   Source File: FileCacheDirectoriesTest.java    License: Apache License 2.0
private void testDirectoryDownloaded(DistributedCache.DistributedCacheEntry entry) throws Exception {
	JobID jobID = new JobID();
	ExecutionAttemptID attemptID = new ExecutionAttemptID();

	// copy / create the file
	final String fileName = "test_file";
	Future<Path> copyResult = fileCache.createTmpFile(fileName, entry, jobID, attemptID);

	final Path dstPath = copyResult.get();
	final FileSystem fs = dstPath.getFileSystem();
	final FileStatus fileStatus = fs.getFileStatus(dstPath);
	assertTrue(fileStatus.isDir());

	final Path cacheFile = new Path(dstPath, "cacheFile");
	assertTrue(fs.exists(cacheFile));
	final String actualContent = FileUtils.readFileUtf8(new File(cacheFile.getPath()));
	assertEquals(testFileContent, actualContent);
}
 
Example 11
Source Project: flink   Source File: FileCacheDirectoriesTest.java    License: Apache License 2.0
@Test
public void testDirectoryDownloadedFromBlob() throws Exception {
	JobID jobID = new JobID();
	ExecutionAttemptID attemptID = new ExecutionAttemptID();

	final String fileName = "test_file";
	// copy / create the file
	final DistributedCache.DistributedCacheEntry entry = new DistributedCache.DistributedCacheEntry(
		fileName,
		false,
		InstantiationUtil.serializeObject(permanentBlobKey),
		true);
	Future<Path> copyResult = fileCache.createTmpFile(fileName, entry, jobID, attemptID);

	final Path dstPath = copyResult.get();
	final FileSystem fs = dstPath.getFileSystem();
	final FileStatus fileStatus = fs.getFileStatus(dstPath);
	assertTrue(fileStatus.isDir());

	final Path cacheFile = new Path(dstPath, "cacheFile");
	assertTrue(fs.exists(cacheFile));
	final String actualContent = FileUtils.readFileUtf8(new File(cacheFile.getPath()));
	assertEquals(testFileContent, actualContent);
}
 
Example 12
Source Project: Flink-CEPplus   Source File: EnvironmentFileUtil.java    License: Apache License 2.0
public static Environment parseModified(String fileName, Map<String, String> replaceVars) throws IOException {
	final URL url = EnvironmentFileUtil.class.getClassLoader().getResource(fileName);
	Objects.requireNonNull(url);
	String schema = FileUtils.readFileUtf8(new File(url.getFile()));

	for (Map.Entry<String, String> replaceVar : replaceVars.entrySet()) {
		schema = schema.replace(replaceVar.getKey(), replaceVar.getValue());
	}

	return Environment.parse(schema);
}
 
Example 13
Source Project: flink   Source File: JsonRowSchemaConverterTest.java    License: Apache License 2.0
@Test
public void testReferenceSchema() throws Exception {
	final URL url = getClass().getClassLoader().getResource("reference-schema.json");
	Objects.requireNonNull(url);
	final String schema = FileUtils.readFileUtf8(new File(url.getFile()));
	final TypeInformation<?> result = JsonRowSchemaConverter.convert(schema);

	final TypeInformation<?> expected = Types.ROW_NAMED(
		new String[] {"billing_address", "shipping_address", "optional_address"},
		Types.ROW_NAMED(new String[] {"street_address", "city", "state"}, Types.STRING, Types.STRING, Types.STRING),
		Types.ROW_NAMED(new String[] {"street_address", "city", "state"}, Types.STRING, Types.STRING, Types.STRING),
		Types.ROW_NAMED(new String[] {"street_address", "city", "state"}, Types.STRING, Types.STRING, Types.STRING));

	assertEquals(expected, result);
}
 
Example 14
Source Project: flink   Source File: FileChannelManagerImpl.java    License: Apache License 2.0
private static AutoCloseable getFileCloser(File path) {
	return () -> {
		try {
			FileUtils.deleteDirectory(path);
			LOG.info("FileChannelManager removed spill file directory {}", path.getAbsolutePath());
		} catch (IOException e) {
			String errorMessage = String.format("FileChannelManager failed to properly clean up temp file directory: %s", path);
			throw new IOException(errorMessage, e);
		}
	};
}
 
Example 15
Source Project: Flink-CEPplus   Source File: AbstractTestBase.java    License: Apache License 2.0
public String createTempFile(String fileName, String contents) throws IOException {
	File f = createAndRegisterTempFile(fileName);
	if (!f.getParentFile().exists()) {
		f.getParentFile().mkdirs();
	}
	f.createNewFile();
	FileUtils.writeFileUtf8(f, contents);
	return f.toURI().toString();
}
 
Example 16
protected Configuration createConfiguration(boolean compressionEnabled, String subpartitionType) {
	Configuration configuration = super.createConfiguration();

	configuration.setBoolean(NettyShuffleEnvironmentOptions.BLOCKING_SHUFFLE_COMPRESSION_ENABLED, compressionEnabled);
	configuration.setString(NettyShuffleEnvironmentOptions.NETWORK_BLOCKING_SHUFFLE_TYPE, subpartitionType);
	configuration.setString(CoreOptions.TMP_DIRS, FileUtils.getCurrentWorkingDirectory().toAbsolutePath().toUri().toString());
	return configuration;
}
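FileUtils.getCurrentWorkingDirectory() returns the working directory as a java.nio.file.Path; the configuration above converts it to a URI string because CoreOptions.TMP_DIRS expects directory strings. A one-line sketch of the same conversion:

// Minimal sketch: the current working directory as a URI string,
// e.g. for directory-valued configuration options.
public static String cwdAsUriString() {
	return FileUtils.getCurrentWorkingDirectory().toAbsolutePath().toUri().toString();
}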
 
Example 17
Source Project: flink   Source File: FileUploadHandlerTest.java    License: Apache License 2.0
@Test
public void testUploadDirectoryRegeneration() throws Exception {
	OkHttpClient client = new OkHttpClient();

	MultipartUploadResource.MultipartFileHandler fileHandler = MULTIPART_UPLOAD_RESOURCE.getFileHandler();

	FileUtils.deleteDirectory(MULTIPART_UPLOAD_RESOURCE.getUploadDirectory().toFile());

	Request fileRequest = buildFileRequest(fileHandler.getMessageHeaders().getTargetRestEndpointURL());
	try (Response response = client.newCall(fileRequest).execute()) {
		assertEquals(fileHandler.getMessageHeaders().getResponseStatusCode().code(), response.code());
	}
}
 
Example 18
Source Project: flink   Source File: FileCache.java    License: Apache License 2.0
@Override
public Path call() throws IOException {
	// let exceptions propagate. we can retrieve them later from
	// the future and report them upon access to the result
	FileUtils.copy(filePath, cachedPath, this.executable);
	return cachedPath;
}
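FileUtils.copy takes org.apache.flink.core.fs.Path arguments, so the same call works across Flink file systems; the boolean flag controls whether the target is marked executable. A minimal sketch:

// Minimal sketch: copy a source path to a target path without setting the executable bit.
// Assumes imports: java.io.IOException, org.apache.flink.core.fs.Path, org.apache.flink.util.FileUtils
public static void copyPlain(Path source, Path target) throws IOException {
	FileUtils.copy(source, target, false); // true would mark the copied file(s) executable
}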
 
Example 19
Source Project: flink   Source File: AbstractTaskManagerFileHandlerTest.java    License: Apache License 2.0
/**
 * Tests that file cache entries expire.
 */
@Test
public void testFileCacheExpiration() throws Exception {
	final Time cacheEntryDuration = Time.milliseconds(5L);

	final File outputFile = runFileCachingTest(cacheEntryDuration, cacheEntryDuration);

	assertThat(outputFile.length(), is(greaterThan(0L)));
	assertThat(FileUtils.readFileUtf8(outputFile), is(equalTo(fileContent2)));
}
 
Example 20
Source Project: flink   Source File: FileUploadHandler.java    License: Apache License 2.0
private void deleteUploadedFiles() {
	if (currentUploadDir != null) {
		try {
			FileUtils.deleteDirectory(currentUploadDir.toFile());
		} catch (IOException e) {
			LOG.warn("Could not cleanup uploaded files.", e);
		}
	}
}
 
Example 21
Source Project: bravo   Source File: RocksDBCheckpointIterator.java    License: Apache License 2.0
@Override
public void close() throws IOException {
	IOUtils.closeQuietly(cancelStreamRegistry);
	IOUtils.closeAllQuietly(stateColumnFamilyHandles);
	IOUtils.closeQuietly(db);
	IOUtils.closeQuietly(dbOptions);
	IOUtils.closeQuietly(colOptions);
	FileUtils.deleteDirectoryQuietly(new File(localPath));
}
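FileUtils.deleteDirectoryQuietly is the non-throwing variant of deleteDirectory, which suits close() paths like the one above where a failed delete must not mask the primary error. A minimal sketch:

// Minimal sketch: swallow cleanup failures at the end of a close() chain.
// deleteDirectoryQuietly catches the IOException internally and never throws.
public void disposeLocalCopy(File localDir) {
	FileUtils.deleteDirectoryQuietly(localDir);
}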
 
Example 22
Source Project: Flink-CEPplus   Source File: HDFSTest.java    License: Apache License 2.0
/**
 * Test that {@link FileUtils#deletePathIfEmpty(FileSystem, Path)} deletes the path if it is
 * empty. A path can only be empty if it is a directory which does not contain any
 * files/directories.
 */
@Test
public void testDeletePathIfEmpty() throws IOException {
	final Path basePath = new Path(hdfsURI);
	final Path directory = new Path(basePath, UUID.randomUUID().toString());
	final Path directoryFile = new Path(directory, UUID.randomUUID().toString());
	final Path singleFile = new Path(basePath, UUID.randomUUID().toString());

	FileSystem fs = basePath.getFileSystem();

	fs.mkdirs(directory);

	byte[] data = "HDFSTest#testDeletePathIfEmpty".getBytes(ConfigConstants.DEFAULT_CHARSET);

	for (Path file: Arrays.asList(singleFile, directoryFile)) {
		org.apache.flink.core.fs.FSDataOutputStream outputStream = fs.create(file, FileSystem.WriteMode.OVERWRITE);
		outputStream.write(data);
		outputStream.close();
	}

	// verify that the files have been created
	assertTrue(fs.exists(singleFile));
	assertTrue(fs.exists(directoryFile));

	// delete the single file
	assertFalse(FileUtils.deletePathIfEmpty(fs, singleFile));
	assertTrue(fs.exists(singleFile));

	// try to delete the non-empty directory
	assertFalse(FileUtils.deletePathIfEmpty(fs, directory));
	assertTrue(fs.exists(directory));

	// delete the file contained in the directory
	assertTrue(fs.delete(directoryFile, false));

	// now the deletion should work
	assertTrue(FileUtils.deletePathIfEmpty(fs, directory));
	assertFalse(fs.exists(directory));
}
 
Example 23
Source Project: flink   Source File: LocalFileSystemTest.java    License: Apache License 2.0
/**
 * Test that {@link FileUtils#deletePathIfEmpty(FileSystem, Path)} deletes the path if it is
 * empty. A path can only be empty if it is a directory which does not contain any
 * files/directories.
 */
@Test
public void testDeletePathIfEmpty() throws IOException {
	File file = temporaryFolder.newFile();
	File directory = temporaryFolder.newFolder();
	File directoryFile = new File(directory, UUID.randomUUID().toString());

	assertTrue(directoryFile.createNewFile());

	Path filePath = new Path(file.toURI());
	Path directoryPath = new Path(directory.toURI());
	Path directoryFilePath = new Path(directoryFile.toURI());

	FileSystem fs = FileSystem.getLocalFileSystem();

	// verify that the files have been created
	assertTrue(fs.exists(filePath));
	assertTrue(fs.exists(directoryFilePath));

	// delete the single file
	assertFalse(FileUtils.deletePathIfEmpty(fs, filePath));
	assertTrue(fs.exists(filePath));

	// try to delete the non-empty directory
	assertFalse(FileUtils.deletePathIfEmpty(fs, directoryPath));
	assertTrue(fs.exists(directoryPath));

	// delete the file contained in the directory
	assertTrue(fs.delete(directoryFilePath, false));

	// now the deletion should work
	assertTrue(FileUtils.deletePathIfEmpty(fs, directoryPath));
	assertFalse(fs.exists(directoryPath));
}
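Both this test and the HDFS variant above exercise the full contract of deletePathIfEmpty: it returns false, and leaves the path untouched, for files and for non-empty directories, and returns true once an empty directory has been removed. A condensed sketch of how a caller might prune empty parent directories (the loop is illustrative):

// Minimal sketch: walk upwards, removing directories for as long as they are empty.
// Assumes imports: java.io.IOException, org.apache.flink.core.fs.FileSystem, org.apache.flink.core.fs.Path
public static void pruneEmptyParents(FileSystem fs, Path start) throws IOException {
	Path current = start;
	while (current != null && FileUtils.deletePathIfEmpty(fs, current)) {
		current = current.getParent(); // stops at the first non-empty directory (or the root)
	}
}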
 
Example 24
Source Project: flink   Source File: AsynchronousFileIOChannel.java    License: Apache License 2.0
@Override
public void write() throws IOException {
	try {
		FileUtils.writeCompletely(this.channel.fileChannel, this.segment.wrap(0, this.segment.size()));
	}
	catch (NullPointerException npex) {
		throw new IOException("Memory segment has been released.");
	}
}
 
Example 25
@Override
public void close() throws IOException {
	cleanupFuture.cancel(false);

	jobDetailsCache.invalidateAll();

	// clean up the storage directory
	FileUtils.deleteFileOrDirectory(storageDir);

	// Remove shutdown hook to prevent resource leaks
	ShutdownHookUtil.removeShutdownHook(shutdownHook, getClass().getSimpleName(), LOG);
}
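FileUtils.deleteFileOrDirectory covers both cases in one call, a plain delete for a file and a recursive delete for a directory, so the close() above needs no isDirectory() branch. A one-line sketch:

// Minimal sketch: one call handles files and directories alike.
public static void remove(File fileOrDirectory) throws IOException {
	FileUtils.deleteFileOrDirectory(fileOrDirectory); // recursive for directories, plain delete for files
}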
 
Example 26
Source Project: Flink-CEPplus   Source File: FileUploadHandler.java    License: Apache License 2.0
private void deleteUploadedFiles() {
	if (currentUploadDir != null) {
		try {
			FileUtils.deleteDirectory(currentUploadDir.toFile());
		} catch (IOException e) {
			LOG.warn("Could not cleanup uploaded files.", e);
		}
	}
}
 
Example 27
Source Project: flink   Source File: FileCache.java    License: Apache License 2.0
@Override
public Path call() throws IOException {
	final File file = blobService.getFile(jobID, blobKey);

	if (isDirectory) {
		Path directory = FileUtils.expandDirectory(new Path(file.getAbsolutePath()), target);
		return directory;
	} else {
		//noinspection ResultOfMethodCallIgnored
		file.setExecutable(isExecutable);
		return Path.fromLocalFile(file);
	}
}
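FileUtils.expandDirectory unpacks a zipped directory, here a directory artifact fetched from the BLOB service, into the target path and returns the path of the expanded directory. A minimal sketch (both paths are supplied by the caller):

// Minimal sketch: expand a zip archive below a target directory and use the returned path.
// Assumes imports: java.io.IOException, org.apache.flink.core.fs.Path, org.apache.flink.util.FileUtils
public static Path unpackArchive(Path zipArchive, Path targetDirectory) throws IOException {
	return FileUtils.expandDirectory(zipArchive, targetDirectory);
}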
 
Example 28
Source Project: flink   Source File: BlobServer.java    License: Apache License 2.0
/**
 * Removes all BLOBs from local and HA store belonging to the given job ID.
 *
 * @param jobId
 * 		ID of the job this blob belongs to
 * @param cleanupBlobStoreFiles
 * 		True if the corresponding blob store files shall be cleaned up as well. Otherwise false.
 *
 * @return  <tt>true</tt> if the job directory is successfully deleted or non-existing;
 *          <tt>false</tt> otherwise
 */
public boolean cleanupJob(JobID jobId, boolean cleanupBlobStoreFiles) {
	checkNotNull(jobId);

	final File jobDir =
		new File(BlobUtils.getStorageLocationPath(storageDir.getAbsolutePath(), jobId));

	readWriteLock.writeLock().lock();

	try {
		// delete locally
		boolean deletedLocally = false;
		try {
			FileUtils.deleteDirectory(jobDir);

			// NOTE: Instead of going through blobExpiryTimes, keep lingering entries - they
			//       will be cleaned up by the timer task which tolerates non-existing files
			//       If inserted again with the same IDs (via put()), the TTL will be updated
			//       again.

			deletedLocally = true;
		} catch (IOException e) {
			LOG.warn("Failed to locally delete BLOB storage directory at " +
				jobDir.getAbsolutePath(), e);
		}

		// delete in HA blob store files
		final boolean deletedHA = !cleanupBlobStoreFiles || blobStore.deleteAll(jobId);

		return deletedLocally && deletedHA;
	} finally {
		readWriteLock.writeLock().unlock();
	}
}
 
Example 29
Source Project: Flink-CEPplus   Source File: BlobServer.java    License: Apache License 2.0
/**
 * Removes all BLOBs from local and HA store belonging to the given job ID.
 *
 * @param jobId
 * 		ID of the job this blob belongs to
 * @param cleanupBlobStoreFiles
 * 		True if the corresponding blob store files shall be cleaned up as well. Otherwise false.
 *
 * @return  <tt>true</tt> if the job directory is successfully deleted or non-existing;
 *          <tt>false</tt> otherwise
 */
public boolean cleanupJob(JobID jobId, boolean cleanupBlobStoreFiles) {
	checkNotNull(jobId);

	final File jobDir =
		new File(BlobUtils.getStorageLocationPath(storageDir.getAbsolutePath(), jobId));

	readWriteLock.writeLock().lock();

	try {
		// delete locally
		boolean deletedLocally = false;
		try {
			FileUtils.deleteDirectory(jobDir);

			// NOTE: Instead of going through blobExpiryTimes, keep lingering entries - they
			//       will be cleaned up by the timer task which tolerates non-existing files
			//       If inserted again with the same IDs (via put()), the TTL will be updated
			//       again.

			deletedLocally = true;
		} catch (IOException e) {
			LOG.warn("Failed to locally delete BLOB storage directory at " +
				jobDir.getAbsolutePath(), e);
		}

		// delete in HA blob store files
		final boolean deletedHA = !cleanupBlobStoreFiles || blobStore.deleteAll(jobId);

		return deletedLocally && deletedHA;
	} finally {
		readWriteLock.writeLock().unlock();
	}
}
 
Example 30
Source Project: flink   Source File: HiveCatalogITCase.java    License: Apache License 2.0
@Test
public void testReadWriteCsv() throws Exception {
	// similar to CatalogTableITCase::testReadWriteCsvUsingDDL but uses HiveCatalog
	EnvironmentSettings settings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
	TableEnvironment tableEnv = TableEnvironment.create(settings);
	tableEnv.getConfig().getConfiguration().setInteger(TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, 1);

	tableEnv.registerCatalog("myhive", hiveCatalog);
	tableEnv.useCatalog("myhive");

	String srcPath = this.getClass().getResource("/csv/test3.csv").getPath();

	tableEnv.executeSql("CREATE TABLE src (" +
			"price DECIMAL(10, 2),currency STRING,ts6 TIMESTAMP(6),ts AS CAST(ts6 AS TIMESTAMP(3)),WATERMARK FOR ts AS ts) " +
			String.format("WITH ('connector.type' = 'filesystem','connector.path' = 'file://%s','format.type' = 'csv')", srcPath));

	String sinkPath = new File(tempFolder.newFolder(), "csv-order-sink").toURI().toString();

	tableEnv.executeSql("CREATE TABLE sink (" +
			"window_end TIMESTAMP(3),max_ts TIMESTAMP(6),counter BIGINT,total_price DECIMAL(10, 2)) " +
			String.format("WITH ('connector.type' = 'filesystem','connector.path' = '%s','format.type' = 'csv')", sinkPath));

	TableEnvUtil.execInsertSqlAndWaitResult(tableEnv, "INSERT INTO sink " +
			"SELECT TUMBLE_END(ts, INTERVAL '5' SECOND),MAX(ts6),COUNT(*),MAX(price) FROM src " +
			"GROUP BY TUMBLE(ts, INTERVAL '5' SECOND)");

	String expected = "2019-12-12 00:00:05.0,2019-12-12 00:00:04.004001,3,50.00\n" +
			"2019-12-12 00:00:10.0,2019-12-12 00:00:06.006001,2,5.33\n";
	assertEquals(expected, FileUtils.readFileUtf8(new File(new URI(sinkPath))));
}