Java Code Examples for org.apache.flink.table.api.Table#writeToSink()
The following examples show how to use org.apache.flink.table.api.Table#writeToSink(). Each example is taken from an open-source project; the source file, project, and license are noted above the code.
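All of the examples below follow the same basic pattern: obtain a Table (usually from a SQL query or a scan over a registered source), create a TableSink, and call writeToSink() to attach the table's rows to that sink; nothing actually runs until the environment's execute() is invoked. The following is a minimal sketch of that pattern, assuming a pre-1.9 Flink distribution (where writeToSink() was still available; later releases deprecated it in favor of insertInto()) and a hypothetical source table named "Orders" that is registered elsewhere:

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableEnvironment;
import org.apache.flink.table.api.java.StreamTableEnvironment;
import org.apache.flink.table.sinks.CsvTableSink;
import org.apache.flink.table.sinks.TableSink;

public class WriteToSinkSketch {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

        // Assumption: a table source was registered under the name "Orders",
        // e.g. tableEnv.registerTableSource("Orders", someTableSource);
        Table result = tableEnv.sql("SELECT name FROM Orders");

        // writeToSink() configures the sink with the table's schema and
        // emits the table's rows to it when the job runs.
        TableSink sink = new CsvTableSink("/tmp/out.csv", "|");
        result.writeToSink(sink);

        // The pipeline is only executed here.
        env.execute("writeToSink sketch");
    }
}

The examples vary mainly in where the Table comes from (static SQL, dynamically compiled Table API scripts) and which TableSink they target (CSV files, Kafka Avro/JSON topics).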
Example 1
Source File: AbstractFlinkClient.java From alchemy with Apache License 2.0
private void registerSink(Table table, SinkDescriptor sinkDescriptor) throws Exception {
    TableSchema tableSchema = table.getSchema();
    TableSink tableSink = sinkDescriptor.transform(tableSchema);
    table.writeToSink(tableSink);
    LOGGER.info("register sink, name:{}, class:{}", sinkDescriptor.getName(), sinkDescriptor.getClass());
}
Example 2
Source File: FlinkTaskInstanceBusiness.java From PoseidonX with Apache License 2.0
/**
 * Handles the concrete SQL business logic.
 * @param flinkComponent
 */
private void dealFlinkTaskLogicComponent(FlinkTaskLogicComponent flinkComponent) {
    System.out.println(flinkComponent.getLogicSql());
    Table resultTable = tEnv.sql(flinkComponent.getLogicSql());
    TableSink tableSink = tableSinkMap.get(flinkComponent.getTargetOutputComponentName());
    resultTable.printSchema();
    resultTable.writeToSink(tableSink);
}
Example 3
Source File: JobCompiler.java From AthenaX with Apache License 2.0
JobGraph getJobGraph() throws IOException {
    StreamExecutionEnvironment exeEnv = env.execEnv();
    exeEnv.setParallelism(job.parallelism());
    this.registerUdfs()
        .registerInputCatalogs();
    Table table = env.sqlQuery(job.sql());
    for (String t : job.outputs().listTables()) {
        table.writeToSink(getOutputTable(job.outputs().getTable(t)));
    }
    StreamGraph streamGraph = exeEnv.getStreamGraph();
    return streamGraph.getJobGraph();
}
Example 4
Source File: FlinkAvroSQLClient.java From df_data_service with Apache License 2.0
public static void tcFlinkAvroSQL(String KafkaServerHostPort, String SchemaRegistryHostPort,
                                  String srcTopic, String targetTopic,
                                  String consumerGroupId, String sinkKeys, String sqlState) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty(ConstantApp.PK_KAFKA_HOST_PORT.replace("_", "."), KafkaServerHostPort);
    properties.setProperty(ConstantApp.PK_KAFKA_CONSUMER_GROURP, consumerGroupId);
    properties.setProperty(ConstantApp.PK_KAFKA_SCHEMA_REGISTRY_HOST_PORT.replace("_", "."), SchemaRegistryHostPort);
    properties.setProperty(ConstantApp.PK_FLINK_TABLE_SINK_KEYS, sinkKeys);

    String[] srcTopicList = srcTopic.split(",");
    for (int i = 0; i < srcTopicList.length; i++) {
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_INPUT, srcTopicList[i]);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_INPUT,
                SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_INPUT,
                SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT).toString());
        tableEnv.registerTableSource(srcTopicList[i], new Kafka010AvroTableSource(srcTopicList[i], properties));
    }

    try {
        Table result = tableEnv.sql(sqlState);
        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic, result);
        // For old producer, we need to create topic-value subject as well
        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic + "-value", result);

        // delivered properties for sink
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, targetTopic);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_OUTPUT,
                SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_OUTPUT,
                SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT).toString());

        Kafka09AvroTableSink avro_sink = new Kafka09AvroTableSink(targetTopic, properties, new FlinkFixedPartitioner());
        result.writeToSink(avro_sink);
        env.execute("DF_FlinkSQL_Client_" + srcTopic + "-" + targetTopic);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 5
Source File: UnitTestSuiteFlink.java From df_data_service with Apache License 2.0
public static void testFlinkAvroSQL() {
    System.out.println("TestCase_Test Avro SQL");
    String resultFile = "/home/vagrant/test.txt";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    StreamExecutionEnvironment env = StreamExecutionEnvironment
            .createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("static.avro.schema", "empty_schema");

    try {
        Kafka09AvroTableSource kafkaAvroTableSource = new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);

        //Table result = tableEnv.sql("SELECT STREAM name, symbol, exchange FROM Orders");
        Table result = tableEnv.sql("SELECT name, symbol, exchangecode FROM Orders");
        Files.deleteIfExists(Paths.get(resultFile));

        // create a TableSink
        TableSink sink = new CsvTableSink(resultFile, "|");
        // write the result Table to the TableSink
        result.writeToSink(sink);
        env.execute("Flink AVRO SQL KAFKA Test");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 6
Source File: UnitTestSuiteFlink.java From df_data_service with Apache License 2.0
public static void testFlinkAvroSQLJson() {
    System.out.println("TestCase_Test Avro SQL to Json Sink");
    final String STATIC_USER_SCHEMA = "{"
            + "\"type\":\"record\","
            + "\"name\":\"myrecord\","
            + "\"fields\":["
            + " { \"name\":\"symbol\", \"type\":\"string\" },"
            + " { \"name\":\"name\", \"type\":\"string\" },"
            + " { \"name\":\"exchangecode\", \"type\":\"string\" }"
            + "]}";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    DFRemoteStreamEnvironment env = new DFRemoteStreamEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("useAvro", "avro");
    properties.setProperty("static.avro.schema",
            SchemaRegistryClient.getSchemaFromRegistry("http://localhost:8081", "test-value", "latest").toString());

    try {
        HashMap<String, String> hm = new HashMap<>();
        Kafka09AvroTableSource kafkaAvroTableSource = new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);
        Table result = tableEnv.sql("SELECT name, symbol, exchangecode FROM Orders");

        //Kafka09JsonTableSink json_sink = new Kafka09JsonTableSink("test_json", properties, new FlinkFixedPartitioner());
        Kafka09AvroTableSink json_sink = new Kafka09AvroTableSink("test_json", properties, new FlinkFixedPartitioner());

        // write the result Table to the TableSink
        result.writeToSink(json_sink);
        env.executeWithDFObj("Flink AVRO SQL KAFKA Test", new DFJobPOPJ().setJobConfig(hm));
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 7
Source File: FlinkAvroTableAPIClient.java From df_data_service with Apache License 2.0
public static void tcFlinkAvroTableAPI(String KafkaServerHostPort, String SchemaRegistryHostPort,
                                       String srcTopic, String targetTopic,
                                       String consumerGroupId, String sinkKeys, String transScript) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty(ConstantApp.PK_KAFKA_HOST_PORT.replace("_", "."), KafkaServerHostPort);
    properties.setProperty(ConstantApp.PK_KAFKA_CONSUMER_GROURP, consumerGroupId);
    properties.setProperty(ConstantApp.PK_KAFKA_SCHEMA_REGISTRY_HOST_PORT.replace("_", "."), SchemaRegistryHostPort);
    properties.setProperty(ConstantApp.PK_FLINK_TABLE_SINK_KEYS, sinkKeys);

    String[] srcTopicList = srcTopic.split(",");
    for (int i = 0; i < srcTopicList.length; i++) {
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_INPUT, srcTopicList[i]);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_INPUT,
                SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_INPUT,
                SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT).toString());
        tableEnv.registerTableSource(srcTopic, new Kafka010AvroTableSource(srcTopicList[i], properties));
    }

    try {
        Table result;
        Table ingest = tableEnv.scan(srcTopic);
        String className = "dynamic.FlinkScript";
        String header = "package dynamic;\n"
                + "import org.apache.flink.table.api.Table;\n"
                + "import com.datafibers.util.*;\n";
        String javaCode = header
                + "public class FlinkScript implements DynamicRunner {\n"
                + "@Override \n"
                + " public Table transTableObj(Table tbl) {\n"
                + "try {"
                + "return tbl." + transScript + ";\n"
                + "} catch (Exception e) {"
                + "};"
                + "return null;}}";

        // Dynamic code generation
        Class aClass = CompilerUtils.CACHED_COMPILER.loadFromJava(className, javaCode);
        DynamicRunner runner = (DynamicRunner) aClass.newInstance();
        result = runner.transTableObj(ingest);

        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic, result);
        // delivered properties for sink
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, targetTopic);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_OUTPUT,
                SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_OUTPUT,
                SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT).toString());

        Kafka09AvroTableSink avro_sink = new Kafka09AvroTableSink(targetTopic, properties, new FlinkFixedPartitioner());
        result.writeToSink(avro_sink);
        env.execute("DF_FlinkTableAPI_Client_" + srcTopic + "-" + targetTopic);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 8
Source File: TCFlinkAvroSQL.java From df_data_service with Apache License 2.0
public static void tcFlinkAvroSQL(String SchemaRegistryHostPort, String srcTopic, String targetTopic, String sqlState) {
    System.out.println("tcFlinkAvroSQL");
    String resultFile = "testResult";

    String jarPath = "C:/Users/dadu/Coding/df_data_service/target/df-data-service-1.1-SNAPSHOT-fat.jar";
    //String jarPath = "/Users/will/Documents/Coding/GitHub/df_data_service/target/df-data-service-1.1-SNAPSHOT-fat.jar";
    StreamExecutionEnvironment env = StreamExecutionEnvironment
            .createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty(ConstantApp.PK_KAFKA_HOST_PORT.replace("_", "."), "localhost:9092");
    properties.setProperty(ConstantApp.PK_KAFKA_CONSUMER_GROURP, "consumer_test");
    //properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, "test");
    properties.setProperty(ConstantApp.PK_KAFKA_SCHEMA_REGISTRY_HOST_PORT.replace("_", "."), SchemaRegistryHostPort);
    properties.setProperty(ConstantApp.PK_FLINK_TABLE_SINK_KEYS, "symbol");

    String[] srcTopicList = srcTopic.split(",");
    for (int i = 0; i < srcTopicList.length; i++) {
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_INPUT, srcTopicList[i]);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_INPUT,
                SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_INPUT,
                SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_INPUT).toString());
        tableEnv.registerTableSource(srcTopicList[i], new Kafka010AvroTableSource(srcTopicList[i], properties));
    }

    try {
        Table result = tableEnv.sql(sqlState);
        result.printSchema();
        System.out.println("generated avro schema is = " + SchemaRegistryClient.tableAPIToAvroSchema(result, targetTopic));
        SchemaRegistryClient.addSchemaFromTableResult(SchemaRegistryHostPort, targetTopic, result);

        // delivered properties
        properties.setProperty(ConstantApp.PK_SCHEMA_SUB_OUTPUT, targetTopic);
        properties.setProperty(ConstantApp.PK_SCHEMA_ID_OUTPUT,
                SchemaRegistryClient.getLatestSchemaIDFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT) + "");
        properties.setProperty(ConstantApp.PK_SCHEMA_STR_OUTPUT,
                SchemaRegistryClient.getLatestSchemaFromProperty(properties, ConstantApp.PK_SCHEMA_SUB_OUTPUT).toString());

        System.out.println(Paths.get(resultFile).toAbsolutePath());
        Kafka09AvroTableSink avro_sink = new Kafka09AvroTableSink(targetTopic, properties, new FlinkFixedPartitioner());
        result.writeToSink(avro_sink);
        //result.writeToSink(new CsvTableSink(resultFile, "|", 1, FileSystem.WriteMode.OVERWRITE));
        env.execute("tcFlinkAvroSQL");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 9
Source File: CodeGenFlinkTable.java From df_data_service with Apache License 2.0
public static void main(String args[]) {
    String transform = "flatMap(new FlinkUDF.LineSplitter()).groupBy(0).sum(1).print();\n";
    String transform2 = "select(\"name\");\n";
    String header = "package dynamic;\n"
            + "import org.apache.flink.api.table.Table;\n"
            + "import com.datafibers.util.*;\n";
    String javaCode = header
            + "public class FlinkScript implements DynamicRunner {\n"
            + "@Override \n"
            + " public void runTransform(DataSet<String> ds) {\n"
            + "try {"
            + "ds." + transform
            + "} catch (Exception e) {"
            + "};"
            + "}}";
    String javaCode2 = header
            + "public class FlinkScript implements DynamicRunner {\n"
            + "@Override \n"
            + " public Table transTableObj(Table tbl) {\n"
            + "try {"
            + "return tbl." + transform2
            + "} catch (Exception e) {"
            + "};"
            + "return null;}}";

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    CsvTableSource csvTableSource = new CsvTableSource(
            "/Users/will/Downloads/file.csv",
            new String[] { "name", "id", "score", "comments" },
            new TypeInformation<?>[] {
                    Types.STRING(), Types.STRING(), Types.STRING(), Types.STRING()
            }); // lenient

    tableEnv.registerTableSource("mycsv", csvTableSource);
    TableSink sink = new CsvTableSink("/Users/will/Downloads/out.csv", "|");
    Table ingest = tableEnv.scan("mycsv");

    try {
        String className = "dynamic.FlinkScript";
        Class aClass = CompilerUtils.CACHED_COMPILER.loadFromJava(className, javaCode2);
        DynamicRunner runner = (DynamicRunner) aClass.newInstance();
        //runner.runTransform(ds);
        Table result = runner.transTableObj(ingest);
        // write the result Table to the TableSink
        result.writeToSink(sink);
        env.execute();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 10
Source File: UnitTestSuiteFlink.java From df_data_service with Apache License 2.0
public static void testFlinkSQL() {
    LOG.info("Only Unit Testing Function is enabled");
    String resultFile = "/home/vagrant/test.txt";

    try {
        String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
        StreamExecutionEnvironment env = StreamExecutionEnvironment
                .createRemoteEnvironment("localhost", 6123, jarPath)
                .setParallelism(1);
        String kafkaTopic = "finance";
        String kafkaTopic_stage = "df_trans_stage_finance";
        String kafkaTopic_out = "df_trans_out_finance";

        StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "localhost:9092");
        properties.setProperty("group.id", "consumer3");

        // Internal convert Json String to Json - Begin
        DataStream<String> stream = env
                .addSource(new FlinkKafkaConsumer09<>(kafkaTopic, new SimpleStringSchema(), properties));

        stream.map(new MapFunction<String, String>() {
            @Override
            public String map(String jsonString) throws Exception {
                return jsonString.replaceAll("\\\\", "").replace("\"{", "{").replace("}\"", "}");
            }
        }).addSink(new FlinkKafkaProducer09<String>("localhost:9092", kafkaTopic_stage, new SimpleStringSchema()));
        // Internal convert Json String to Json - End

        String[] fieldNames = new String[] {"name"};
        Class<?>[] fieldTypes = new Class<?>[] {String.class};

        Kafka09AvroTableSource kafkaTableSource = new Kafka09AvroTableSource(
                kafkaTopic_stage, properties, fieldNames, fieldTypes);
        //kafkaTableSource.setFailOnMissingField(true);

        tableEnv.registerTableSource("Orders", kafkaTableSource);

        //Table result = tableEnv.sql("SELECT STREAM name FROM Orders");
        Table result = tableEnv.sql("SELECT name FROM Orders");
        Files.deleteIfExists(Paths.get(resultFile));

        // create a TableSink
        TableSink sink = new CsvTableSink(resultFile, "|");
        // write the result Table to the TableSink
        result.writeToSink(sink);

        env.execute("FlinkConsumer");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 11
Source File: UnitTestSuiteFlink.java From df_data_service with Apache License 2.0
public static void testFlinkAvroSQLWithStaticSchema() {
    System.out.println("TestCase_Test Avro SQL with static Schema");
    final String STATIC_USER_SCHEMA = "{"
            + "\"type\":\"record\","
            + "\"name\":\"myrecord\","
            + "\"fields\":["
            + " { \"name\":\"symbol\", \"type\":\"string\" },"
            + " { \"name\":\"name\", \"type\":\"string\" },"
            + " { \"name\":\"exchangecode\", \"type\":\"string\" }"
            + "]}";

    String resultFile = "/home/vagrant/test.txt";
    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    StreamExecutionEnvironment env = StreamExecutionEnvironment
            .createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("static.avro.schema", STATIC_USER_SCHEMA);

    try {
        Kafka09AvroTableSource kafkaAvroTableSource = new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);

        //Table result = tableEnv.sql("SELECT STREAM name, symbol, exchange FROM Orders");
        Table result = tableEnv.sql("SELECT name, symbol, exchangecode FROM Orders");
        Files.deleteIfExists(Paths.get(resultFile));

        // create a TableSink
        TableSink sink = new CsvTableSink(resultFile, "|");
        // write the result Table to the TableSink
        result.writeToSink(sink);
        env.execute("Flink AVRO SQL KAFKA Test");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 12
Source File: UnitTestSuiteFlink.java From df_data_service with Apache License 2.0
public static void testFlinkAvroScriptWithStaticSchema() {
    System.out.println("TestCase_Test Avro Table API Script with static Schema");
    final String STATIC_USER_SCHEMA = "{"
            + "\"type\":\"record\","
            + "\"name\":\"myrecord\","
            + "\"fields\":["
            + " { \"name\":\"symbol\", \"type\":\"string\" },"
            + " { \"name\":\"name\", \"type\":\"string\" },"
            + " { \"name\":\"exchangecode\", \"type\":\"string\" }"
            + "]}";

    String jarPath = DFInitService.class.getProtectionDomain().getCodeSource().getLocation().getPath();
    StreamExecutionEnvironment env = StreamExecutionEnvironment
            .createRemoteEnvironment("localhost", 6123, jarPath)
            .setParallelism(1);
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    Properties properties = new Properties();
    properties.setProperty("bootstrap.servers", "localhost:9092");
    properties.setProperty("group.id", "consumer_test");
    properties.setProperty("schema.subject", "test-value");
    properties.setProperty("schema.registry", "localhost:8081");
    properties.setProperty("static.avro.schema", STATIC_USER_SCHEMA);

    try {
        Kafka09AvroTableSource kafkaAvroTableSource = new Kafka09AvroTableSource("test", properties);
        tableEnv.registerTableSource("Orders", kafkaAvroTableSource);
        Table ingest = tableEnv.scan("Orders");

        String className = "dynamic.FlinkScript";
        String header = "package dynamic;\n"
                + "import org.apache.flink.table.api.Table;\n"
                + "import com.datafibers.util.*;\n";
        String transScript = "select(\"name\")";
        String javaCode = header
                + "public class FlinkScript implements DynamicRunner {\n"
                + "@Override \n"
                + " public Table transTableObj(Table tbl) {\n"
                + "try {"
                + "return tbl." + transScript + ";"
                + "} catch (Exception e) {"
                + "};"
                + "return null;}}";

        // Dynamic code generation
        Class aClass = CompilerUtils.CACHED_COMPILER.loadFromJava(className, javaCode);
        DynamicRunner runner = (DynamicRunner) aClass.newInstance();
        Table result = runner.transTableObj(ingest);

        Kafka09AvroTableSink sink = new Kafka09AvroTableSink("test_json", properties, new FlinkFixedPartitioner());
        // write the result Table to the TableSink
        result.writeToSink(sink);
        env.execute("Flink AVRO SQL KAFKA Test");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 13
Source File: WordCountStream.java From df_data_service with Apache License 2.0
public static void main(String args[]) {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

    // Create a DataStream from a list of elements
    //DataStream<Integer> ds = env.fromElements(1, 2, 3, 4, 5);

    CsvTableSource csvTableSource = new CsvTableSource(
            "/Users/will/Downloads/file.csv",
            new String[] { "name", "id", "score", "comments" },
            new TypeInformation<?>[] {
                    Types.STRING(), Types.STRING(), Types.STRING(), Types.STRING()
            }); // lenient

    tableEnv.registerTableSource("mycsv", csvTableSource);
    TableSink sink = new CsvTableSink("/Users/will/Downloads/out.csv", "|");

    //tableEnv.registerDataStream("tbl", ds, "a");
    //Table ingest = tableEnv.fromDataStream(ds, "name");
    Table in = tableEnv.scan("mycsv");
    //Table in = tableEnv.ingest("tbl");
    //Table in = tableEnv.fromDataStream(ds, "a");

    Table result = in.select("name");
    result.writeToSink(sink);
    try {
        env.execute();
    } catch (Exception e) {
        e.printStackTrace();
    }
    System.out.print("DONE");
}