org.apache.flink.table.api.TableSchema Java Examples
The following examples show how to use
org.apache.flink.table.api.TableSchema.
Each example is taken from an open-source project; the source project, author, file, and license are listed above it.
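Before the examples, a minimal sketch of the class itself may help. It assumes a Flink 1.10-era classpath (the era these examples target); the class name TableSchemaPrimer is ours, not Flink's:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;

public class TableSchemaPrimer {
    public static void main(String[] args) {
        // Build a two-column schema with the fluent builder.
        TableSchema schema = TableSchema.builder()
            .field("id", DataTypes.BIGINT())
            .field("name", DataTypes.STRING())
            .build();

        // Field names and types come back as parallel arrays.
        System.out.println(String.join(", ", schema.getFieldNames())); // id, name
        System.out.println(schema.toRowDataType());                    // e.g. ROW<`id` BIGINT, `name` STRING>
    }
}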
Example #1
Source Project: flink Author: apache File: JdbcTableSource.java License: Apache License 2.0
private JdbcTableSource(
        JdbcOptions options,
        JdbcReadOptions readOptions,
        JdbcLookupOptions lookupOptions,
        TableSchema schema,
        int[] selectFields) {
    this.options = options;
    this.readOptions = readOptions;
    this.lookupOptions = lookupOptions;
    this.schema = schema;
    this.selectFields = selectFields;

    final DataType[] schemaDataTypes = schema.getFieldDataTypes();
    final String[] schemaFieldNames = schema.getFieldNames();
    if (selectFields != null) {
        DataType[] dataTypes = new DataType[selectFields.length];
        String[] fieldNames = new String[selectFields.length];
        for (int i = 0; i < selectFields.length; i++) {
            dataTypes[i] = schemaDataTypes[selectFields[i]];
            fieldNames[i] = schemaFieldNames[selectFields[i]];
        }
        this.producedDataType =
            TableSchema.builder().fields(fieldNames, dataTypes).build().toRowDataType();
    } else {
        this.producedDataType = schema.toRowDataType();
    }
}
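The projection logic above is self-contained and can be lifted into a standalone helper. A sketch using only the calls from the example (the class and method names here are ours, not Flink's):

import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.types.DataType;

public final class SchemaProjection {

    // Hypothetical helper mirroring JdbcTableSource: a null selectFields means "all fields".
    public static DataType projectedRowType(TableSchema schema, int[] selectFields) {
        if (selectFields == null) {
            return schema.toRowDataType();
        }
        DataType[] dataTypes = new DataType[selectFields.length];
        String[] fieldNames = new String[selectFields.length];
        for (int i = 0; i < selectFields.length; i++) {
            dataTypes[i] = schema.getFieldDataTypes()[selectFields[i]];
            fieldNames[i] = schema.getFieldNames()[selectFields[i]];
        }
        return TableSchema.builder().fields(fieldNames, dataTypes).build().toRowDataType();
    }
}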
Example #2
Source Project: flink Author: apache File: LocalExecutorITCase.java License: Apache License 2.0
@Test
public void testTableSchema() throws Exception {
    final Executor executor = createDefaultExecutor(clusterClient);
    final SessionContext session = new SessionContext("test-session", new Environment());
    String sessionId = executor.openSession(session);
    assertEquals("test-session", sessionId);

    final TableSchema actualTableSchema = executor.getTableSchema(sessionId, "TableNumber2");
    final TableSchema expectedTableSchema = new TableSchema(
        new String[]{"IntegerField2", "StringField2", "TimestampField2"},
        new TypeInformation[]{Types.INT, Types.STRING, Types.SQL_TIMESTAMP});

    assertEquals(expectedTableSchema, actualTableSchema);
    executor.closeSession(sessionId);
}
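The two-argument constructor used here (deprecated in later Flink versions) and the builder are interchangeable for plain physical columns; a sketch of the equivalence, with the same types as in the test:

TableSchema viaConstructor = new TableSchema(
    new String[]{"IntegerField2", "StringField2", "TimestampField2"},
    new TypeInformation[]{Types.INT, Types.STRING, Types.SQL_TIMESTAMP});

TableSchema viaBuilder = TableSchema.builder()
    .field("IntegerField2", Types.INT)
    .field("StringField2", Types.STRING)
    .field("TimestampField2", Types.SQL_TIMESTAMP)
    .build();

// Both describe the same columns, so the schemas compare equal.
// viaConstructor.equals(viaBuilder) -> true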
Example #3
Source Project: flink Author: flink-tpc-ds File: DescriptorProperties.java License: Apache License 2.0
/**
 * Adds a table schema under the given key.
 */
public void putTableSchema(String key, TableSchema schema) {
    checkNotNull(key);
    checkNotNull(schema);

    final String[] fieldNames = schema.getFieldNames();
    final TypeInformation<?>[] fieldTypes = schema.getFieldTypes();

    final List<List<String>> values = new ArrayList<>();
    for (int i = 0; i < schema.getFieldCount(); i++) {
        values.add(Arrays.asList(fieldNames[i], TypeStringUtils.writeTypeInfo(fieldTypes[i])));
    }

    putIndexedFixedProperties(
        key,
        Arrays.asList(TABLE_SCHEMA_NAME, TABLE_SCHEMA_TYPE),
        values);
}
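DescriptorProperties can read such a schema back under the same key; a minimal round-trip sketch using this method (the key name "schema" is arbitrary):

DescriptorProperties properties = new DescriptorProperties();
properties.putTableSchema("schema", TableSchema.builder()
    .field("user", Types.STRING)
    .field("amount", Types.INT)
    .build());

// getTableSchema reassembles the indexed name/type properties written above.
TableSchema restored = properties.getTableSchema("schema");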
Example #4
Source Project: flink Author: apache File: KafkaTableSourceSinkFactoryBase.java License: Apache License 2.0
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

    final TableSchema schema = TableSchemaUtils.getPhysicalSchema(
        descriptorProperties.getTableSchema(SCHEMA));
    final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
    final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
    final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
        SchemaValidator.deriveRowtimeAttributes(descriptorProperties);

    // see also FLINK-9870
    if (proctime.isPresent()
            || !rowtimeAttributeDescriptors.isEmpty()
            || checkForCustomFieldMapping(descriptorProperties, schema)) {
        throw new TableException("Time attributes and custom field mappings are not supported yet.");
    }

    return createKafkaTableSink(
        schema,
        topic,
        getKafkaProperties(descriptorProperties),
        getFlinkKafkaPartitioner(descriptorProperties),
        getSerializationSchema(properties));
}
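TableSchemaUtils.getPhysicalSchema, called in the first lines, strips computed columns so that only physical fields reach the connector. A sketch of the effect, using the builder overload that accepts a generation expression (the same overload appears in a later example here):

TableSchema declared = TableSchema.builder()
    .field("user", DataTypes.STRING())
    .field("proc", DataTypes.TIMESTAMP(3), "PROCTIME()") // computed column
    .build();

TableSchema physical = TableSchemaUtils.getPhysicalSchema(declared);
// physical now contains only the "user" field.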
Example #5
Source Project: flink Author: apache File: DataStreamConversionUtilTest.java License: Apache License 2.0
@Test
public void testForceTypeWithTableSchema() {
    StreamExecutionEnvironment env = MLEnvironmentFactory.getDefault().getStreamExecutionEnvironment();
    DataStream<Row> input = env.fromElements(Row.of("s1"))
        .map(new DataStreamConversionUtilTest.GenericTypeMap());
    Table table2 = DataStreamConversionUtil.toTable(
        MLEnvironmentFactory.DEFAULT_ML_ENVIRONMENT_ID,
        input,
        new TableSchema(
            new String[]{"word"},
            new TypeInformation[]{TypeInformation.of(Integer.class)}
        )
    );
    Assert.assertEquals(
        new TableSchema(new String[]{"word"}, new TypeInformation[]{TypeInformation.of(Integer.class)}),
        table2.getSchema()
    );
}
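When the stream's row type already matches, no schema needs to be forced; a sketch, under the assumption that this toTable utility also offers a column-names-only overload:

DataStream<Row> rows = env.fromElements(Row.of("hello", 1));
Table t = DataStreamConversionUtil.toTable(
    MLEnvironmentFactory.DEFAULT_ML_ENVIRONMENT_ID,
    rows,
    new String[]{"word", "cnt"}); // field types are taken from the stream itself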
Example #6
Source Project: flink Author: apache File: Elasticsearch6DynamicSinkFactoryTest.java License: Apache License 2.0
@Test
public void validateEmptyConfiguration() {
    Elasticsearch6DynamicSinkFactory sinkFactory = new Elasticsearch6DynamicSinkFactory();

    thrown.expect(ValidationException.class);
    thrown.expectMessage(
        "One or more required options are missing.\n" +
        "\n" +
        "Missing required options are:\n" +
        "\n" +
        "document-type\n" +
        "hosts\n" +
        "index");
    sinkFactory.createDynamicTableSink(
        context()
            .withSchema(TableSchema.builder()
                .field("a", DataTypes.TIME())
                .build())
            .build()
    );
}
Example #7
Source Project: flink Author: apache File: TableFormatFactoryBaseTest.java License: Apache License 2.0
@Test
public void testSchemaDerivationWithRowtime() {
    final Map<String, String> properties = new HashMap<>();
    properties.put("schema.0.name", "otherField");
    properties.put("schema.0.type", "VARCHAR");
    properties.put("schema.0.from", "csvField");
    properties.put("schema.1.name", "abcField");
    properties.put("schema.1.type", "VARCHAR");
    properties.put("schema.2.name", "p");
    properties.put("schema.2.type", "TIMESTAMP");
    properties.put("schema.2.proctime", "true");
    properties.put("schema.3.name", "r");
    properties.put("schema.3.type", "TIMESTAMP");
    properties.put("schema.3.rowtime.timestamps.type", "from-field"); // from-field strategy
    properties.put("schema.3.rowtime.timestamps.from", "myTime");
    properties.put("schema.3.rowtime.watermarks.type", "from-source");

    final TableSchema actualSchema = TableFormatFactoryBase.deriveSchema(properties);
    final TableSchema expectedSchema = TableSchema.builder()
        .field("csvField", Types.STRING) // aliased
        .field("abcField", Types.STRING)
        .field("myTime", Types.SQL_TIMESTAMP)
        .build();

    assertEquals(expectedSchema, actualSchema);
}
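The flattened schema.* properties above are what the legacy Schema descriptor writes out; a sketch of a descriptor that would produce roughly this map:

new Schema()
    .field("otherField", Types.STRING).from("csvField")
    .field("abcField", Types.STRING)
    .field("p", Types.SQL_TIMESTAMP).proctime()
    .field("r", Types.SQL_TIMESTAMP)
        .rowtime(new Rowtime()
            .timestampsFromField("myTime")
            .watermarksFromSource());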
Example #8
Source Project: alchemy Author: binglind File: SideStream.java License: Apache License 2.0
private static RowTypeInfo createReturnType(TableSchema leftTable, RowTypeInfo sideType) {
    String[] leftFields = leftTable.getColumnNames();
    TypeInformation[] leftTypes = leftTable.getTypes();
    int leftArity = leftFields.length;
    int rightArity = sideType.getArity();
    int size = leftArity + rightArity;
    String[] columnNames = new String[size];
    TypeInformation[] columnTypes = new TypeInformation[size];
    for (int i = 0; i < leftArity; i++) {
        columnNames[i] = leftFields[i];
        columnTypes[i] = leftTypes[i];
    }
    for (int i = 0; i < rightArity; i++) {
        columnNames[leftArity + i] = sideType.getFieldNames()[i];
        columnTypes[leftArity + i] = sideType.getTypeAt(i);
    }
    return new RowTypeInfo(columnTypes, columnNames);
}
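For example, a left table with columns (a, b) joined against side fields (c) yields a three-field row type; a sketch of calling the helper (from within SideStream itself, since the method is private):

TableSchema left = TableSchema.builder()
    .field("a", Types.STRING)
    .field("b", Types.INT)
    .build();
RowTypeInfo side = new RowTypeInfo(
    new TypeInformation[]{Types.DOUBLE},
    new String[]{"c"});

RowTypeInfo joined = createReturnType(left, side);
// joined.getFieldNames() -> ["a", "b", "c"]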
Example #9
Source Project: Flink-CEPplus Author: ljygz File: Kafka011TableSourceSinkFactoryTest.java License: Apache License 2.0
@Override
protected KafkaTableSourceBase getExpectedKafkaTableSource(
        TableSchema schema,
        Optional<String> proctimeAttribute,
        List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors,
        Map<String, String> fieldMapping,
        String topic,
        Properties properties,
        DeserializationSchema<Row> deserializationSchema,
        StartupMode startupMode,
        Map<KafkaTopicPartition, Long> specificStartupOffsets) {
    return new Kafka011TableSource(
        schema,
        proctimeAttribute,
        rowtimeAttributeDescriptors,
        Optional.of(fieldMapping),
        topic,
        properties,
        deserializationSchema,
        startupMode,
        specificStartupOffsets
    );
}
Example #10
Source Project: Alink Author: alibaba File: VectorStandardScalerMapperTest.java License: Apache License 2.0
@Test
public void testDense() throws Exception {
    Row[] rows = new Row[]{
        Row.of(0L, "{\"withMean\":\"true\",\"selectedCol\":\"\\\"vec\\\"\",\"withStd\":\"true\"}", null),
        Row.of(1048576L, "[1.3333333333333333,0.3333333333333333]", null),
        Row.of(2097152L, "[2.5166114784235836,2.886751345948129]", null)
    };
    List<Row> model = Arrays.asList(rows);

    TableSchema dataSchema = new TableSchema(
        new String[]{"vec"},
        new TypeInformation<?>[]{Types.STRING}
    );
    Params params = new Params();

    VectorStandardScalerModelMapper mapper = new VectorStandardScalerModelMapper(modelSchema, dataSchema, params);
    mapper.loadModel(model);

    assertEquals(mapper.map(Row.of(new DenseVector(new double[]{1.0, 2.0}))).getField(0),
        new DenseVector(new double[]{-0.13245323570650433, 0.5773502691896257}));
}
Example #11
Source Project: Alink Author: alibaba File: VectorAssemblerMapperTest.java License: Apache License 2.0
@Test
public void testToSparse() throws Exception {
    TableSchema schema = new TableSchema(
        new String[]{"c0", "c1", "c2"},
        new TypeInformation<?>[]{Types.STRING, Types.DOUBLE, Types.STRING});
    TableSchema outSchema = new TableSchema(
        new String[]{"c0", "out"},
        new TypeInformation<?>[]{Types.STRING, VectorTypes.VECTOR});

    Params params = new Params()
        .set(VectorAssemblerParams.SELECTED_COLS, new String[]{"c0", "c1", "c2"})
        .set(VectorAssemblerParams.OUTPUT_COL, "out")
        .set(VectorAssemblerParams.RESERVED_COLS, new String[]{"c0"});

    VectorAssemblerMapper mapper = new VectorAssemblerMapper(schema, params);
    /* only reserve one column. */
    assertEquals(mapper.map(Row.of(new DenseVector(new double[]{3.0, 4.0}), 3.0,
            new SparseVector(11, new int[]{0, 10}, new double[]{1.0, 4.0}))).getField(1),
        new SparseVector(14, new int[]{0, 1, 2, 3, 13}, new double[]{3.0, 4.0, 3.0, 1.0, 4.0}));
    assertEquals(mapper.getOutputSchema(), outSchema);
}
Example #12
Source Project: Alink Author: alibaba File: ModelExporterUtils.java License: Apache License 2.0
/**
 * Unpack a BatchOperator.
 */
private static BatchOperator unpackBatchOp(BatchOperator data, TableSchema schema) {
    DataSet<Row> rows = data.getDataSet();
    final TypeInformation[] types = schema.getFieldTypes();

    rows = rows.map(new RichMapFunction<Row, Row>() {
        private transient CsvParser parser;

        @Override
        public void open(Configuration parameters) throws Exception {
            parser = new CsvParser(types, "^", '\'');
        }

        @Override
        public Row map(Row value) throws Exception {
            return parser.parse((String) value.getField(1)).f1;
        }
    });

    return BatchOperator.fromTable(DataSetConversionUtil.toTable(data.getMLEnvironmentId(), rows, schema))
        .setMLEnvironmentId(data.getMLEnvironmentId());
}
Example #13
Source Project: Alink Author: alibaba File: StringParsersTest.java License: Apache License 2.0
@Test
public void testJsonParser() throws Exception {
    String jsonStr = "{\n" +
        " \"media_name\": \"Titanic\",\n" +
        " \"title\": \"Titanic\",\n" +
        " \"compare_point\": 0.0001,\n" +
        " \"spider_point\": 0.0000,\n" +
        " \"search_point\": 0.6,\n" +
        " \"collection_id\": 123456,\n" +
        " \"media_id\": 3214\n" +
        "}";

    String schemaStr = "media_name string, title string, compare_point double, spider_point double, search_point double, " +
        "collection_id bigint, media_id bigint";

    TableSchema schema = CsvUtil.schemaStr2Schema(schemaStr);
    StringParsers.JsonParser parser = new StringParsers.JsonParser(schema.getFieldNames(), schema.getFieldTypes());
    Tuple2<Boolean, Row> parsed = parser.parse(jsonStr);

    Assert.assertTrue(parsed.f0);
    Assert.assertEquals(parsed.f1.getArity(), 7);
}
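CsvUtil.schemaStr2Schema is Alink's shorthand for turning a comma-separated "name type" string into a TableSchema; a small sketch of it on its own:

TableSchema schema = CsvUtil.schemaStr2Schema("id bigint, score double");
// schema.getFieldNames() -> ["id", "score"]
// schema.getFieldTypes() -> [Types.LONG, Types.DOUBLE]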
Example #14
Source Project: flink Author: apache File: Elasticsearch7DynamicSinkFactoryTest.java License: Apache License 2.0
@Test
public void validateWrongHosts() {
    Elasticsearch7DynamicSinkFactory sinkFactory = new Elasticsearch7DynamicSinkFactory();

    thrown.expect(ValidationException.class);
    thrown.expectMessage(
        "Could not parse host 'wrong-host' in option 'hosts'. It should follow the format 'http://host_name:port'.");
    sinkFactory.createDynamicTableSink(
        context()
            .withSchema(TableSchema.builder()
                .field("a", DataTypes.TIME())
                .build())
            .withOption("index", "MyIndex")
            .withOption("hosts", "wrong-host")
            .build()
    );
}
Example #15
Source Project: Alink Author: alibaba File: DataSetConversionUtilTest.java License: Apache License 2.0
@Test
public void testForceType() {
    ExecutionEnvironment env = MLEnvironmentFactory.getDefault().getExecutionEnvironment();
    DataSet<Row> input = env.fromElements(Row.of("s1")).map(new GenericTypeMap());
    Table table2 = DataSetConversionUtil.toTable(
        MLEnvironmentFactory.DEFAULT_ML_ENVIRONMENT_ID,
        input,
        new String[]{"word"},
        new TypeInformation[]{TypeInformation.of(Integer.class)}
    );
    Assert.assertEquals(
        new TableSchema(new String[]{"word"}, new TypeInformation[]{TypeInformation.of(Integer.class)}),
        table2.getSchema()
    );
}
Example #16
Source Project: Alink Author: alibaba File: VectorMaxAbsScalerMapperTest.java License: Apache License 2.0
@Test
public void testSparse() throws Exception {
    Row[] rows = new Row[]{
        Row.of(0L, "{}", null),
        Row.of(1048576L, "[4.0,0.0,3.0]", null)
    };
    List<Row> model = Arrays.asList(rows);

    TableSchema dataSchema = new TableSchema(
        new String[]{"vec"},
        new TypeInformation<?>[]{Types.STRING}
    );
    Params params = new Params();

    VectorMaxAbsScalerModelMapper mapper = new VectorMaxAbsScalerModelMapper(modelSchema, dataSchema, params);
    mapper.loadModel(model);

    assertEquals(mapper.map(Row.of(new SparseVector(3, new int[]{0, 2}, new double[]{1.0, 2.0}))).getField(0),
        new SparseVector(3, new int[]{0, 2}, new double[]{0.25, 0.6666666666666666}));
}
Example #17
Source Project: flink Author: apache File: MaterializedCollectStreamResult.java License: Apache License 2.0
public MaterializedCollectStreamResult(
        TableSchema tableSchema,
        ExecutionConfig config,
        InetAddress gatewayAddress,
        int gatewayPort,
        int maxRowCount,
        ClassLoader classLoader) {
    this(
        tableSchema,
        config,
        gatewayAddress,
        gatewayPort,
        maxRowCount,
        computeMaterializedTableOvercommit(maxRowCount),
        classLoader);
}
Example #18
Source Project: Flink-CEPplus Author: ljygz File: CsvRowFormatFactoryTest.java License: Apache License 2.0
@Test
public void testSchemaDerivation() {
    final Map<String, String> properties = new HashMap<>();
    properties.putAll(new Schema().schema(TableSchema.fromTypeInfo(SCHEMA)).toProperties());
    properties.putAll(new Csv().deriveSchema().toProperties());

    final CsvRowSerializationSchema expectedSer = new CsvRowSerializationSchema.Builder(SCHEMA).build();
    final CsvRowDeserializationSchema expectedDeser = new CsvRowDeserializationSchema.Builder(SCHEMA).build();

    final SerializationSchema<?> actualSer = TableFactoryService
        .find(SerializationSchemaFactory.class, properties)
        .createSerializationSchema(properties);
    assertEquals(expectedSer, actualSer);

    final DeserializationSchema<?> actualDeser = TableFactoryService
        .find(DeserializationSchemaFactory.class, properties)
        .createDeserializationSchema(properties);
    assertEquals(expectedDeser, actualDeser);
}
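TableSchema.fromTypeInfo, used above to seed the Schema descriptor, converts a named row TypeInformation into a schema, and toRowType goes the other way; a round-trip sketch:

// using org.apache.flink.api.common.typeinfo.Types
TypeInformation<Row> rowType = Types.ROW_NAMED(
    new String[]{"a", "b"},
    Types.STRING, Types.INT);

TableSchema schema = TableSchema.fromTypeInfo(rowType);
TypeInformation<Row> back = schema.toRowType();
// back is again a row type with fields "a" and "b".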
Example #19
Source Project: Alink Author: alibaba File: KMeansOldModelMapper2Test.java License: Apache License 2.0
@Test
public void testHaversineDistance() throws Exception {
    Row[] rows = new Row[] {
        Row.of(0L, "{\"vectorCol\":null,\"latitudeCol\":\"\\\"f1\\\"\",\"longitudeCol\":\"\\\"f0\\\"\"," +
            "\"distanceType\":\"\\\"HAVERSINE\\\"\",\"k\":\"2\",\"modelSchema\":\"\\\"model_id bigint," +
            "model_info string\\\"\",\"isNewFormat\":\"true\",\"vectorSize\":\"2\"}"),
        Row.of(1048576L, "{\"center\":\"{\\\"data\\\":[8.33,9.0]}\",\"clusterId\":0,\"weight\":3.0}"),
        Row.of(2097152L, "{\"center\":\"{\\\"data\\\":[1.0,1.33]}\",\"clusterId\":1,\"weight\":3.0}")
    };
    List<Row> model = Arrays.asList(rows);

    TableSchema modelSchema = new KMeansModelDataConverter().getModelSchema();
    TableSchema dataSchema = new TableSchema(
        new String[] {"f0", "f1"},
        new TypeInformation<?>[] {Types.DOUBLE, Types.DOUBLE}
    );
    Params params = new Params()
        .set(KMeansPredictParams.PREDICTION_COL, "pred");

    KMeansModelMapper mapper = new KMeansModelMapper(modelSchema, dataSchema, params);
    mapper.loadModel(model);

    assertEquals(mapper.map(Row.of(0, 0)).getField(2), 1L);
    assertEquals(mapper.getOutputSchema(),
        new TableSchema(new String[] {"f0", "f1", "pred"},
            new TypeInformation<?>[] {Types.DOUBLE, Types.DOUBLE, Types.LONG}));
}
Example #20
Source Project: Alink Author: alibaba File: ModelMapBatchOp.java License: Apache License 2.0
@Override
public T linkFrom(BatchOperator<?>... inputs) {
    checkOpSize(2, inputs);
    try {
        BroadcastVariableModelSource modelSource = new BroadcastVariableModelSource(BROADCAST_MODEL_TABLE_NAME);
        ModelMapper mapper = this.mapperBuilder.apply(
            inputs[0].getSchema(),
            inputs[1].getSchema(),
            this.getParams());
        DataSet<Row> modelRows = inputs[0].getDataSet().rebalance();
        DataSet<Row> resultRows = inputs[1].getDataSet()
            .map(new ModelMapperAdapter(mapper, modelSource))
            .withBroadcastSet(modelRows, BROADCAST_MODEL_TABLE_NAME);
        TableSchema outputSchema = mapper.getOutputSchema();
        this.setOutput(resultRows, outputSchema);
        return (T) this;
    } catch (Exception ex) {
        throw new RuntimeException(ex);
    }
}
Example #21
Source Project: Alink Author: alibaba File: ImputerMapperTest.java License: Apache License 2.0
@Test
public void testMax() throws Exception {
    Row[] rows = new Row[]{
        Row.of(0L, "{\"selectedCols\":\"[\\\"f_double\\\",\\\"f_long\\\",\\\"f_int\\\"]\",\"strategy\":\"\\\"min\\\"\"}", null, null, null),
        Row.of(1048576L, "[2.0, 2.0, 2.0]", null, null, null)
    };
    List<Row> model = Arrays.asList(rows);

    TableSchema dataSchema = new TableSchema(
        new String[]{"f_string", "f_long", "f_int", "f_double", "f_boolean"},
        new TypeInformation<?>[]{Types.STRING, Types.LONG, Types.INT, Types.DOUBLE, Types.BOOLEAN}
    );
    Params params = new Params();

    ImputerModelMapper mapper = new ImputerModelMapper(modelSchema, dataSchema, params);
    mapper.loadModel(model);

    assertEquals(mapper.map(Row.of("a", null, null, null, true)).getField(1), 2L);
    assertEquals(mapper.map(Row.of("a", null, null, null, true)).getField(2), 2);
    assertEquals((double) mapper.map(Row.of("a", null, null, null, true)).getField(3), 2.0, 10e-4);
}
Example #22
Source Project: Alink Author: alibaba File: RichModelMapper.java License: Apache License 2.0
public RichModelMapper(TableSchema modelSchema, TableSchema dataSchema, Params params) {
    super(modelSchema, dataSchema, params);
    String[] reservedColNames = this.params.get(RichModelMapperParams.RESERVED_COLS);
    String predResultColName = this.params.get(RichModelMapperParams.PREDICTION_COL);
    TypeInformation predResultColType = initPredResultColType();
    isPredDetail = params.contains(RichModelMapperParams.PREDICTION_DETAIL_COL);
    if (isPredDetail) {
        String predDetailColName = params.get(RichModelMapperParams.PREDICTION_DETAIL_COL);
        this.outputColsHelper = new OutputColsHelper(
            dataSchema,
            new String[]{predResultColName, predDetailColName},
            new TypeInformation[]{predResultColType, Types.STRING},
            reservedColNames);
    } else {
        this.outputColsHelper = new OutputColsHelper(dataSchema, predResultColName, predResultColType, reservedColNames);
    }
}
Example #23
Source Project: flink Author: flink-tpc-ds File: Kafka09TableSourceSinkFactoryTest.java License: Apache License 2.0
@Override
protected KafkaTableSinkBase getExpectedKafkaTableSink(
        TableSchema schema,
        String topic,
        Properties properties,
        Optional<FlinkKafkaPartitioner<Row>> partitioner,
        SerializationSchema<Row> serializationSchema) {
    return new Kafka09TableSink(
        schema,
        topic,
        properties,
        partitioner,
        serializationSchema
    );
}
Example #24
Source Project: flink Author: apache File: HiveTableFactoryTest.java License: Apache License 2.0
@Test
public void testHiveTable() throws Exception {
    TableSchema schema = TableSchema.builder()
        .field("name", DataTypes.STRING())
        .field("age", DataTypes.INT())
        .build();

    Map<String, String> properties = new HashMap<>();
    properties.put(CatalogConfig.IS_GENERIC, String.valueOf(false));

    catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
    ObjectPath path = new ObjectPath("mydb", "mytable");
    CatalogTable table = new CatalogTableImpl(schema, properties, "hive table");
    catalog.createTable(path, table, true);

    Optional<TableFactory> opt = catalog.getTableFactory();
    assertTrue(opt.isPresent());
    HiveTableFactory tableFactory = (HiveTableFactory) opt.get();

    TableSink tableSink = tableFactory.createTableSink(new TableSinkFactoryContextImpl(
        ObjectIdentifier.of("mycatalog", "mydb", "mytable"), table, new Configuration(), true));
    assertTrue(tableSink instanceof HiveTableSink);

    TableSource tableSource = tableFactory.createTableSource(new TableSourceFactoryContextImpl(
        ObjectIdentifier.of("mycatalog", "mydb", "mytable"), table, new Configuration()));
    assertTrue(tableSource instanceof HiveTableSource);
}
Example #25
Source Project: flink Author: apache File: DataTypeUtilsTest.java License: Apache License 2.0
@Test
public void testExpandDistinctType() {
    FieldsDataType dataType = (FieldsDataType) ROW(
        FIELD("f0", INT()),
        FIELD("f1", STRING()),
        FIELD("f2", TIMESTAMP(5).bridgedTo(Timestamp.class)),
        FIELD("f3", TIMESTAMP(3)));

    LogicalType originalLogicalType = dataType.getLogicalType();
    DistinctType distinctLogicalType = DistinctType.newBuilder(
            ObjectIdentifier.of("catalog", "database", "type"),
            originalLogicalType)
        .build();
    DataType distinctDataType = new FieldsDataType(distinctLogicalType, dataType.getChildren());

    TableSchema schema = DataTypeUtils.expandCompositeTypeToSchema(distinctDataType);

    assertThat(
        schema,
        equalTo(
            TableSchema.builder()
                .field("f0", INT())
                .field("f1", STRING())
                .field("f2", TIMESTAMP(5).bridgedTo(Timestamp.class))
                .field("f3", TIMESTAMP(3).bridgedTo(LocalDateTime.class))
                .build()));
}
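The same utility also expands a plain (non-distinct) row type directly; a shorter sketch with the static imports used above:

TableSchema expanded = DataTypeUtils.expandCompositeTypeToSchema(
    ROW(
        FIELD("x", INT()),
        FIELD("y", STRING())));
// expanded has two fields: x INT, y STRING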
Example #26
Source Project: flink Author: apache File: MergeTableLikeUtilTest.java License: Apache License 2.0
@Test
public void mergeOverwritingGeneratedColumnsDuplicate() {
    TableSchema sourceSchema = TableSchema.builder()
        .field("one", DataTypes.INT())
        .field("two", DataTypes.INT(), "one + 1")
        .build();

    List<SqlNode> derivedColumns = Collections.singletonList(
        tableColumn("two", plus("one", "3")));

    Map<FeatureOption, MergingStrategy> mergingStrategies = getDefaultMergingStrategies();
    mergingStrategies.put(FeatureOption.GENERATED, MergingStrategy.OVERWRITING);

    TableSchema mergedSchema = util.mergeTables(
        mergingStrategies,
        sourceSchema,
        derivedColumns,
        Collections.emptyList(),
        null);

    TableSchema expectedSchema = TableSchema.builder()
        .field("one", DataTypes.INT())
        .field("two", DataTypes.INT(), "`one` + 3")
        .build();

    assertThat(mergedSchema, equalTo(expectedSchema));
}
Example #27
Source Project: Flink-CEPplus Author: ljygz File: LocalExecutorITCase.java License: Apache License 2.0
@Test
public void testTableSchema() throws Exception {
    final Executor executor = createDefaultExecutor(clusterClient);
    final SessionContext session = new SessionContext("test-session", new Environment());

    final TableSchema actualTableSchema = executor.getTableSchema(session, "TableNumber2");
    final TableSchema expectedTableSchema = new TableSchema(
        new String[] {"IntegerField2", "StringField2"},
        new TypeInformation[] {Types.INT, Types.STRING});

    assertEquals(expectedTableSchema, actualTableSchema);
}
Example #28
Source Project: Alink Author: alibaba File: VectorSizeHintMapperTest.java License: Apache License 2.0
@Test
public void testError() throws Exception {
    TableSchema schema = new TableSchema(
        new String[] {"vec"},
        new TypeInformation<?>[] {Types.STRING});

    Params params = new Params()
        .set(VectorSizeHintParams.SELECTED_COL, "vec")
        .set(VectorSizeHintParams.SIZE, 3);

    VectorSizeHintMapper mapper = new VectorSizeHintMapper(schema, params);
    assertEquals(mapper.getOutputSchema(),
        new TableSchema(new String[] {"vec"}, new TypeInformation<?>[] {VectorTypes.VECTOR}));
}