Java Code Examples for org.apache.flink.table.descriptors.Schema

The following examples show how to use org.apache.flink.table.descriptors.Schema. They are extracted from open source projects; the project, source file, and license are noted above each example.
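For orientation, here is a minimal, self-contained sketch of the descriptor itself (not taken from any of the projects below): a Schema declares named, typed fields, can mark time attributes such as a processing-time column, and flattens into a string-keyed property map via toProperties(), which is the form the table factories in the examples consume. The field names and the wrapper class are illustrative only.

import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.descriptors.Schema;

public class SchemaDescriptorSketch {

	public static void main(String[] args) {
		// Two physical fields plus a processing-time attribute.
		Schema schema = new Schema()
			.field("word", DataTypes.STRING())
			.field("frequency", DataTypes.BIGINT())
			.field("proc_time", DataTypes.TIMESTAMP(3)).proctime();

		// The descriptor flattens into "schema."-prefixed string properties,
		// the representation consumed by TableFactoryService in the examples below.
		Map<String, String> properties = schema.toProperties();
		properties.forEach((key, value) -> System.out.println(key + " = " + value));
	}
}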
Example 1
Source Project: Flink-CEPplus   Source File: CsvRowFormatFactoryTest.java    License: Apache License 2.0
@Test
public void testSchemaDerivation() {
	final Map<String, String> properties = new HashMap<>();
	properties.putAll(new Schema().schema(TableSchema.fromTypeInfo(SCHEMA)).toProperties());
	properties.putAll(new Csv().deriveSchema().toProperties());

	final CsvRowSerializationSchema expectedSer = new CsvRowSerializationSchema.Builder(SCHEMA).build();
	final CsvRowDeserializationSchema expectedDeser = new CsvRowDeserializationSchema.Builder(SCHEMA).build();

	final SerializationSchema<?> actualSer = TableFactoryService
		.find(SerializationSchemaFactory.class, properties)
		.createSerializationSchema(properties);

	assertEquals(expectedSer, actualSer);

	final DeserializationSchema<?> actualDeser = TableFactoryService
		.find(DeserializationSchemaFactory.class, properties)
		.createDeserializationSchema(properties);

	assertEquals(expectedDeser, actualDeser);
}
 
Example 2
Source Project: flink   Source File: TableFactoryService.java    License: Apache License 2.0
/**
 * Performs filtering for special cases (i.e. table format factories with schema derivation).
 */
private static List<String> filterSupportedPropertiesFactorySpecific(TableFactory factory, List<String> keys) {

	if (factory instanceof TableFormatFactory) {
		boolean includeSchema = ((TableFormatFactory) factory).supportsSchemaDerivation();
		return keys.stream().filter(k -> {
			if (includeSchema) {
				return k.startsWith(Schema.SCHEMA + ".") ||
					k.startsWith(FormatDescriptorValidator.FORMAT + ".");
			} else {
				return k.startsWith(FormatDescriptorValidator.FORMAT + ".");
			}
		}).collect(Collectors.toList());
	} else {
		return keys;
	}
}
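Its effect is easy to see on a small key list. The sketch below reproduces the same predicate with plain string prefixes, assuming Schema.SCHEMA resolves to "schema" and FormatDescriptorValidator.FORMAT to "format" (their values in Flink); it is an illustration rather than Flink code, and the property keys are made up.

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

public class FormatFactoryFilterSketch {

	public static void main(String[] args) {
		List<String> keys = Arrays.asList(
			"schema.0.name", "schema.0.data-type",
			"format.type", "format.field-delimiter",
			"connector.type");

		// A format factory that supports schema derivation may also consume "schema." keys ...
		System.out.println(filter(keys, true));   // [schema.0.name, schema.0.data-type, format.type, format.field-delimiter]
		// ... one without schema derivation only sees the "format." keys.
		System.out.println(filter(keys, false));  // [format.type, format.field-delimiter]
	}

	private static List<String> filter(List<String> keys, boolean includeSchema) {
		return keys.stream()
			.filter(k -> k.startsWith("format.") || (includeSchema && k.startsWith("schema.")))
			.collect(Collectors.toList());
	}
}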
 
Example 3
Source Project: flink   Source File: CatalogTableImpTest.java    License: Apache License 2.0
@Test
public void testToProperties() {
	TableSchema schema = createTableSchema();
	Map<String, String> prop = createProperties();
	CatalogTable table = new CatalogTableImpl(
		schema,
		createPartitionKeys(),
		prop,
		TEST
	);

	DescriptorProperties descriptorProperties = new DescriptorProperties();
	descriptorProperties.putProperties(table.toProperties());

	assertEquals(schema, descriptorProperties.getTableSchema(Schema.SCHEMA));
}
 
Example 4
Source Project: flink   Source File: CsvRowFormatFactoryTest.java    License: Apache License 2.0
@Test
public void testSchemaDerivation() {
	final Map<String, String> properties = new HashMap<>();
	properties.putAll(new Schema().schema(TableSchema.fromTypeInfo(SCHEMA)).toProperties());
	properties.putAll(new Csv().deriveSchema().toProperties());

	final CsvRowSerializationSchema expectedSer = new CsvRowSerializationSchema.Builder(SCHEMA).build();
	final CsvRowDeserializationSchema expectedDeser = new CsvRowDeserializationSchema.Builder(SCHEMA).build();

	final SerializationSchema<?> actualSer = TableFactoryService
		.find(SerializationSchemaFactory.class, properties)
		.createSerializationSchema(properties);

	assertEquals(expectedSer, actualSer);

	final DeserializationSchema<?> actualDeser = TableFactoryService
		.find(DeserializationSchemaFactory.class, properties)
		.createDeserializationSchema(properties);

	assertEquals(expectedDeser, actualDeser);
}
 
Example 5
/**
 * Processing time attribute should be of type TIMESTAMP.
 */
@Test (expected = ValidationException.class)
public void testWrongProcTimeAttributeType() {
    final Schema schema = new Schema()
            .field("name", DataTypes.STRING())
            .field("age", DataTypes.INT()).proctime();

    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);
    pravega.tableSourceReaderBuilder()
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);
    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(JSON)
            .withSchema(schema)
            .inAppendMode();
    final Map<String, String> propertiesMap = testDesc.toProperties();
    FlinkPravegaTableFactoryBase tableFactoryBase = new FlinkPravegaStreamTableSourceFactory();
    tableFactoryBase.createFlinkPravegaTableSource(propertiesMap);
    fail("Schema validation failed");
}
 
Example 6
/**
 * Rowtime attribute should be of type TIMESTAMP.
 */
@Test (expected = ValidationException.class)
public void testWrongRowTimeAttributeType() {
    final Schema schema = new Schema()
            .field("name", DataTypes.STRING())
            .field("age", DataTypes.INT()).rowtime(new Rowtime()
                                                            .timestampsFromField("age")
                                                            .watermarksFromStrategy(
                                                                    new BoundedOutOfOrderTimestamps(30000L)));
    Pravega pravega = new Pravega();
    Stream stream = Stream.of(SCOPE, STREAM);
    pravega.tableSourceReaderBuilder()
            .forStream(stream)
            .withPravegaConfig(PRAVEGA_CONFIG);
    final TestTableDescriptor testDesc = new TestTableDescriptor(pravega)
            .withFormat(JSON)
            .withSchema(schema)
            .inAppendMode();
    final Map<String, String> propertiesMap = testDesc.toProperties();
    FlinkPravegaTableFactoryBase tableFactoryBase = new FlinkPravegaStreamTableSourceFactory();
    tableFactoryBase.createFlinkPravegaTableSource(propertiesMap);
    fail("Schema validation failed");
}
 
Example 7
Source Project: flink   Source File: KafkaTableSourceSinkFactoryTestBase.java    License: Apache License 2.0
protected Map<String, String> createKafkaSourceProperties() {
	return new TestTableDescriptor(
			new Kafka()
				.version(getKafkaVersion())
				.topic(TOPIC)
				.properties(KAFKA_PROPERTIES)
				.sinkPartitionerRoundRobin() // test if accepted although not needed
				.startFromSpecificOffsets(OFFSETS))
			.withFormat(new TestTableFormat())
			.withSchema(
				new Schema()
					.field(FRUIT_NAME, DataTypes.STRING()).from(NAME)
					.field(COUNT, DataTypes.DECIMAL(38, 18)) // no from so it must match with the input
					.field(EVENT_TIME, DataTypes.TIMESTAMP(3)).rowtime(
						new Rowtime().timestampsFromField(TIME).watermarksPeriodicAscending())
					.field(PROC_TIME, DataTypes.TIMESTAMP(3)).proctime())
			.toProperties();
}
 
Example 8
Source Project: flink   Source File: KafkaTableSourceSinkFactoryTestBase.java    License: Apache License 2.0
protected Map<String, String> createKafkaSinkProperties() {
	return new TestTableDescriptor(
		new Kafka()
			.version(getKafkaVersion())
			.topic(TOPIC)
			.properties(KAFKA_PROPERTIES)
			.sinkPartitionerFixed()
			.startFromSpecificOffsets(OFFSETS)) // test if they accepted although not needed
		.withFormat(new TestTableFormat())
		.withSchema(
			new Schema()
				.field(FRUIT_NAME, DataTypes.STRING())
				.field(COUNT, DataTypes.DECIMAL(10, 4))
				.field(EVENT_TIME, DataTypes.TIMESTAMP(3)))
		.inAppendMode()
		.toProperties();
}
 
Example 9
Source Project: flink   Source File: CsvRowFormatFactoryTest.java    License: Apache License 2.0
@Test
public void testSchemaDerivation() {
	final Map<String, String> properties = new HashMap<>();
	properties.putAll(new Schema().schema(TableSchema.fromTypeInfo(SCHEMA)).toProperties());
	properties.putAll(new Csv().toProperties());

	final CsvRowSerializationSchema expectedSer = new CsvRowSerializationSchema.Builder(SCHEMA).build();
	final CsvRowDeserializationSchema expectedDeser = new CsvRowDeserializationSchema.Builder(SCHEMA).build();

	final SerializationSchema<?> actualSer = TableFactoryService
		.find(SerializationSchemaFactory.class, properties)
		.createSerializationSchema(properties);

	assertEquals(expectedSer, actualSer);

	final DeserializationSchema<?> actualDeser = TableFactoryService
		.find(DeserializationSchemaFactory.class, properties)
		.createDeserializationSchema(properties);

	assertEquals(expectedDeser, actualDeser);
}
 
Example 10
Source Project: Flink-CEPplus   Source File: JsonRowFormatFactoryTest.java    License: Apache License 2.0
@Test
public void testSchemaDerivation() {
	final Map<String, String> properties = toMap(
		new Schema()
			.field("field1", Types.BOOLEAN())
			.field("field2", Types.INT())
			.field("proctime", Types.SQL_TIMESTAMP()).proctime(),
		new Json()
			.deriveSchema());

	testSchemaSerializationSchema(properties);

	testSchemaDeserializationSchema(properties);
}
 
Example 11
Source Project: flink-learning   Source File: TableExampleWordCount.java    License: Apache License 2.0
public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment blinkStreamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
    blinkStreamEnv.setParallelism(1);
    EnvironmentSettings blinkStreamSettings = EnvironmentSettings.newInstance()
            .useBlinkPlanner()
            .inStreamingMode()
            .build();
    StreamTableEnvironment blinkStreamTableEnv = StreamTableEnvironment.create(blinkStreamEnv, blinkStreamSettings);

    String path = TableExampleWordCount.class.getClassLoader().getResource("words.txt").getPath();
    blinkStreamTableEnv
            .connect(new FileSystem().path(path))
            .withFormat(new OldCsv().field("word", Types.STRING).lineDelimiter("\n"))
            .withSchema(new Schema().field("word", Types.STRING))
            .inAppendMode()
            .registerTableSource("FlieSourceTable");

    Table wordWithCount = blinkStreamTableEnv.scan("FlieSourceTable")
            .groupBy("word")
            .select("word,count(word) as _count");
    blinkStreamTableEnv.toRetractStream(wordWithCount, Row.class).print();

    // The true/false flag in the printed output may look puzzling: why the extra field?
    // The retract sink deletes before it inserts: false marks the retraction of the previous record, true marks the insertion of the new one.

    blinkStreamTableEnv.execute("Blink Stream SQL Job");
}
 
Example 12
Source Project: flink   Source File: TableEnvironmentTest.java    License: Apache License 2.0
@Test
public void testConnect() throws Exception {
	final TableEnvironmentMock tableEnv = TableEnvironmentMock.getStreamingInstance();

	tableEnv
		.connect(new ConnectorDescriptorMock(TableSourceFactoryMock.CONNECTOR_TYPE_VALUE, 1, true))
		.withFormat(new FormatDescriptorMock("my_format", 1))
		.withSchema(new Schema()
			.field("my_field_0", "INT")
			.field("my_field_1", "BOOLEAN"))
		.inAppendMode()
		.registerTableSource("my_table");

	final Catalog catalog = tableEnv.getCatalog(EnvironmentSettings.DEFAULT_BUILTIN_CATALOG)
		.orElseThrow(AssertionError::new);

	final CatalogBaseTable table = catalog
		.getTable(new ObjectPath(EnvironmentSettings.DEFAULT_BUILTIN_DATABASE, "my_table"));

	assertThat(
		table.getSchema(),
		equalTo(
			TableSchema.builder()
				.field("my_field_0", DataTypes.INT())
				.field("my_field_1", DataTypes.BOOLEAN())
				.build()));

	final ConnectorCatalogTable<?, ?> connectorCatalogTable = (ConnectorCatalogTable<?, ?>) table;

	assertThat(
		connectorCatalogTable.getTableSource().isPresent(),
		equalTo(true));
}
 
Example 13
Source Project: flink   Source File: TableSourceFactoryMock.java    License: Apache License 2.0
@Override
public TableSource<Row> createTableSource(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = new DescriptorProperties();
	descriptorProperties.putProperties(properties);
	final TableSchema schema = descriptorProperties.getTableSchema(Schema.SCHEMA);
	return new TableSourceMock(schema.toRowDataType(), schema);
}
 
Example 14
Source Project: flink   Source File: JsonRowFormatFactoryTest.java    License: Apache License 2.0
@Test
public void testSchemaDerivation() {
	final Map<String, String> properties = toMap(
		new Schema()
			.field("field1", Types.BOOLEAN())
			.field("field2", Types.INT())
			.field("proctime", Types.SQL_TIMESTAMP()).proctime(),
		new Json()
			.deriveSchema());

	testSchemaSerializationSchema(properties);

	testSchemaDeserializationSchema(properties);
}
 
Example 15
Source Project: flink-connectors   Source File: FlinkTableITCase.java    License: Apache License 2.0
@Test
public void testBatchTableSinkUsingDescriptor() throws Exception {

    // create a Pravega stream for test purposes
    Stream stream = Stream.of(setupUtils.getScope(), "testBatchTableSinkUsingDescriptor");
    this.setupUtils.createTestStream(stream.getStreamName(), 1);

    // create a Flink Table environment
    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(1);
    BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env);

    Table table = tableEnv.fromDataSet(env.fromCollection(SAMPLES));

    Pravega pravega = new Pravega();
    pravega.tableSinkWriterBuilder()
            .withRoutingKeyField("category")
            .forStream(stream)
            .withPravegaConfig(setupUtils.getPravegaConfig());

    ConnectTableDescriptor desc = tableEnv.connect(pravega)
            .withFormat(new Json().failOnMissingField(true))
            .withSchema(new Schema().field("category", DataTypes.STRING()).
                    field("value", DataTypes.INT()));
    desc.createTemporaryTable("test");

    final Map<String, String> propertiesMap = desc.toProperties();
    final TableSink<?> sink = TableFactoryService.find(BatchTableSinkFactory.class, propertiesMap)
            .createBatchTableSink(propertiesMap);

    String tableSinkPath = tableEnv.getCurrentDatabase() + "." + "PravegaSink";

    ConnectorCatalogTable<?, ?> connectorCatalogSinkTable = ConnectorCatalogTable.sink(sink, true);

    tableEnv.getCatalog(tableEnv.getCurrentCatalog()).get().createTable(
            ObjectPath.fromString(tableSinkPath),
            connectorCatalogSinkTable, false);
    table.insertInto("PravegaSink");
    env.execute();
}
 
Example 16
Source Project: bahir-flink   Source File: RedisDescriptorTest.java    License: Apache License 2.0
@Test
public void testRedisDescriptor() throws Exception {
    DataStreamSource<Row> source = (DataStreamSource<Row>) env.addSource(new TestSourceFunctionString())
            .returns(new RowTypeInfo(TypeInformation.of(String.class), TypeInformation.of(Long.class)));

    EnvironmentSettings settings = EnvironmentSettings
            .newInstance()
            .useOldPlanner()
            .inStreamingMode()
            .build();
    StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(env, settings);
    tableEnvironment.registerDataStream("t1", source, "k, v");

    Redis redis = new Redis()
            .mode(RedisValidator.REDIS_CLUSTER)
            .command(RedisCommand.INCRBY_EX.name())
            .ttl(100000)
            .property(RedisValidator.REDIS_NODES, REDIS_HOST+ ":" + REDIS_PORT);

    tableEnvironment
            .connect(redis).withSchema(new Schema()
            .field("k", TypeInformation.of(String.class))
            .field("v", TypeInformation.of(Long.class)))
            .registerTableSink("redis");


    tableEnvironment.sqlUpdate("insert into redis select k, v from t1");
    env.execute("Test Redis Table");
}
 
Example 17
protected Map<String, String> createElasticSearchProperties() {
	return new TestTableDescriptor(
		new Elasticsearch()
			.version(getElasticsearchVersion())
			.host(HOSTNAME, PORT, SCHEMA)
			.index(INDEX)
			.documentType(DOC_TYPE)
			.keyDelimiter(KEY_DELIMITER)
			.keyNullLiteral(KEY_NULL_LITERAL)
			.bulkFlushBackoffExponential()
			.bulkFlushBackoffDelay(123L)
			.bulkFlushBackoffMaxRetries(3)
			.bulkFlushInterval(100L)
			.bulkFlushMaxActions(1000)
			.bulkFlushMaxSize("1 MB")
			.failureHandlerCustom(DummyFailureHandler.class)
			.connectionMaxRetryTimeout(100)
			.connectionPathPrefix("/myapp"))
		.withFormat(
			new Json()
				.deriveSchema())
		.withSchema(
			new Schema()
				.field(FIELD_KEY, DataTypes.BIGINT())
				.field(FIELD_FRUIT_NAME, DataTypes.STRING())
				.field(FIELD_COUNT, DataTypes.DECIMAL(10, 4))
				.field(FIELD_TS, DataTypes.TIMESTAMP(3)))
		.inUpsertMode()
		.toProperties();
}
 
Example 18
Source Project: flink   Source File: CatalogTableImpl.java    License: Apache License 2.0
@Override
public Map<String, String> toProperties() {
	DescriptorProperties descriptor = new DescriptorProperties();

	descriptor.putTableSchema(Schema.SCHEMA, getSchema());
	descriptor.putPartitionKeys(getPartitionKeys());

	Map<String, String> properties = new HashMap<>(getProperties());
	properties.remove(CatalogConfig.IS_GENERIC);

	descriptor.putProperties(properties);

	return descriptor.asMap();
}
 
Example 19
Source Project: flink   Source File: CatalogTableImpl.java    License: Apache License 2.0
/**
 * Construct a {@link CatalogTableImpl} from complete properties that contains table schema.
 */
public static CatalogTableImpl fromProperties(Map<String, String> properties) {
	DescriptorProperties descriptorProperties = new DescriptorProperties();
	descriptorProperties.putProperties(properties);
	TableSchema tableSchema = descriptorProperties.getTableSchema(Schema.SCHEMA);
	List<String> partitionKeys = descriptorProperties.getPartitionKeys();
	return new CatalogTableImpl(
			tableSchema,
			partitionKeys,
			removeRedundant(properties, tableSchema, partitionKeys),
			""
	);
}
 
Example 20
Source Project: flink   Source File: CatalogTableImpl.java    License: Apache License 2.0
/**
 * Construct catalog table properties from {@link #toProperties()}.
 */
public static Map<String, String> removeRedundant(
		Map<String, String> properties,
		TableSchema schema,
		List<String> partitionKeys) {
	Map<String, String> ret = new HashMap<>(properties);
	DescriptorProperties descriptorProperties = new DescriptorProperties();
	descriptorProperties.putTableSchema(Schema.SCHEMA, schema);
	descriptorProperties.putPartitionKeys(partitionKeys);
	descriptorProperties.asMap().keySet().forEach(ret::remove);
	return ret;
}
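Taken together, the three CatalogTableImpl methods above round-trip a catalog table through properties: toProperties() flattens schema, partition keys, and options into one map, fromProperties() rebuilds the table from it, and removeRedundant() drops the keys already encoded by the schema and partition keys. Below is a minimal sketch of that round trip, not taken from the listed projects; the field names, the partition key, and the connector option are illustrative.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogTableImpl;

public class CatalogTableRoundTripSketch {

	public static void main(String[] args) {
		TableSchema schema = TableSchema.builder()
			.field("id", DataTypes.BIGINT())
			.field("name", DataTypes.STRING())
			.build();

		Map<String, String> options = new HashMap<>();
		options.put("connector.type", "filesystem"); // illustrative option

		CatalogTableImpl original = new CatalogTableImpl(
			schema, Collections.singletonList("name"), options, "a comment");

		// Flatten to string properties and rebuild; the schema survives the round trip.
		Map<String, String> properties = original.toProperties();
		CatalogTableImpl restored = CatalogTableImpl.fromProperties(properties);

		System.out.println(schema.equals(restored.getSchema())); // true
	}
}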
 
Example 21
Source Project: flink   Source File: TableEnvironmentTest.java    License: Apache License 2.0
@Test
public void testConnect() {
	final TableEnvironmentMock tableEnv = TableEnvironmentMock.getStreamingInstance();

	tableEnv
		.connect(new ConnectorDescriptorMock(TableSourceFactoryMock.CONNECTOR_TYPE_VALUE, 1, true))
		.withFormat(new FormatDescriptorMock("my_format", 1))
		.withSchema(new Schema()
			.field("my_field_0", "INT")
			.field("my_field_1", "BOOLEAN")
			.field("my_part_1", "BIGINT")
			.field("my_part_2", "STRING"))
		.withPartitionKeys(Arrays.asList("my_part_1", "my_part_2"))
		.inAppendMode()
		.createTemporaryTable("my_table");

	CatalogManager.TableLookupResult lookupResult = tableEnv.catalogManager.getTable(ObjectIdentifier.of(
		EnvironmentSettings.DEFAULT_BUILTIN_CATALOG,
		EnvironmentSettings.DEFAULT_BUILTIN_DATABASE,
		"my_table"))
		.orElseThrow(AssertionError::new);

	assertThat(lookupResult.isTemporary(), equalTo(true));

	CatalogBaseTable catalogBaseTable = lookupResult.getTable();
	assertTrue(catalogBaseTable instanceof CatalogTable);
	CatalogTable table = (CatalogTable) catalogBaseTable;
	assertCatalogTable(table);
	assertCatalogTable(CatalogTableImpl.fromProperties(table.toProperties()));
}
 
Example 22
Source Project: flink   Source File: TableSourceFactoryMock.java    License: Apache License 2.0
@Override
public TableSource<Row> createTableSource(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = new DescriptorProperties();
	descriptorProperties.putProperties(properties);
	final TableSchema schema = TableSchemaUtils.getPhysicalSchema(
		descriptorProperties.getTableSchema(Schema.SCHEMA));
	return new TableSourceMock(schema);
}
 
Example 23
Source Project: flink   Source File: JsonRowFormatFactoryTest.java    License: Apache License 2.0
@Test
public void testSchemaDerivationByDefault() {
	final Map<String, String> properties = toMap(
		new Schema()
			.field("field1", Types.BOOLEAN())
			.field("field2", Types.INT())
			.field("proctime", Types.SQL_TIMESTAMP()).proctime(),
		new Json());

	testSchemaSerializationSchema(properties);

	testSchemaDeserializationSchema(properties);
}
 
Example 24
@Test
public void testTableSink() {
	// prepare parameters for Elasticsearch table sink

	final TableSchema schema = createTestSchema();

	final ElasticsearchUpsertTableSinkBase expectedSink = getExpectedTableSink(
		false,
		schema,
		Collections.singletonList(new Host(HOSTNAME, PORT, SCHEMA)),
		INDEX,
		DOC_TYPE,
		KEY_DELIMITER,
		KEY_NULL_LITERAL,
		new JsonRowSerializationSchema(schema.toRowType()),
		XContentType.JSON,
		new DummyFailureHandler(),
		createTestSinkOptions());

	// construct table sink using descriptors and table sink factory

	final TestTableDescriptor testDesc = new TestTableDescriptor(
			new Elasticsearch()
				.version(getElasticsearchVersion())
				.host(HOSTNAME, PORT, SCHEMA)
				.index(INDEX)
				.documentType(DOC_TYPE)
				.keyDelimiter(KEY_DELIMITER)
				.keyNullLiteral(KEY_NULL_LITERAL)
				.bulkFlushBackoffExponential()
				.bulkFlushBackoffDelay(123L)
				.bulkFlushBackoffMaxRetries(3)
				.bulkFlushInterval(100L)
				.bulkFlushMaxActions(1000)
				.bulkFlushMaxSize("1 MB")
				.failureHandlerCustom(DummyFailureHandler.class)
				.connectionMaxRetryTimeout(100)
				.connectionPathPrefix("/myapp"))
		.withFormat(
			new Json()
				.deriveSchema())
		.withSchema(
			new Schema()
				.field(FIELD_KEY, Types.LONG())
				.field(FIELD_FRUIT_NAME, Types.STRING())
				.field(FIELD_COUNT, Types.DECIMAL())
				.field(FIELD_TS, Types.SQL_TIMESTAMP()))
		.inUpsertMode();

	final Map<String, String> propertiesMap = testDesc.toProperties();
	final TableSink<?> actualSink = TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
		.createStreamTableSink(propertiesMap);

	assertEquals(expectedSink, actualSink);
}
 
Example 25
@Test
@SuppressWarnings("unchecked")
public void testTableSource() {

	// prepare parameters for Kafka table source

	final TableSchema schema = TableSchema.builder()
		.field(FRUIT_NAME, Types.STRING())
		.field(COUNT, Types.DECIMAL())
		.field(EVENT_TIME, Types.SQL_TIMESTAMP())
		.field(PROC_TIME, Types.SQL_TIMESTAMP())
		.build();

	final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors = Collections.singletonList(
		new RowtimeAttributeDescriptor(EVENT_TIME, new ExistingField(TIME), new AscendingTimestamps()));

	final Map<String, String> fieldMapping = new HashMap<>();
	fieldMapping.put(FRUIT_NAME, NAME);
	fieldMapping.put(NAME, NAME);
	fieldMapping.put(COUNT, COUNT);
	fieldMapping.put(TIME, TIME);

	final Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
	specificOffsets.put(new KafkaTopicPartition(TOPIC, PARTITION_0), OFFSET_0);
	specificOffsets.put(new KafkaTopicPartition(TOPIC, PARTITION_1), OFFSET_1);

	final TestDeserializationSchema deserializationSchema = new TestDeserializationSchema(
		TableSchema.builder()
			.field(NAME, Types.STRING())
			.field(COUNT, Types.DECIMAL())
			.field(TIME, Types.SQL_TIMESTAMP())
			.build()
			.toRowType()
	);

	final KafkaTableSourceBase expected = getExpectedKafkaTableSource(
		schema,
		Optional.of(PROC_TIME),
		rowtimeAttributeDescriptors,
		fieldMapping,
		TOPIC,
		KAFKA_PROPERTIES,
		deserializationSchema,
		StartupMode.SPECIFIC_OFFSETS,
		specificOffsets);

	TableSourceUtil.validateTableSource(expected);

	// construct table source using descriptors and table source factory

	final TestTableDescriptor testDesc = new TestTableDescriptor(
			new Kafka()
				.version(getKafkaVersion())
				.topic(TOPIC)
				.properties(KAFKA_PROPERTIES)
				.sinkPartitionerRoundRobin() // test if accepted although not needed
				.startFromSpecificOffsets(OFFSETS))
		.withFormat(new TestTableFormat())
		.withSchema(
			new Schema()
				.field(FRUIT_NAME, Types.STRING()).from(NAME)
				.field(COUNT, Types.DECIMAL()) // no from so it must match with the input
				.field(EVENT_TIME, Types.SQL_TIMESTAMP()).rowtime(
					new Rowtime().timestampsFromField(TIME).watermarksPeriodicAscending())
				.field(PROC_TIME, Types.SQL_TIMESTAMP()).proctime())
		.inAppendMode();

	final Map<String, String> propertiesMap = testDesc.toProperties();
	final TableSource<?> actualSource = TableFactoryService.find(StreamTableSourceFactory.class, propertiesMap)
		.createStreamTableSource(propertiesMap);

	assertEquals(expected, actualSource);

	// test Kafka consumer
	final KafkaTableSourceBase actualKafkaSource = (KafkaTableSourceBase) actualSource;
	final StreamExecutionEnvironmentMock mock = new StreamExecutionEnvironmentMock();
	actualKafkaSource.getDataStream(mock);
	assertTrue(getExpectedFlinkKafkaConsumer().isAssignableFrom(mock.sourceFunction.getClass()));
}
 
Example 26
/**
 * This test can be unified with the corresponding source test once we have fixed FLINK-9870.
 */
@Test
public void testTableSink() {
	// prepare parameters for Kafka table sink

	final TableSchema schema = TableSchema.builder()
		.field(FRUIT_NAME, Types.STRING())
		.field(COUNT, Types.DECIMAL())
		.field(EVENT_TIME, Types.SQL_TIMESTAMP())
		.build();

	final KafkaTableSinkBase expected = getExpectedKafkaTableSink(
		schema,
		TOPIC,
		KAFKA_PROPERTIES,
		Optional.of(new FlinkFixedPartitioner<>()),
		new TestSerializationSchema(schema.toRowType()));

	// construct table sink using descriptors and table sink factory

	final TestTableDescriptor testDesc = new TestTableDescriptor(
			new Kafka()
				.version(getKafkaVersion())
				.topic(TOPIC)
				.properties(KAFKA_PROPERTIES)
				.sinkPartitionerFixed()
				.startFromSpecificOffsets(OFFSETS)) // test if they accepted although not needed
		.withFormat(new TestTableFormat())
		.withSchema(
			new Schema()
				.field(FRUIT_NAME, Types.STRING())
				.field(COUNT, Types.DECIMAL())
				.field(EVENT_TIME, Types.SQL_TIMESTAMP()))
		.inAppendMode();

	final Map<String, String> propertiesMap = testDesc.toProperties();
	final TableSink<?> actualSink = TableFactoryService.find(StreamTableSinkFactory.class, propertiesMap)
		.createStreamTableSink(propertiesMap);

	assertEquals(expected, actualSink);

	// test Kafka producer
	final KafkaTableSinkBase actualKafkaSink = (KafkaTableSinkBase) actualSink;
	final DataStreamMock streamMock = new DataStreamMock(new StreamExecutionEnvironmentMock(), schema.toRowType());
	actualKafkaSink.emitDataStream(streamMock);
	assertTrue(getExpectedFlinkKafkaProducer().isAssignableFrom(streamMock.sinkFunction.getClass()));
}