org.apache.flink.table.descriptors.DescriptorProperties Java Examples

The following examples show how to use org.apache.flink.table.descriptors.DescriptorProperties. Each example is extracted from an open source project; the source file, project, and license are noted above the code.
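Before working through the examples, here is a minimal sketch of the typical DescriptorProperties round trip: bulk-load string properties, validate them, then read them back with typed getters. The snippet is illustrative only and is not taken from any of the projects below; the key names are hypothetical.

// Illustrative sketch (not from the examples below); key names are hypothetical.
DescriptorProperties props = new DescriptorProperties(true); // true = normalize keys
Map<String, String> raw = new HashMap<>();
raw.put("connector.type", "kafka");
raw.put("connector.topic", "orders");
props.putProperties(raw);                           // bulk import of string properties
props.validateString("connector.topic", false, 1);  // required, minimum length 1
String topic = props.getString("connector.topic");  // typed read access
Map<String, String> flattened = props.asMap();      // export back to a flat map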
Example #1
Source File: HBaseConnectorITCase.java    From flink with Apache License 2.0
private static Map<String, String> hbaseTableProperties() {
	Map<String, String> properties = new HashMap<>();
	properties.put(CONNECTOR_TYPE, CONNECTOR_TYPE_VALUE_HBASE);
	properties.put(CONNECTOR_VERSION, CONNECTOR_VERSION_VALUE_143);
	properties.put(CONNECTOR_PROPERTY_VERSION, "1");
	properties.put(CONNECTOR_TABLE_NAME, TEST_TABLE_1);
	// get zk quorum from "hbase-site.xml" in classpath
	String hbaseZk = HBaseConfiguration.create().get(HConstants.ZOOKEEPER_QUORUM);
	properties.put(CONNECTOR_ZK_QUORUM, hbaseZk);
	// schema
	String[] columnNames = {FAMILY1, ROWKEY, FAMILY2, FAMILY3};
	TypeInformation<Row> f1 = Types.ROW_NAMED(new String[]{F1COL1}, Types.INT);
	TypeInformation<Row> f2 = Types.ROW_NAMED(new String[]{F2COL1, F2COL2}, Types.STRING, Types.LONG);
	TypeInformation<Row> f3 = Types.ROW_NAMED(new String[]{F3COL1, F3COL2, F3COL3}, Types.DOUBLE, Types.BOOLEAN, Types.STRING);
	TypeInformation[] columnTypes = new TypeInformation[]{f1, Types.INT, f2, f3};

	DescriptorProperties descriptorProperties = new DescriptorProperties(true);
	TableSchema tableSchema = new TableSchema(columnNames, columnTypes);
	descriptorProperties.putTableSchema(SCHEMA, tableSchema);
	descriptorProperties.putProperties(properties);
	return descriptorProperties.asMap();
}
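The schema stored with putTableSchema above can be recovered with getTableSchema, which is how the factory examples later on this page consume it. A minimal read-back sketch (variable names are illustrative):

// Illustrative read-back of the schema written by hbaseTableProperties().
DescriptorProperties readBack = new DescriptorProperties(true);
readBack.putProperties(hbaseTableProperties());
TableSchema restored = readBack.getTableSchema(SCHEMA); // same "schema" key prefix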
 
Example #2
Source File: FlinkCalciteCatalogReader.java    From flink with Apache License 2.0
/**
 * Checks whether the {@link CatalogTable} uses legacy connector source options.
 */
private static boolean isLegacySourceOptions(
		CatalogTable catalogTable,
		CatalogSchemaTable schemaTable) {
	// normalize option keys
	DescriptorProperties properties = new DescriptorProperties(true);
	properties.putProperties(catalogTable.getOptions());
	if (properties.containsKey(ConnectorDescriptorValidator.CONNECTOR_TYPE)) {
		return true;
	} else {
		// try to create legacy table source using the options,
		// some legacy factories use the new 'connector' key
		try {
			TableFactoryUtil.findAndCreateTableSource(
				schemaTable.getCatalog(),
				schemaTable.getTableIdentifier(),
				catalogTable,
				new Configuration());
			// success, then we will use the legacy factories
			return true;
		} catch (Throwable e) {
			// fail, then we will use new factories
			return false;
		}
	}
}
 
Example #3
Source File: KafkaTableSourceSinkFactoryBase.java    From Flink-CEPplus with Apache License 2.0
@Override
public StreamTableSource<Row> createStreamTableSource(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
	final DeserializationSchema<Row> deserializationSchema = getDeserializationSchema(properties);
	final StartupOptions startupOptions = getStartupOptions(descriptorProperties, topic);

	return createKafkaTableSource(
		descriptorProperties.getTableSchema(SCHEMA()),
		SchemaValidator.deriveProctimeAttribute(descriptorProperties),
		SchemaValidator.deriveRowtimeAttributes(descriptorProperties),
		SchemaValidator.deriveFieldMapping(
			descriptorProperties,
			Optional.of(deserializationSchema.getProducedType())),
		topic,
		getKafkaProperties(descriptorProperties),
		deserializationSchema,
		startupOptions.startupMode,
		startupOptions.specificOffsets);
}
 
Example #4
Source File: KafkaTableSourceSinkFactoryBase.java    From Flink-CEPplus with Apache License 2.0
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	final TableSchema schema = descriptorProperties.getTableSchema(SCHEMA());
	final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
	final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
	final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
		SchemaValidator.deriveRowtimeAttributes(descriptorProperties);

	// see also FLINK-9870
	if (proctime.isPresent() || !rowtimeAttributeDescriptors.isEmpty() ||
			checkForCustomFieldMapping(descriptorProperties, schema)) {
		throw new TableException("Time attributes and custom field mappings are not supported yet.");
	}

	return createKafkaTableSink(
		schema,
		topic,
		getKafkaProperties(descriptorProperties),
		getFlinkKafkaPartitioner(descriptorProperties),
		getSerializationSchema(properties));
}
 
Example #5
Source File: JsonRowFormatFactory.java    From flink with Apache License 2.0
@Override
public DeserializationSchema<Row> createDeserializationSchema(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	// create and configure
	final JsonRowDeserializationSchema.Builder schema =
		new JsonRowDeserializationSchema.Builder(createTypeInformation(descriptorProperties));

	descriptorProperties.getOptionalBoolean(JsonValidator.FORMAT_FAIL_ON_MISSING_FIELD)
		.ifPresent(flag -> {
			if (flag) {
				schema.failOnMissingField();
			}
		});
	descriptorProperties.getOptionalBoolean(JsonValidator.FORMAT_IGNORE_PARSE_ERRORS)
		.ifPresent(flag -> {
			if (flag) {
				schema.ignoreParseErrors();
			}
		});
	return schema.build();
}
 
Example #6
Source File: DatahubDescriptorValidator.java    From alibaba-flink-connectors with Apache License 2.0
@Override
public void validate(DescriptorProperties properties) {
	super.validate(properties);
	properties.validateValue(CONNECTOR_TYPE, getConnectorTypeValue(), false);
	properties.validateString(CONNECTOR_PROJECT, false, 1);
	properties.validateString(CONNECTOR_TOPIC, false, 1);
	properties.validateString(CONNECTOR_ACCESS_ID, false, 1);
	properties.validateString(CONNECTOR_ACCESS_KEY, false, 1);
	properties.validateString(CONNECTOR_ENDPOINT, false, 1);

	properties.validateInt(CONNECTOR_BUFFER_SIZE, true, 1);
	properties.validateInt(CONNECTOR_BATCH_SIZE, true, 1);
	properties.validateLong(CONNECTOR_BATCH_WRITE_TIMEOUT_IN_MILLS, true, 1);
	properties.validateInt(CONNECTOR_RETRY_TIMEOUT_IN_MILLS, true, 1);
	properties.validateInt(CONNECTOR_MAX_RETRY_TIMES, true, 1);
}
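A validator like this is driven by first wrapping the raw option map in DescriptorProperties; each validateXxx call throws a ValidationException when a key is missing or malformed. A hedged usage sketch (the no-arg constructor is an assumption):

// Illustrative invocation of the validator; rawOptions is a Map<String, String>.
DescriptorProperties props = new DescriptorProperties(true);
props.putProperties(rawOptions);
new DatahubDescriptorValidator().validate(props); // throws ValidationException on bad input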
 
Example #7
Source File: KafkaTableSourceSinkFactoryBase.java    From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked")
private Optional<FlinkKafkaPartitioner<Row>> getFlinkKafkaPartitioner(DescriptorProperties descriptorProperties) {
	return descriptorProperties
		.getOptionalString(CONNECTOR_SINK_PARTITIONER)
		.flatMap((String partitionerString) -> {
			switch (partitionerString) {
				case CONNECTOR_SINK_PARTITIONER_VALUE_FIXED:
					return Optional.of(new FlinkFixedPartitioner<>());
				case CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN:
					return Optional.empty();
				case CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM:
					final Class<? extends FlinkKafkaPartitioner> partitionerClass =
						descriptorProperties.getClass(CONNECTOR_SINK_PARTITIONER_CLASS, FlinkKafkaPartitioner.class);
					return Optional.of((FlinkKafkaPartitioner<Row>) InstantiationUtil.instantiate(partitionerClass));
				default:
					throw new TableException("Unsupported sink partitioner. Validator should have checked that.");
			}
		});
}
 
Example #8
Source File: AmbiguousTableFactoryException.java    From flink with Apache License 2.0
@Override
public String getMessage() {
	return String.format(
		"More than one suitable table factory for '%s' could be found in the classpath.\n\n" +
			"The following factories match:\n%s\n\n" +
			"The following properties are requested:\n%s\n\n" +
			"The following factories have been considered:\n%s",
		factoryClass.getName(),
		matchingFactories.stream()
			.map(p -> p.getClass().getName())
			.collect(Collectors.joining("\n")),
		DescriptorProperties.toString(properties),
		factories.stream()
			.map(p -> p.getClass().getName())
			.collect(Collectors.joining("\n"))
	);
}
 
Example #9
Source File: KafkaTableSourceSinkFactoryBase.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	final TableSchema schema = descriptorProperties.getTableSchema(SCHEMA);
	final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
	final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
	final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
		SchemaValidator.deriveRowtimeAttributes(descriptorProperties);

	// see also FLINK-9870
	if (proctime.isPresent() || !rowtimeAttributeDescriptors.isEmpty() ||
			checkForCustomFieldMapping(descriptorProperties, schema)) {
		throw new TableException("Time attributes and custom field mappings are not supported yet.");
	}

	return createKafkaTableSink(
		schema,
		topic,
		getKafkaProperties(descriptorProperties),
		getFlinkKafkaPartitioner(descriptorProperties),
		getSerializationSchema(properties));
}
 
Example #10
Source File: KafkaTableSourceSinkFactoryBase.java    From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private Optional<FlinkKafkaPartitioner<Row>> getFlinkKafkaPartitioner(DescriptorProperties descriptorProperties) {
	return descriptorProperties
		.getOptionalString(CONNECTOR_SINK_PARTITIONER)
		.flatMap((String partitionerString) -> {
			switch (partitionerString) {
				case CONNECTOR_SINK_PARTITIONER_VALUE_FIXED:
					return Optional.of(new FlinkFixedPartitioner<>());
				case CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN:
					return Optional.empty();
				case CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM:
					final Class<? extends FlinkKafkaPartitioner> partitionerClass =
						descriptorProperties.getClass(CONNECTOR_SINK_PARTITIONER_CLASS, FlinkKafkaPartitioner.class);
					return Optional.of((FlinkKafkaPartitioner<Row>) InstantiationUtil.instantiate(partitionerClass));
				default:
					throw new TableException("Unsupported sink partitioner. Validator should have checked that.");
			}
		});
}
 
Example #11
Source File: CsvTableSourceFactoryBase.java    From flink with Apache License 2.0
public List<String> supportedProperties() {
	List<String> properties = new ArrayList<>();
	// connector
	properties.add(CONNECTOR_PATH);
	// format
	properties.add(FORMAT_FIELDS + ".#." + DescriptorProperties.TABLE_SCHEMA_TYPE);
	properties.add(FORMAT_FIELDS + ".#." + DescriptorProperties.TABLE_SCHEMA_NAME);
	properties.add(FORMAT_FIELD_DELIMITER);
	properties.add(FORMAT_LINE_DELIMITER);
	properties.add(FORMAT_QUOTE_CHARACTER);
	properties.add(FORMAT_COMMENT_PREFIX);
	properties.add(FORMAT_IGNORE_FIRST_LINE);
	properties.add(FORMAT_IGNORE_PARSE_ERRORS);
	// schema
	properties.add(SCHEMA + ".#." + DescriptorProperties.TABLE_SCHEMA_TYPE);
	properties.add(SCHEMA + ".#." + DescriptorProperties.TABLE_SCHEMA_NAME);
	return properties;
}
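The '#' in these keys is an index wildcard: one supported-property entry such as schema.#.name covers every indexed occurrence in the flattened property map. The following key/value pairs are an illustrative expansion, not taken from the example above:

// Illustrative indexed keys matched by wildcard entries like "schema.#.name" / "schema.#.type".
Map<String, String> flattened = new HashMap<>();
flattened.put("schema.0.name", "id");
flattened.put("schema.0.type", "INT");
flattened.put("schema.1.name", "word");
flattened.put("schema.1.type", "VARCHAR");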
 
Example #12
Source File: TestTableSourceFactoryBase.java    From flink with Apache License 2.0
@Override
public List<String> supportedProperties() {
	final List<String> properties = new ArrayList<>();
	properties.add("connector." + testProperty);
	properties.add(SCHEMA + ".#." + SCHEMA_DATA_TYPE);
	properties.add(SCHEMA + ".#." + SCHEMA_TYPE);
	properties.add(SCHEMA + ".#." + SCHEMA_NAME);
	properties.add(SCHEMA + ".#." + EXPR);
	properties.add(SCHEMA + ".#." + ROWTIME_TIMESTAMPS_TYPE);
	properties.add(SCHEMA + ".#." + ROWTIME_TIMESTAMPS_FROM);
	properties.add(SCHEMA + ".#." + ROWTIME_WATERMARKS_TYPE);
	// watermark
	properties.add(SCHEMA + "." + WATERMARK + ".#."  + WATERMARK_ROWTIME);
	properties.add(SCHEMA + "." + WATERMARK + ".#."  + WATERMARK_STRATEGY_EXPR);
	properties.add(SCHEMA + "." + WATERMARK + ".#."  + WATERMARK_STRATEGY_DATA_TYPE);

	// table constraint
	properties.add(SCHEMA + "." + DescriptorProperties.PRIMARY_KEY_NAME);
	properties.add(SCHEMA + "." + DescriptorProperties.PRIMARY_KEY_COLUMNS);

	return properties;
}
 
Example #13
Source File: JdbcTableSourceSinkFactory.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
	DescriptorProperties descriptorProperties = getValidatedProperties(properties);
	TableSchema schema = TableSchemaUtils.getPhysicalSchema(
		descriptorProperties.getTableSchema(SCHEMA));

	final JdbcUpsertTableSink.Builder builder = JdbcUpsertTableSink.builder()
		.setOptions(getJdbcOptions(descriptorProperties))
		.setTableSchema(schema);

	descriptorProperties.getOptionalInt(CONNECTOR_WRITE_FLUSH_MAX_ROWS).ifPresent(builder::setFlushMaxSize);
	descriptorProperties.getOptionalDuration(CONNECTOR_WRITE_FLUSH_INTERVAL).ifPresent(
		s -> builder.setFlushIntervalMills(s.toMillis()));
	descriptorProperties.getOptionalInt(CONNECTOR_WRITE_MAX_RETRIES).ifPresent(builder::setMaxRetryTimes);

	return builder.build();
}
 
Example #14
Source File: HBaseTableFactoryTest.java    From flink with Apache License 2.0
private DescriptorProperties createDescriptor(TableSchema tableSchema) {
	Map<String, String> tableProperties = new HashMap<>();
	tableProperties.put("connector.type", "hbase");
	tableProperties.put("connector.version", "1.4.3");
	tableProperties.put("connector.property-version", "1");
	tableProperties.put("connector.table-name", "testHBastTable");
	tableProperties.put("connector.zookeeper.quorum", "localhost:2181");
	tableProperties.put("connector.zookeeper.znode.parent", "/flink");
	tableProperties.put("connector.write.buffer-flush.max-size", "10mb");
	tableProperties.put("connector.write.buffer-flush.max-rows", "1000");
	tableProperties.put("connector.write.buffer-flush.interval", "10s");

	DescriptorProperties descriptorProperties = new DescriptorProperties(true);
	descriptorProperties.putTableSchema(SCHEMA, tableSchema);
	descriptorProperties.putProperties(tableProperties);
	return descriptorProperties;
}
 
Example #15
Source File: JdbcCatalogFactory.java    From flink with Apache License 2.0
@Override
public Catalog createCatalog(String name, Map<String, String> properties) {
	final DescriptorProperties prop = getValidatedProperties(properties);

	return new JdbcCatalog(
		name,
		prop.getString(CATALOG_DEFAULT_DATABASE),
		prop.getString(CATALOG_JDBC_USERNAME),
		prop.getString(CATALOG_JDBC_PASSWORD),
		prop.getString(CATALOG_JDBC_BASE_URL));
}
 
Example #16
Source File: CatalogTableImpl.java    From flink with Apache License 2.0
/**
 * Construct a {@link CatalogTableImpl} from complete properties that contains table schema.
 */
public static CatalogTableImpl fromProperties(Map<String, String> properties) {
	DescriptorProperties descriptorProperties = new DescriptorProperties();
	descriptorProperties.putProperties(properties);
	TableSchema tableSchema = descriptorProperties.getTableSchema(Schema.SCHEMA);
	List<String> partitionKeys = descriptorProperties.getPartitionKeys();
	return new CatalogTableImpl(
			tableSchema,
			partitionKeys,
			removeRedundant(properties, tableSchema, partitionKeys),
			""
	);
}
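A hedged round-trip sketch for this factory method: serialize a schema into a flat property map with putTableSchema/asMap, then restore the table from it. It assumes a table without partition keys; someTableSchema is a placeholder:

// Illustrative round trip through CatalogTableImpl.fromProperties (no partition keys).
DescriptorProperties dp = new DescriptorProperties();
dp.putTableSchema(Schema.SCHEMA, someTableSchema); // same key read by fromProperties
CatalogTableImpl restored = CatalogTableImpl.fromProperties(dp.asMap());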
 
Example #17
Source File: KafkaTableSourceSinkFactoryBase.java    From flink with Apache License 2.0
private boolean checkForCustomFieldMapping(DescriptorProperties descriptorProperties, TableSchema schema) {
	final Map<String, String> fieldMapping = SchemaValidator.deriveFieldMapping(
		descriptorProperties,
		Optional.of(schema.toRowType())); // until FLINK-9870 is fixed we assume that the table schema is the output type
	return fieldMapping.size() != schema.getFieldNames().length ||
		!fieldMapping.entrySet().stream().allMatch(mapping -> mapping.getKey().equals(mapping.getValue()));
}
 
Example #18
Source File: HiveCatalogValidator.java    From flink with Apache License 2.0
@Override
public void validate(DescriptorProperties properties) {
	super.validate(properties);
	properties.validateValue(CATALOG_TYPE, CATALOG_TYPE_VALUE_HIVE, false);
	properties.validateString(CATALOG_HIVE_CONF_DIR, true, 1);
	properties.validateString(CATALOG_HIVE_VERSION, true, 1);
}
 
Example #19
Source File: TaxiFaresValidator.java    From flink-training-exercises with Apache License 2.0
@Override
public void validate(DescriptorProperties properties) {
	super.validate(properties);
	properties.validateValue(CONNECTOR_TYPE, CONNECTOR_TYPE_VALUE_TAXI_FARES, false);
	properties.validateString(CONNECTOR_PATH, false);
	properties.validateInt(CONNECTOR_MAX_EVENT_DELAY_SECS, true, 0);
	properties.validateInt(CONNECTOR_SERVING_SPEED_FACTOR, true, 1);
}
 
Example #20
Source File: ConfigEntry.java    From flink with Apache License 2.0
protected ConfigEntry(DescriptorProperties properties) {
	try {
		validate(properties);
	} catch (ValidationException e) {
		throw new SqlClientException("Invalid configuration entry.", e);
	}

	this.properties = properties;
}
 
Example #21
Source File: JDBCTableSourceSinkFactory.java    From flink with Apache License 2.0
private JDBCOptions getJDBCOptions(DescriptorProperties descriptorProperties) {
	final String url = descriptorProperties.getString(CONNECTOR_URL);
	final JDBCOptions.Builder builder = JDBCOptions.builder()
		.setDBUrl(url)
		.setTableName(descriptorProperties.getString(CONNECTOR_TABLE))
		.setDialect(JDBCDialects.get(url).get());

	descriptorProperties.getOptionalString(CONNECTOR_DRIVER).ifPresent(builder::setDriverName);
	descriptorProperties.getOptionalString(CONNECTOR_USERNAME).ifPresent(builder::setUsername);
	descriptorProperties.getOptionalString(CONNECTOR_PASSWORD).ifPresent(builder::setPassword);

	return builder.build();
}
 
Example #22
Source File: KuduTableFactory.java    From bahir-flink with Apache License 2.0
@Override
public KuduTableSink createTableSink(Map<String, String> properties) {
    DescriptorProperties descriptorProperties = getValidatedProps(properties);
    String tableName = descriptorProperties.getString(KUDU_TABLE);
    TableSchema schema = descriptorProperties.getTableSchema(SCHEMA);

    return createTableSink(tableName, schema, properties);
}
 
Example #23
Source File: PulsarTableSourceSinkFactory.java    From pulsar-flink with Apache License 2.0
private boolean checkForCustomFieldMapping(DescriptorProperties descriptorProperties, TableSchema schema) {
    final Map<String, String> fieldMapping = SchemaValidator.deriveFieldMapping(
            descriptorProperties,
            Optional.of(schema.toRowType())); // until FLINK-9870 is fixed we assume that the table schema is the output type
    return fieldMapping.size() != schema.getFieldNames().length ||
            !fieldMapping.entrySet().stream().allMatch(mapping -> mapping.getKey().equals(mapping.getValue()));
}
 
Example #24
Source File: HiveCatalogDescriptor.java    From flink with Apache License 2.0
@Override
protected Map<String, String> toCatalogProperties() {
	final DescriptorProperties properties = new DescriptorProperties();

	if (hiveSitePath != null) {
		properties.putString(CATALOG_HIVE_CONF_DIR, hiveSitePath);
	}

	if (hiveVersion != null) {
		properties.putString(CATALOG_HIVE_VERSION, hiveVersion);
	}

	return properties.asMap();
}
 
Example #25
Source File: HBaseTableFactory.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
	HBaseOptions.Builder hbaseOptionsBuilder = HBaseOptions.builder();
	hbaseOptionsBuilder.setZkQuorum(descriptorProperties.getString(CONNECTOR_ZK_QUORUM));
	hbaseOptionsBuilder.setTableName(descriptorProperties.getString(CONNECTOR_TABLE_NAME));
	descriptorProperties
		.getOptionalString(CONNECTOR_ZK_NODE_PARENT)
		.ifPresent(hbaseOptionsBuilder::setZkNodeParent);

	TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(
		descriptorProperties.getTableSchema(SCHEMA));
	HBaseTableSchema hbaseSchema = validateTableSchema(tableSchema);

	HBaseWriteOptions.Builder writeBuilder = HBaseWriteOptions.builder();
	descriptorProperties
		.getOptionalInt(CONNECTOR_WRITE_BUFFER_FLUSH_MAX_ROWS)
		.ifPresent(writeBuilder::setBufferFlushMaxRows);
	descriptorProperties
		.getOptionalMemorySize(CONNECTOR_WRITE_BUFFER_FLUSH_MAX_SIZE)
		.ifPresent(v -> writeBuilder.setBufferFlushMaxSizeInBytes(v.getBytes()));
	descriptorProperties
		.getOptionalDuration(CONNECTOR_WRITE_BUFFER_FLUSH_INTERVAL)
		.ifPresent(v -> writeBuilder.setBufferFlushIntervalMillis(v.toMillis()));

	return new HBaseUpsertTableSink(
		hbaseSchema,
		hbaseOptionsBuilder.build(),
		writeBuilder.build()
	);
}
 
Example #26
Source File: JsonRowFormatFactoryTest.java    From flink with Apache License 2.0
private static Map<String, String> toMap(Descriptor... desc) {
	final DescriptorProperties descriptorProperties = new DescriptorProperties();
	for (Descriptor d : desc) {
		descriptorProperties.putProperties(d.toProperties());
	}
	return descriptorProperties.asMap();
}
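A possible call site for this test helper, combining a schema descriptor with a JSON format descriptor. Treat the exact fluent calls as assumptions about the descriptor API in this Flink version:

// Illustrative usage of toMap(...) with two descriptors (assumed descriptor API).
Map<String, String> props = toMap(
	new Schema().field("id", Types.INT),       // schema descriptor
	new Json().failOnMissingField(false));     // JSON format descriptor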
 
Example #27
Source File: TableEntry.java    From flink with Apache License 2.0
private static TableEntry create(DescriptorProperties properties) {
	properties.validateString(TABLES_NAME, false, 1);
	properties.validateEnumValues(
		TABLES_TYPE,
		false,
		Arrays.asList(
			TABLES_TYPE_VALUE_SOURCE,
			TABLES_TYPE_VALUE_SOURCE_TABLE,
			TABLES_TYPE_VALUE_SINK,
			TABLES_TYPE_VALUE_SINK_TABLE,
			TABLES_TYPE_VALUE_BOTH,
			TABLES_TYPE_VALUE_SOURCE_SINK_TABLE,
			TABLES_TYPE_VALUE_VIEW,
			TABLES_TYPE_VALUE_TEMPORAL_TABLE));

	final String name = properties.getString(TABLES_NAME);

	final DescriptorProperties cleanedProperties =
		properties.withoutKeys(Arrays.asList(TABLES_NAME, TABLES_TYPE));

	switch (properties.getString(TABLES_TYPE)) {
		case TABLES_TYPE_VALUE_SOURCE:
		case TABLES_TYPE_VALUE_SOURCE_TABLE:
			return new SourceTableEntry(name, cleanedProperties);
		case TABLES_TYPE_VALUE_SINK:
		case TABLES_TYPE_VALUE_SINK_TABLE:
			return new SinkTableEntry(name, cleanedProperties);
		case TABLES_TYPE_VALUE_BOTH:
		case TABLES_TYPE_VALUE_SOURCE_SINK_TABLE:
			return new SourceSinkTableEntry(name, cleanedProperties);
		case TABLES_TYPE_VALUE_VIEW:
			return new ViewEntry(name, cleanedProperties);
		case TABLES_TYPE_VALUE_TEMPORAL_TABLE:
			return new TemporalTableEntry(name, cleanedProperties);
		default:
			throw new SqlClientException("Unexpected table type.");
	}
}
 
Example #28
Source File: TableEntry.java    From Flink-CEPplus with Apache License 2.0
private static TableEntry create(DescriptorProperties properties) {
	properties.validateString(TABLES_NAME, false, 1);
	properties.validateEnumValues(
		TABLES_TYPE,
		false,
		Arrays.asList(
			TABLES_TYPE_VALUE_SOURCE,
			TABLES_TYPE_VALUE_SOURCE_TABLE,
			TABLES_TYPE_VALUE_SINK,
			TABLES_TYPE_VALUE_SINK_TABLE,
			TABLES_TYPE_VALUE_BOTH,
			TABLES_TYPE_VALUE_SOURCE_SINK_TABLE,
			TABLES_TYPE_VALUE_VIEW,
			TABLES_TYPE_VALUE_TEMPORAL_TABLE));

	final String name = properties.getString(TABLES_NAME);

	final DescriptorProperties cleanedProperties =
		properties.withoutKeys(Arrays.asList(TABLES_NAME, TABLES_TYPE));

	switch (properties.getString(TABLES_TYPE)) {
		case TABLES_TYPE_VALUE_SOURCE:
		case TABLES_TYPE_VALUE_SOURCE_TABLE:
			return new SourceTableEntry(name, cleanedProperties);
		case TABLES_TYPE_VALUE_SINK:
		case TABLES_TYPE_VALUE_SINK_TABLE:
			return new SinkTableEntry(name, cleanedProperties);
		case TABLES_TYPE_VALUE_BOTH:
		case TABLES_TYPE_VALUE_SOURCE_SINK_TABLE:
			return new SourceSinkTableEntry(name, cleanedProperties);
		case TABLES_TYPE_VALUE_VIEW:
			return new ViewEntry(name, cleanedProperties);
		case TABLES_TYPE_VALUE_TEMPORAL_TABLE:
			return new TemporalTableEntry(name, cleanedProperties);
		default:
			throw new SqlClientException("Unexpected table type.");
	}
}
 
Example #29
Source File: TestTableSinkFactoryBase.java    From flink with Apache License 2.0
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties params = new DescriptorProperties(true);
	params.putProperties(properties);
	return new TestTableSink(
			SchemaValidator.deriveTableSinkSchema(params),
			properties.get(testProperty));
}
 
Example #30
Source File: TemporalTableEntry.java    From flink with Apache License 2.0
TemporalTableEntry(String name, DescriptorProperties properties) {
	super(name, properties);

	historyTable = properties.getString(TABLES_HISTORY_TABLE);
	primaryKeyFields = properties.getArray(TABLES_PRIMARY_KEY, properties::getString);
	timeAttribute = properties.getString(TABLES_TIME_ATTRIBUTE);
}
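Note that getArray collects the indexed variants of a key (e.g. hypothetical keys tables.primary-key.0, tables.primary-key.1, ...) into a list, applying the supplied per-element reader, here properties::getString, to each entry.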