Java Code Examples for org.apache.flink.table.utils.TableSchemaUtils#getPhysicalSchema()
The following examples show how to use org.apache.flink.table.utils.TableSchemaUtils#getPhysicalSchema(). This utility returns a copy of a TableSchema that contains only the table's physical columns, dropping computed columns, which is why connector factories call it before handing a schema to a source or sink: an external system can only read and write physical fields. The source project and license are noted above each example.
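Before the project examples, here is a minimal, self-contained sketch of the method's effect. It is not drawn from any project below; it assumes the (since-deprecated) TableSchema.Builder API from the same Flink generation as these examples, including its three-argument field(name, dataType, expression) overload for declaring a computed column, and the column names and expression are made up for illustration.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.utils.TableSchemaUtils;

public class PhysicalSchemaDemo {
    public static void main(String[] args) {
        // A schema with two physical columns and one computed column.
        // The three-argument field(...) overload declares "cost" as computed.
        TableSchema schema = TableSchema.builder()
            .field("id", DataTypes.BIGINT())                    // physical
            .field("price", DataTypes.DOUBLE())                 // physical
            .field("cost", DataTypes.DOUBLE(), "price * 0.8")   // computed (illustrative expression)
            .build();

        // getPhysicalSchema(...) keeps only "id" and "price";
        // the computed column "cost" is dropped.
        TableSchema physical = TableSchemaUtils.getPhysicalSchema(schema);
        System.out.println(physical);
    }
}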
Example 1
Source File: DataGenTableSourceFactory.java (from flink, Apache License 2.0)

@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    Configuration options = new Configuration();
    context.getCatalogTable().getOptions().forEach(options::setString);

    TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(
        context.getCatalogTable().getSchema());

    DataGenerator[] fieldGenerators = new DataGenerator[tableSchema.getFieldCount()];
    for (int i = 0; i < fieldGenerators.length; i++) {
        fieldGenerators[i] = createDataGenerator(
            tableSchema.getFieldName(i).get(),
            tableSchema.getFieldDataType(i).get(),
            options);
    }

    return new DataGenTableSource(fieldGenerators, tableSchema, options.get(ROWS_PER_SECOND));
}
Example 2
Source File: Elasticsearch7DynamicSinkFactory.java (from flink, Apache License 2.0)

@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    TableSchema tableSchema = context.getCatalogTable().getSchema();
    ElasticsearchValidationUtils.validatePrimaryKey(tableSchema);

    final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    final EncodingFormat<SerializationSchema<RowData>> format = helper.discoverEncodingFormat(
        SerializationFormatFactory.class,
        FORMAT_OPTION);

    helper.validate();
    Configuration configuration = new Configuration();
    context.getCatalogTable()
        .getOptions()
        .forEach(configuration::setString);
    Elasticsearch7Configuration config = new Elasticsearch7Configuration(configuration, context.getClassLoader());

    validate(config, configuration);

    return new Elasticsearch7DynamicSink(
        format,
        config,
        TableSchemaUtils.getPhysicalSchema(tableSchema));
}
Example 3
Source File: KafkaTableSourceSinkFactoryBase.java (from flink, Apache License 2.0)

@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = getValidatedProperties(properties);
    final TableSchema schema = TableSchemaUtils.getPhysicalSchema(
        descriptorProperties.getTableSchema(SCHEMA));
    final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
    final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
    final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
        SchemaValidator.deriveRowtimeAttributes(descriptorProperties);

    // see also FLINK-9870
    if (proctime.isPresent()
            || !rowtimeAttributeDescriptors.isEmpty()
            || checkForCustomFieldMapping(descriptorProperties, schema)) {
        throw new TableException("Time attributes and custom field mappings are not supported yet.");
    }

    return createKafkaTableSink(
        schema,
        topic,
        getKafkaProperties(descriptorProperties),
        getFlinkKafkaPartitioner(descriptorProperties),
        getSerializationSchema(properties));
}
Example 4
Source File: JdbcTableSourceSinkFactory.java (from flink, Apache License 2.0)

@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
    DescriptorProperties descriptorProperties = getValidatedProperties(properties);
    TableSchema schema = TableSchemaUtils.getPhysicalSchema(
        descriptorProperties.getTableSchema(SCHEMA));

    final JdbcUpsertTableSink.Builder builder = JdbcUpsertTableSink.builder()
        .setOptions(getJdbcOptions(descriptorProperties))
        .setTableSchema(schema);

    descriptorProperties.getOptionalInt(CONNECTOR_WRITE_FLUSH_MAX_ROWS).ifPresent(builder::setFlushMaxSize);
    descriptorProperties.getOptionalDuration(CONNECTOR_WRITE_FLUSH_INTERVAL).ifPresent(
        s -> builder.setFlushIntervalMills(s.toMillis()));
    descriptorProperties.getOptionalInt(CONNECTOR_WRITE_MAX_RETRIES).ifPresent(builder::setMaxRetryTimes);

    return builder.build();
}
Example 5
Source File: JdbcDynamicTableFactory.java (from flink, Apache License 2.0)

@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    final ReadableConfig config = helper.getOptions();

    helper.validate();
    validateConfigOptions(config);
    JdbcOptions jdbcOptions = getJdbcOptions(config);
    TableSchema physicalSchema = TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());

    return new JdbcDynamicTableSink(
        jdbcOptions,
        getJdbcExecutionOptions(config),
        getJdbcDmlOptions(jdbcOptions, physicalSchema),
        physicalSchema);
}
Example 6
Source File: JdbcValidator.java (from flink, Apache License 2.0)

private void validateCommonProperties(DescriptorProperties properties) {
    properties.validateString(CONNECTOR_URL, false, 1);
    properties.validateString(CONNECTOR_TABLE, false, 1);
    properties.validateString(CONNECTOR_DRIVER, true);
    properties.validateString(CONNECTOR_USERNAME, true);
    properties.validateString(CONNECTOR_PASSWORD, true);

    final String url = properties.getString(CONNECTOR_URL);
    final Optional<JdbcDialect> dialect = JdbcDialects.get(url);
    Preconditions.checkState(dialect.isPresent(), "Cannot handle such jdbc url: " + url);

    TableSchema schema = TableSchemaUtils.getPhysicalSchema(properties.getTableSchema(SCHEMA));
    dialect.get().validate(schema);

    Optional<String> password = properties.getOptionalString(CONNECTOR_PASSWORD);
    if (password.isPresent()) {
        Preconditions.checkArgument(
            properties.getOptionalString(CONNECTOR_USERNAME).isPresent(),
            "Database username must be provided when database password is provided");
    }
}
Example 7
Source File: HBaseTableFactory.java (from flink, Apache License 2.0)

@Override
public StreamTableSource<Row> createStreamTableSource(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

    // create default configuration from current runtime env (`hbase-site.xml` in classpath) first
    Configuration hbaseClientConf = HBaseConfigurationUtil.getHBaseConfiguration();
    String hbaseZk = descriptorProperties.getString(CONNECTOR_ZK_QUORUM);
    hbaseClientConf.set(HConstants.ZOOKEEPER_QUORUM, hbaseZk);
    descriptorProperties
        .getOptionalString(CONNECTOR_ZK_NODE_PARENT)
        .ifPresent(v -> hbaseClientConf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, v));

    String hTableName = descriptorProperties.getString(CONNECTOR_TABLE_NAME);
    TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(
        descriptorProperties.getTableSchema(SCHEMA));
    HBaseTableSchema hbaseSchema = validateTableSchema(tableSchema);
    return new HBaseTableSource(hbaseClientConf, hTableName, hbaseSchema, null);
}
Example 8
Source File: Elasticsearch6DynamicSinkFactory.java (from flink, Apache License 2.0)

@Override
public DynamicTableSink createDynamicTableSink(Context context) {
    TableSchema tableSchema = context.getCatalogTable().getSchema();
    ElasticsearchValidationUtils.validatePrimaryKey(tableSchema);

    final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    final EncodingFormat<SerializationSchema<RowData>> format = helper.discoverEncodingFormat(
        SerializationFormatFactory.class,
        FORMAT_OPTION);

    helper.validate();
    Configuration configuration = new Configuration();
    context.getCatalogTable()
        .getOptions()
        .forEach(configuration::setString);
    Elasticsearch6Configuration config = new Elasticsearch6Configuration(configuration, context.getClassLoader());

    validate(config, configuration);

    return new Elasticsearch6DynamicSink(
        format,
        config,
        TableSchemaUtils.getPhysicalSchema(tableSchema));
}
Example 9
Source File: TestValuesTableFactory.java (from flink, Apache License 2.0)

@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    helper.validate();
    ChangelogMode changelogMode = parseChangelogMode(helper.getOptions().get(CHANGELOG_MODE));
    String runtimeSource = helper.getOptions().get(RUNTIME_SOURCE);
    boolean isBounded = helper.getOptions().get(BOUNDED);
    String dataId = helper.getOptions().get(DATA_ID);
    String sourceClass = helper.getOptions().get(TABLE_SOURCE_CLASS);
    boolean isAsync = helper.getOptions().get(ASYNC_ENABLED);
    String lookupFunctionClass = helper.getOptions().get(LOOKUP_FUNCTION_CLASS);
    boolean nestedProjectionSupported = helper.getOptions().get(NESTED_PROJECTION_SUPPORTED);

    if (sourceClass.equals("DEFAULT")) {
        Collection<Row> data = registeredData.getOrDefault(dataId, Collections.emptyList());
        TableSchema physicalSchema = TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
        return new TestValuesTableSource(
            physicalSchema,
            changelogMode,
            isBounded,
            runtimeSource,
            data,
            isAsync,
            lookupFunctionClass,
            nestedProjectionSupported,
            null);
    } else {
        try {
            return InstantiationUtil.instantiate(
                sourceClass,
                DynamicTableSource.class,
                Thread.currentThread().getContextClassLoader());
        } catch (FlinkException e) {
            throw new RuntimeException("Can't instantiate class " + sourceClass, e);
        }
    }
}
Example 10
Source File: HiveTableSink.java (from flink, Apache License 2.0)

public HiveTableSink(
        boolean userMrWriter,
        boolean isBounded,
        JobConf jobConf,
        ObjectIdentifier identifier,
        CatalogTable table) {
    this.userMrWriter = userMrWriter;
    this.isBounded = isBounded;
    this.jobConf = jobConf;
    this.identifier = identifier;
    this.catalogTable = table;
    hiveVersion = Preconditions.checkNotNull(
        jobConf.get(HiveCatalogValidator.CATALOG_HIVE_VERSION),
        "Hive version is not defined");
    hiveShim = HiveShimLoader.loadHiveShim(hiveVersion);
    tableSchema = TableSchemaUtils.getPhysicalSchema(table.getSchema());
}
Example 11
Source File: HBaseTableFactory.java (from flink, Apache License 2.0)

@Override
public StreamTableSink<Tuple2<Boolean, Row>> createStreamTableSink(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

    HBaseOptions.Builder hbaseOptionsBuilder = HBaseOptions.builder();
    hbaseOptionsBuilder.setZkQuorum(descriptorProperties.getString(CONNECTOR_ZK_QUORUM));
    hbaseOptionsBuilder.setTableName(descriptorProperties.getString(CONNECTOR_TABLE_NAME));
    descriptorProperties
        .getOptionalString(CONNECTOR_ZK_NODE_PARENT)
        .ifPresent(hbaseOptionsBuilder::setZkNodeParent);

    TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(
        descriptorProperties.getTableSchema(SCHEMA));
    HBaseTableSchema hbaseSchema = validateTableSchema(tableSchema);

    HBaseWriteOptions.Builder writeBuilder = HBaseWriteOptions.builder();
    descriptorProperties
        .getOptionalInt(CONNECTOR_WRITE_BUFFER_FLUSH_MAX_ROWS)
        .ifPresent(writeBuilder::setBufferFlushMaxRows);
    descriptorProperties
        .getOptionalMemorySize(CONNECTOR_WRITE_BUFFER_FLUSH_MAX_SIZE)
        .ifPresent(v -> writeBuilder.setBufferFlushMaxSizeInBytes(v.getBytes()));
    descriptorProperties
        .getOptionalDuration(CONNECTOR_WRITE_BUFFER_FLUSH_INTERVAL)
        .ifPresent(v -> writeBuilder.setBufferFlushIntervalMillis(v.toMillis()));

    return new HBaseUpsertTableSink(
        hbaseSchema,
        hbaseOptionsBuilder.build(),
        writeBuilder.build()
    );
}
Example 12
Source File: JdbcDynamicTableFactory.java (from flink, Apache License 2.0)

@Override
public DynamicTableSource createDynamicTableSource(Context context) {
    final FactoryUtil.TableFactoryHelper helper = FactoryUtil.createTableFactoryHelper(this, context);
    final ReadableConfig config = helper.getOptions();

    helper.validate();
    validateConfigOptions(config);
    TableSchema physicalSchema = TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());
    return new JdbcDynamicTableSource(
        getJdbcOptions(helper.getOptions()),
        getJdbcReadOptions(helper.getOptions()),
        getJdbcLookupOptions(helper.getOptions()),
        physicalSchema);
}
Example 13
Source File: JdbcTableSourceSinkFactory.java (from flink, Apache License 2.0)

@Override
public StreamTableSource<Row> createStreamTableSource(Map<String, String> properties) {
    DescriptorProperties descriptorProperties = getValidatedProperties(properties);
    TableSchema schema = TableSchemaUtils.getPhysicalSchema(
        descriptorProperties.getTableSchema(SCHEMA));

    return JdbcTableSource.builder()
        .setOptions(getJdbcOptions(descriptorProperties))
        .setReadOptions(getJdbcReadOptions(descriptorProperties))
        .setLookupOptions(getJdbcLookupOptions(descriptorProperties))
        .setSchema(schema)
        .build();
}
Example 14
Source File: FlinkPravegaTableFactoryBase.java (from flink-connectors, Apache License 2.0)

@SuppressWarnings("unchecked")
protected FlinkPravegaTableSink createFlinkPravegaTableSink(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

    final TableSchema schema = TableSchemaUtils.getPhysicalSchema(descriptorProperties.getTableSchema(SCHEMA));
    SerializationSchema<Row> serializationSchema = getSerializationSchema(properties);

    ConnectorConfigurations connectorConfigurations = new ConnectorConfigurations();
    connectorConfigurations.parseConfigurations(descriptorProperties, ConnectorConfigurations.ConfigurationType.WRITER);

    TableSinkWriterBuilder tableSinkWriterBuilder = new Pravega().tableSinkWriterBuilder();
    if (connectorConfigurations.getTxnLeaseRenewalInterval().isPresent()) {
        tableSinkWriterBuilder.withTxnLeaseRenewalPeriod(
            Time.milliseconds(connectorConfigurations.getTxnLeaseRenewalInterval().get().longValue()));
    }
    if (connectorConfigurations.getWriterMode().isPresent()) {
        tableSinkWriterBuilder.withWriterMode(connectorConfigurations.getWriterMode().get());
    }
    if (connectorConfigurations.getMetrics().isPresent()) {
        tableSinkWriterBuilder.enableMetrics(connectorConfigurations.getMetrics().get());
    }
    if (connectorConfigurations.getWatermark().isPresent()) {
        tableSinkWriterBuilder.enableWatermark(connectorConfigurations.getWatermark().get());
    }
    tableSinkWriterBuilder.withPravegaConfig(connectorConfigurations.getPravegaConfig());
    tableSinkWriterBuilder.withRoutingKeyField(connectorConfigurations.getRoutingKey());
    tableSinkWriterBuilder.withSerializationSchema(serializationSchema);
    tableSinkWriterBuilder.forStream(connectorConfigurations.getWriterStream());
    tableSinkWriterBuilder.withPravegaConfig(connectorConfigurations.getPravegaConfig());

    return new FlinkPravegaTableSink(
        tableSinkWriterBuilder::createSinkFunction,
        tableSinkWriterBuilder::createOutputFormat,
        schema);
}
Example 15
Source File: CsvTableSinkFactoryBase.java (from flink, Apache License 2.0)

protected CsvTableSink createTableSink(
        Boolean isStreaming,
        Map<String, String> properties) {

    DescriptorProperties params = new DescriptorProperties();
    params.putProperties(properties);

    // validate
    new FileSystemValidator().validate(params);
    new OldCsvValidator().validate(params);
    new SchemaValidator(isStreaming, false, false).validate(params);

    // build
    TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(params.getTableSchema(SCHEMA));

    // if a schema is defined, no matter derive schema is set or not, will use the defined schema
    final boolean hasSchema = params.hasPrefix(FORMAT_FIELDS);
    if (hasSchema) {
        TableSchema formatSchema = params.getTableSchema(FORMAT_FIELDS);
        if (!getFieldLogicalTypes(formatSchema).equals(getFieldLogicalTypes(tableSchema))) {
            throw new TableException(String.format(
                "Encodings that differ from the schema are not supported yet for" +
                    " CsvTableSink, format schema is '%s', but table schema is '%s'.",
                formatSchema,
                tableSchema));
        }
    }

    String path = params.getString(CONNECTOR_PATH);
    String fieldDelimiter = params.getOptionalString(FORMAT_FIELD_DELIMITER).orElse(",");
    Optional<String> writeModeParm = params.getOptionalString(FORMAT_WRITE_MODE);
    FileSystem.WriteMode writeMode =
        (writeModeParm.isPresent()) ? FileSystem.WriteMode.valueOf(writeModeParm.get()) : null;
    int numFiles = params.getOptionalInt(FORMAT_NUM_FILES).orElse(-1);

    // bridge to java.sql.Timestamp/Time/Date
    DataType[] dataTypes = Arrays.stream(tableSchema.getFieldDataTypes())
        .map(dt -> {
            switch (dt.getLogicalType().getTypeRoot()) {
                case TIMESTAMP_WITHOUT_TIME_ZONE:
                    return dt.bridgedTo(Timestamp.class);
                case TIME_WITHOUT_TIME_ZONE:
                    return dt.bridgedTo(Time.class);
                case DATE:
                    return dt.bridgedTo(Date.class);
                default:
                    return dt;
            }
        })
        .toArray(DataType[]::new);

    return new CsvTableSink(
        path,
        fieldDelimiter,
        numFiles,
        writeMode,
        tableSchema.getFieldNames(),
        dataTypes);
}
Example 16
Source File: CsvTableSourceFactoryBase.java (from flink, Apache License 2.0)

protected CsvTableSource createTableSource(
        Boolean isStreaming,
        Map<String, String> properties) {

    DescriptorProperties params = new DescriptorProperties();
    params.putProperties(properties);

    // validate
    new FileSystemValidator().validate(params);
    new OldCsvValidator().validate(params);
    new SchemaValidator(isStreaming, false, false).validate(params);

    // build
    CsvTableSource.Builder csvTableSourceBuilder = new CsvTableSource.Builder();

    TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(params.getTableSchema(SCHEMA));

    // if a schema is defined, no matter derive schema is set or not, will use the defined schema
    final boolean hasSchema = params.hasPrefix(FORMAT_FIELDS);
    if (hasSchema) {
        TableSchema formatSchema = params.getTableSchema(FORMAT_FIELDS);
        // the CsvTableSource needs some rework first
        // for now the schema must be equal to the encoding
        // Ignore conversion classes in DataType
        if (!getFieldLogicalTypes(formatSchema).equals(getFieldLogicalTypes(tableSchema))) {
            throw new TableException(String.format(
                "Encodings that differ from the schema are not supported yet for" +
                    " CsvTableSource, format schema is '%s', but table schema is '%s'.",
                formatSchema,
                tableSchema));
        }
    }

    params.getOptionalString(CONNECTOR_PATH).ifPresent(csvTableSourceBuilder::path);
    params.getOptionalString(FORMAT_FIELD_DELIMITER).ifPresent(csvTableSourceBuilder::fieldDelimiter);
    params.getOptionalString(FORMAT_LINE_DELIMITER).ifPresent(csvTableSourceBuilder::lineDelimiter);

    for (int i = 0; i < tableSchema.getFieldCount(); ++i) {
        csvTableSourceBuilder.field(tableSchema.getFieldNames()[i], tableSchema.getFieldDataTypes()[i]);
    }
    params.getOptionalCharacter(FORMAT_QUOTE_CHARACTER).ifPresent(csvTableSourceBuilder::quoteCharacter);
    params.getOptionalString(FORMAT_COMMENT_PREFIX).ifPresent(csvTableSourceBuilder::commentPrefix);
    params.getOptionalBoolean(FORMAT_IGNORE_FIRST_LINE).ifPresent(flag -> {
        if (flag) {
            csvTableSourceBuilder.ignoreFirstLine();
        }
    });
    params.getOptionalBoolean(FORMAT_IGNORE_PARSE_ERRORS).ifPresent(flag -> {
        if (flag) {
            csvTableSourceBuilder.ignoreParseErrors();
        }
    });

    return csvTableSourceBuilder.build();
}
Example 17
Source File: FlinkPravegaTableFactoryBase.java (from flink-connectors, Apache License 2.0)

@SuppressWarnings("unchecked")
protected FlinkPravegaTableSource createFlinkPravegaTableSource(Map<String, String> properties) {
    final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

    final TableSchema schema = TableSchemaUtils.getPhysicalSchema(descriptorProperties.getTableSchema(SCHEMA));
    final DeserializationSchema<Row> deserializationSchema = getDeserializationSchema(properties);

    ConnectorConfigurations connectorConfigurations = new ConnectorConfigurations();
    connectorConfigurations.parseConfigurations(descriptorProperties, ConnectorConfigurations.ConfigurationType.READER);

    // create source from the reader builder by using the supplied properties
    TableSourceReaderBuilder tableSourceReaderBuilder = new Pravega().tableSourceReaderBuilder();
    tableSourceReaderBuilder.withDeserializationSchema(deserializationSchema);

    if (connectorConfigurations.getAssignerWithTimeWindows().isPresent()) {
        tableSourceReaderBuilder.withTimestampAssigner(connectorConfigurations.getAssignerWithTimeWindows().get());
    }
    if (connectorConfigurations.getUid().isPresent()) {
        tableSourceReaderBuilder.uid(connectorConfigurations.getUid().get());
    }
    if (connectorConfigurations.getRgScope().isPresent()) {
        tableSourceReaderBuilder.withReaderGroupScope(connectorConfigurations.getRgScope().get());
    }
    if (connectorConfigurations.getRgName().isPresent()) {
        tableSourceReaderBuilder.withReaderGroupName(connectorConfigurations.getRgName().get());
    }
    if (connectorConfigurations.getRefreshInterval().isPresent()) {
        tableSourceReaderBuilder.withReaderGroupRefreshTime(Time.milliseconds(connectorConfigurations.getRefreshInterval().get()));
    }
    if (connectorConfigurations.getEventReadTimeoutInterval().isPresent()) {
        tableSourceReaderBuilder.withEventReadTimeout(Time.milliseconds(connectorConfigurations.getEventReadTimeoutInterval().get()));
    }
    if (connectorConfigurations.getCheckpointInitiateTimeoutInterval().isPresent()) {
        tableSourceReaderBuilder.withCheckpointInitiateTimeout(Time.milliseconds(connectorConfigurations.getCheckpointInitiateTimeoutInterval().get()));
    }

    tableSourceReaderBuilder.withPravegaConfig(connectorConfigurations.getPravegaConfig());
    if (connectorConfigurations.getMetrics().isPresent()) {
        tableSourceReaderBuilder.enableMetrics(connectorConfigurations.getMetrics().get());
    }
    tableSourceReaderBuilder.withPravegaConfig(connectorConfigurations.getPravegaConfig());

    for (StreamWithBoundaries streamWithBoundaries : connectorConfigurations.getReaderStreams()) {
        if (streamWithBoundaries.getFrom() != StreamCut.UNBOUNDED && streamWithBoundaries.getTo() != StreamCut.UNBOUNDED) {
            tableSourceReaderBuilder.forStream(streamWithBoundaries.getStream(), streamWithBoundaries.getFrom(), streamWithBoundaries.getTo());
        } else if (streamWithBoundaries.getFrom() != StreamCut.UNBOUNDED) {
            tableSourceReaderBuilder.forStream(streamWithBoundaries.getStream(), streamWithBoundaries.getFrom());
        } else {
            tableSourceReaderBuilder.forStream(streamWithBoundaries.getStream());
        }
    }

    FlinkPravegaTableSource flinkPravegaTableSource = new FlinkPravegaTableSource(
        tableSourceReaderBuilder::buildSourceFunction,
        tableSourceReaderBuilder::buildInputFormat,
        schema);
    flinkPravegaTableSource.setRowtimeAttributeDescriptors(SchemaValidator.deriveRowtimeAttributes(descriptorProperties));
    Optional<String> procTimeAttribute = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
    if (procTimeAttribute.isPresent()) {
        flinkPravegaTableSource.setProctimeAttribute(procTimeAttribute.get());
    }

    return flinkPravegaTableSource;
}