org.apache.flink.table.calcite.FlinkTypeFactory Java Examples

The following examples show how to use org.apache.flink.table.calcite.FlinkTypeFactory. They are extracted from open source projects; each example lists its source file, originating project, and license.
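Before the examples, here is a minimal standalone sketch of the factory's typical round trip: creating a Calcite RelDataType and converting it back to a Flink TypeInformation. This sketch is not taken from any of the projects below; it assumes the legacy (pre-Blink) flink-table-planner on the classpath, the class name FlinkTypeFactoryDemo is made up for illustration, and the direct construction of the factory mirrors Example #14.

import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.sql.type.SqlTypeName;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.table.calcite.FlinkTypeFactory;
import org.apache.flink.table.calcite.FlinkTypeSystem;

public class FlinkTypeFactoryDemo {

	public static void main(String[] args) {
		// Inside the planner the factory usually comes from the surrounding
		// infrastructure (e.g. relBuilder.getTypeFactory() in Example #4);
		// constructing it directly, as Example #14 does, works standalone.
		FlinkTypeFactory typeFactory = new FlinkTypeFactory(new FlinkTypeSystem());

		// Create a Calcite type and convert it back to a Flink TypeInformation.
		RelDataType intType = typeFactory.createSqlType(SqlTypeName.INTEGER);
		TypeInformation<?> typeInfo = FlinkTypeFactory.toTypeInfo(intType);

		// Time indicator check, as used by several of the examples below;
		// a plain INTEGER is of course not a time indicator.
		boolean timeIndicator = FlinkTypeFactory.isTimeIndicatorType(intType);

		System.out.println(typeInfo + ", time indicator: " + timeIndicator);
	}
}

The static helpers FlinkTypeFactory.toTypeInfo and FlinkTypeFactory.isTimeIndicatorType exercised here are the ones that recur throughout the examples below.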
Example #1
Source File: SqlToOperationConverter.java    From flink with Apache License 2.0
/**
 * Create a table schema from {@link SqlCreateTable}. The schema is eventually meant to
 * include computed column fields as well. For example, given the create table DDL statement:
 * <blockquote><pre>
 *   create table t(
 *     a int,
 *     b varchar,
 *     c as to_timestamp(b))
 *   with (
 *     'connector' = 'csv',
 *     'k1' = 'v1')
 * </pre></blockquote>
 *
 * <p>the returned table schema would contain the columns (a:int, b:varchar, c:timestamp).
 * Computed columns are not supported yet, however, and currently cause a
 * {@link SqlConversionException}.
 *
 * @param sqlCreateTable sql create table node.
 * @param factory        FlinkTypeFactory instance.
 * @return TableSchema built from the physical columns of the table.
 */
private TableSchema createTableSchema(SqlCreateTable sqlCreateTable,
		FlinkTypeFactory factory) {
	// computed columns are rejected up front; only physical columns are converted
	if (sqlCreateTable.containsComputedColumn()) {
		throw new SqlConversionException("Computed columns for DDL is not supported yet!");
	}
	// set up the table columns
	SqlNodeList columnList = sqlCreateTable.getColumnList();
	TableSchema.Builder builder = new TableSchema.Builder();
	// collect the physical table schema
	final List<SqlNode> physicalColumns = columnList.getList().stream()
		.filter(n -> n instanceof SqlTableColumn).collect(Collectors.toList());
	for (SqlNode node : physicalColumns) {
		SqlTableColumn column = (SqlTableColumn) node;
		final RelDataType relType = column.getType().deriveType(factory,
			column.getType().getNullable());
		builder.field(column.getName().getSimple(),
			TypeConversions.fromLegacyInfoToDataType(FlinkTypeFactory.toTypeInfo(relType)));
	}
	return builder.build();
}
 
Example #2
Source File: PlannerQueryOperation.java    From flink with Apache License 2.0
public PlannerQueryOperation(RelNode calciteTree) {
	this.calciteTree = calciteTree;

	RelDataType rowType = calciteTree.getRowType();
	String[] fieldNames = rowType.getFieldNames().toArray(new String[0]);
	DataType[] fieldTypes = rowType.getFieldList()
		.stream()
		.map(field -> {
			final DataType fieldType = TypeConversions
				.fromLegacyInfoToDataType(FlinkTypeFactory.toTypeInfo(field.getType()));
			final boolean nullable = field.getType().isNullable();
			if (nullable != fieldType.getLogicalType().isNullable()
				&& !FlinkTypeFactory.isTimeIndicatorType(field.getType())) {
				return nullable ? fieldType.nullable() : fieldType.notNull();
			} else {
				return fieldType;
			}
		})
		.toArray(DataType[]::new);

	this.tableSchema = TableSchema.builder().fields(fieldNames, fieldTypes).build();
}
 
Example #3
Source File: LocalExecutor.java    From Flink-CEPplus with Apache License 2.0
private static TableSchema removeTimeAttributes(TableSchema schema) {
	final TableSchema.Builder builder = TableSchema.builder();
	for (int i = 0; i < schema.getFieldCount(); i++) {
		final TypeInformation<?> type = schema.getFieldTypes()[i];
		final TypeInformation<?> convertedType;
		if (FlinkTypeFactory.isTimeIndicatorType(type)) {
			convertedType = Types.SQL_TIMESTAMP;
		} else {
			convertedType = type;
		}
		builder.field(schema.getFieldNames()[i], convertedType);
	}
	return builder.build();
}
 
Example #4
Source File: QueryOperationConverter.java    From flink with Apache License 2.0
@Override
public <U> RelNode visit(CalculatedQueryOperation<U> calculatedTable) {
	String[] fieldNames = calculatedTable.getTableSchema().getFieldNames();
	int[] fieldIndices = IntStream.range(0, fieldNames.length).toArray();
	TypeInformation<U> resultType = calculatedTable.getResultType();

	FlinkTableFunctionImpl<U> function = new FlinkTableFunctionImpl<>(
		resultType,
		fieldIndices,
		fieldNames);
	TableFunction<?> tableFunction = calculatedTable.getTableFunction();

	FlinkTypeFactory typeFactory = relBuilder.getTypeFactory();
	TableSqlFunction sqlFunction = new TableSqlFunction(
		tableFunction.functionIdentifier(),
		tableFunction.toString(),
		tableFunction,
		resultType,
		typeFactory,
		function);

	List<RexNode> parameters = convertToRexNodes(calculatedTable.getParameters());

	return LogicalTableFunctionScan.create(
		relBuilder.peek().getCluster(),
		Collections.emptyList(),
		relBuilder.call(sqlFunction, parameters),
		function.getElementType(null),
		function.getRowType(typeFactory, null),
		null);
}
 
Example #5
Source File: PlannerQueryOperation.java    From flink with Apache License 2.0
public PlannerQueryOperation(RelNode calciteTree) {
	this.calciteTree = calciteTree;

	RelDataType rowType = calciteTree.getRowType();
	String[] fieldNames = rowType.getFieldNames().toArray(new String[0]);
	TypeInformation[] fieldTypes = rowType.getFieldList()
		.stream()
		.map(field -> FlinkTypeFactory.toTypeInfo(field.getType())).toArray(TypeInformation[]::new);

	this.tableSchema = new TableSchema(fieldNames, fieldTypes);
}
 
Example #6
Source File: KafkaBaseSinkDescriptor.java    From alchemy with Apache License 2.0
@Override
public <T> T transform(TableSchema param) throws Exception {
    TableSchema tableSchema = createTableSchema();
    if (tableSchema == null) {
        tableSchema = param;
    }
    if (tableSchema == null) {
        throw new IllegalArgumentException("TableSchema must not be null");
    }
    TypeInformation[] fieldTypes = new TypeInformation[tableSchema.getFieldCount()];
    for (int i = 0; i < tableSchema.getFieldCount(); i++) {
        if (FlinkTypeFactory.isTimeIndicatorType(tableSchema.getFieldTypes()[i])) {
            fieldTypes[i] = Types.SQL_TIMESTAMP();
        } else {
            fieldTypes[i] = tableSchema.getFieldTypes()[i];
        }
    }
    TypeInformation typeInformation = new RowTypeInfo(fieldTypes, tableSchema.getFieldNames());
    SerializationSchema<Row> rowSerializationSchema = createSerializationSchema(typeInformation);
    return (T) newTableSink(
        new TableSchema(tableSchema.getFieldNames(), fieldTypes),
        this.topic,
        PropertiesUtil.fromYamlMap(this.getProperties()),
        Optional.empty(),
        rowSerializationSchema == null ? new JsonRowSerializationSchema(typeInformation) : rowSerializationSchema
    );
}
 
Example #7
Source File: AbstractStreamSqlJob.java    From zeppelin with Apache License 2.0
private static TableSchema removeTimeAttributes(TableSchema schema) {
  final TableSchema.Builder builder = TableSchema.builder();
  for (int i = 0; i < schema.getFieldCount(); i++) {
    final TypeInformation<?> type = schema.getFieldTypes()[i];
    final TypeInformation<?> convertedType;
    if (FlinkTypeFactory.isTimeIndicatorType(type)) {
      convertedType = Types.SQL_TIMESTAMP;
    } else {
      convertedType = type;
    }
    builder.field(schema.getFieldNames()[i], convertedType);
  }
  return builder.build();
}
 
Example #8
Source File: DatabaseCalciteSchema.java    From flink with Apache License 2.0
private Table convertCatalogView(String tableName, CatalogView table, TableSchema resolvedSchema) {
	return new ViewTable(
		null,
		typeFactory -> ((FlinkTypeFactory) typeFactory).buildLogicalRowType(resolvedSchema),
		table.getExpandedQuery(),
		Arrays.asList(catalogName, databaseName),
		Arrays.asList(catalogName, databaseName, tableName)
	);
}
 
Example #9
Source File: QueryOperationCatalogViewTable.java    From flink with Apache License 2.0
public static QueryOperationCatalogViewTable createCalciteTable(
		QueryOperationCatalogView catalogView,
		TableSchema resolvedSchema) {
	return new QueryOperationCatalogViewTable(catalogView, typeFactory -> {
		final FlinkTypeFactory flinkTypeFactory = (FlinkTypeFactory) typeFactory;
		final RelDataType relType = flinkTypeFactory.buildLogicalRowType(resolvedSchema);
		Boolean[] nullables = resolvedSchema
			.getTableColumns()
			.stream()
			.map(c -> c.getType().getLogicalType().isNullable())
			.toArray(Boolean[]::new);
		final List<RelDataTypeField> fields = relType
			.getFieldList()
			.stream()
			.map(f -> {
				boolean nullable = nullables[f.getIndex()];
				if (nullable != f.getType().isNullable()
					&& !FlinkTypeFactory.isTimeIndicatorType(f.getType())) {
					return new RelDataTypeFieldImpl(
						f.getName(),
						f.getIndex(),
						flinkTypeFactory.createTypeWithNullability(f.getType(), nullable));
				} else {
					return f;
				}
			})
			.collect(Collectors.toList());
		return flinkTypeFactory.createStructType(fields);
	});
}
 
Example #10
Source File: QueryOperationConverter.java    From flink with Apache License 2.0
@Override
public RelNode visit(CalculatedQueryOperation calculatedTable) {
	FlinkTypeFactory typeFactory = relBuilder.getTypeFactory();
	if (calculatedTable.getFunctionDefinition() instanceof TableFunctionDefinition) {
		TableFunctionDefinition functionDefinition =
			(TableFunctionDefinition) calculatedTable.getFunctionDefinition();

		String[] fieldNames = calculatedTable.getTableSchema().getFieldNames();
		int[] fieldIndices = IntStream.range(0, fieldNames.length).toArray();

		TableFunction<?> tableFunction = functionDefinition.getTableFunction();
		TypeInformation<?> rowType = functionDefinition.getResultType();
		FlinkTableFunctionImpl<?> function = new FlinkTableFunctionImpl<>(
			rowType,
			fieldIndices,
			fieldNames
		);

		final TableSqlFunction sqlFunction = new TableSqlFunction(
			tableFunction.functionIdentifier(),
			tableFunction.toString(),
			tableFunction,
			rowType,
			typeFactory,
			function);

		List<RexNode> parameters = convertToRexNodes(calculatedTable.getArguments());
		return LogicalTableFunctionScan.create(
			relBuilder.peek().getCluster(),
			Collections.emptyList(),
			relBuilder.call(sqlFunction, parameters),
			function.getElementType(null),
			function.getRowType(typeFactory, null),
			null);
	}

	throw new ValidationException(
		"The new type inference for functions is only supported in the Blink planner.");
}
 
Example #11
Source File: FunctionCatalogOperatorTable.java    From flink with Apache License 2.0
public FunctionCatalogOperatorTable(
		FunctionCatalog functionCatalog,
		FlinkTypeFactory typeFactory) {
	this.functionCatalog = functionCatalog;
	this.typeFactory = typeFactory;
}
 
Example #12
Source File: QueryOperationCatalogViewTable.java    From flink with Apache License 2.0
public static QueryOperationCatalogViewTable createCalciteTable(QueryOperationCatalogView catalogView) {
	return new QueryOperationCatalogViewTable(catalogView, typeFactory -> {
		TableSchema tableSchema = catalogView.getSchema();
		return ((FlinkTypeFactory) typeFactory).buildLogicalRowType(tableSchema);
	});
}
 
Example #13
Source File: PlanningConfigurationBuilder.java    From flink with Apache License 2.0
/** Returns the {@link FlinkTypeFactory} that will be used. */
public FlinkTypeFactory getTypeFactory() {
	return typeFactory;
}
 
Example #14
Source File: SqlToOperationConverter.java    From flink with Apache License 2.0
/**
 * Convert the {@link SqlCreateTable} node.
 */
private Operation convertCreateTable(SqlCreateTable sqlCreateTable) {
	// primary key and unique keys are not supported
	if ((sqlCreateTable.getPrimaryKeyList() != null
			&& sqlCreateTable.getPrimaryKeyList().size() > 0)
		|| (sqlCreateTable.getUniqueKeysList() != null
			&& sqlCreateTable.getUniqueKeysList().size() > 0)) {
		throw new SqlConversionException("Primary key and unique key are not supported yet.");
	}

	// set with properties
	SqlNodeList propertyList = sqlCreateTable.getPropertyList();
	Map<String, String> properties = new HashMap<>();
	if (propertyList != null) {
		propertyList.getList().forEach(p ->
			properties.put(((SqlTableOption) p).getKeyString().toLowerCase(),
				((SqlTableOption) p).getValueString()));
	}

	TableSchema tableSchema = createTableSchema(sqlCreateTable,
		new FlinkTypeFactory(new FlinkTypeSystem())); // need to make type factory singleton ?
	String tableComment = "";
	if (sqlCreateTable.getComment() != null) {
		tableComment = sqlCreateTable.getComment().getNlsString().getValue();
	}
	// set partition key
	List<String> partitionKeys = new ArrayList<>();
	SqlNodeList partitionKey = sqlCreateTable.getPartitionKeyList();
	if (partitionKey != null) {
		partitionKeys = partitionKey
			.getList()
			.stream()
			.map(p -> ((SqlIdentifier) p).getSimple())
			.collect(Collectors.toList());
	}
	CatalogTable catalogTable = new CatalogTableImpl(tableSchema,
		partitionKeys,
		properties,
		tableComment);
	return new CreateTableOperation(sqlCreateTable.fullTableName(), catalogTable,
		sqlCreateTable.isIfNotExists());
}
 