Java Code Examples for org.apache.flink.table.api.TableSchema#getFieldCount()

The following examples show how to use org.apache.flink.table.api.TableSchema#getFieldCount(). They are taken from open source projects; the source file, project, and license are listed above each example.
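Before the project examples, here is a minimal, self-contained sketch of what getFieldCount() reports. The class name, field names, and types are invented for illustration.

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;

public class GetFieldCountSketch {

	public static void main(String[] args) {
		// Build a two-column schema with the TableSchema builder.
		TableSchema schema = TableSchema.builder()
			.field("id", DataTypes.INT())
			.field("name", DataTypes.STRING())
			.build();

		// getFieldCount() returns the number of columns; here it prints 2.
		System.out.println(schema.getFieldCount());
	}
}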
Example 1
Source File: DescriptorProperties.java    From Flink-CEPplus with Apache License 2.0
/**
 * Adds a table schema under the given key.
 */
public void putTableSchema(String key, TableSchema schema) {
	checkNotNull(key);
	checkNotNull(schema);

	final String[] fieldNames = schema.getFieldNames();
	final TypeInformation<?>[] fieldTypes = schema.getFieldTypes();

	final List<List<String>> values = new ArrayList<>();
	for (int i = 0; i < schema.getFieldCount(); i++) {
		values.add(Arrays.asList(fieldNames[i], TypeStringUtils.writeTypeInfo(fieldTypes[i])));
	}

	putIndexedFixedProperties(
		key,
		Arrays.asList(TABLE_SCHEMA_NAME, TABLE_SCHEMA_TYPE),
		values);
}
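A hedged usage sketch for the method above. The key layout is inferred from putIndexedFixedProperties and the name/type sub-keys; the exact constants and type strings are assumptions and may differ between Flink versions.

// Hypothetical usage: serialize a two-column schema under the key "schema".
TableSchema schema = TableSchema.builder()
	.field("id", DataTypes.INT())
	.field("name", DataTypes.STRING())
	.build();
DescriptorProperties props = new DescriptorProperties();
props.putTableSchema("schema", schema);
// Assuming TABLE_SCHEMA_NAME = "name" and TABLE_SCHEMA_TYPE = "type", the
// properties behind props should now contain roughly:
//   schema.0.name = id     schema.0.type = INT
//   schema.1.name = name   schema.1.type = VARCHAR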
 
Example 2
Source File: PulsarMetadataReader.java    From pulsar-flink with Apache License 2.0
public void putSchema(ObjectPath tablePath, CatalogBaseTable table) throws IncompatibleSchemaException {
    String topic = objectPath2TopicName(tablePath);
    TableSchema tableSchema = table.getSchema();
    List<String> fieldsRemaining = new ArrayList<>(tableSchema.getFieldCount());
    for (String fieldName : tableSchema.getFieldNames()) {
        if (!PulsarOptions.META_FIELD_NAMES.contains(fieldName)) {
            fieldsRemaining.add(fieldName);
        }
    }

    DataType dataType;

    if (fieldsRemaining.size() == 1) {
        dataType = tableSchema.getFieldDataType(fieldsRemaining.get(0)).get();
    } else {
        List<DataTypes.Field> fieldList = fieldsRemaining.stream()
                .map(f -> DataTypes.FIELD(f, tableSchema.getFieldDataType(f).get()))
                .collect(Collectors.toList());
        dataType = DataTypes.ROW(fieldList.toArray(new DataTypes.Field[0]));
    }

    SchemaInfo si = SchemaUtils.sqlType2PulsarSchema(dataType).getSchemaInfo();
    SchemaUtils.uploadPulsarSchema(admin, topic, si);
}
 
Example 3
Source File: DescriptorProperties.java    From flink with Apache License 2.0
/**
 * Adds a table schema under the given key.
 */
public void putTableSchema(String key, TableSchema schema) {
	checkNotNull(key);
	checkNotNull(schema);

	final String[] fieldNames = schema.getFieldNames();
	final TypeInformation<?>[] fieldTypes = schema.getFieldTypes();

	final List<List<String>> values = new ArrayList<>();
	for (int i = 0; i < schema.getFieldCount(); i++) {
		values.add(Arrays.asList(fieldNames[i], TypeStringUtils.writeTypeInfo(fieldTypes[i])));
	}

	putIndexedFixedProperties(
		key,
		Arrays.asList(TABLE_SCHEMA_NAME, TABLE_SCHEMA_TYPE),
		values);
}
 
Example 4
Source File: DataGenTableSourceFactory.java    From flink with Apache License 2.0
@Override
public DynamicTableSource createDynamicTableSource(Context context) {
	Configuration options = new Configuration();
	context.getCatalogTable().getOptions().forEach(options::setString);

	TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(context.getCatalogTable().getSchema());

	DataGenerator[] fieldGenerators = new DataGenerator[tableSchema.getFieldCount()];
	for (int i = 0; i < fieldGenerators.length; i++) {
		fieldGenerators[i] = createDataGenerator(
				tableSchema.getFieldName(i).get(),
				tableSchema.getFieldDataType(i).get(),
				options);
	}

	return new DataGenTableSource(fieldGenerators, tableSchema, options.get(ROWS_PER_SECOND));
}
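For context, this factory is exercised by declaring a table on the datagen connector. A hedged sketch follows; the table name and columns are invented, while 'connector' = 'datagen' and 'rows-per-second' correspond to the options the factory reads above.

// Hypothetical DDL registering a datagen-backed table.
TableEnvironment tableEnv = TableEnvironment.create(EnvironmentSettings.newInstance().build());
tableEnv.executeSql(
	"CREATE TABLE Orders (" +
	"  order_id INT," +
	"  price DOUBLE" +
	") WITH (" +
	"  'connector' = 'datagen'," +
	"  'rows-per-second' = '10'" +
	")");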
 
Example 5
Source File: JoinQueryOperation.java    From flink with Apache License 2.0
private TableSchema calculateResultingSchema(QueryOperation left, QueryOperation right) {
	TableSchema leftSchema = left.getTableSchema();
	TableSchema rightSchema = right.getTableSchema();
	int resultingSchemaSize = leftSchema.getFieldCount() + rightSchema.getFieldCount();
	String[] newFieldNames = new String[resultingSchemaSize];
	System.arraycopy(leftSchema.getFieldNames(), 0, newFieldNames, 0, leftSchema.getFieldCount());
	System.arraycopy(
		rightSchema.getFieldNames(),
		0,
		newFieldNames,
		leftSchema.getFieldCount(),
		rightSchema.getFieldCount());

	TypeInformation[] newFieldTypes = new TypeInformation[resultingSchemaSize];

	System.arraycopy(leftSchema.getFieldTypes(), 0, newFieldTypes, 0, leftSchema.getFieldCount());
	System.arraycopy(
		rightSchema.getFieldTypes(),
		0,
		newFieldTypes,
		leftSchema.getFieldCount(),
		rightSchema.getFieldCount());
	return new TableSchema(newFieldNames, newFieldTypes);
}
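To make the concatenation concrete, a small sketch with invented schemas, using the same deprecated TableSchema(String[], TypeInformation[]) constructor the method above returns through (Types here is org.apache.flink.api.common.typeinfo.Types):

// Hypothetical inputs: the left schema contributes 2 fields, the right 1.
TableSchema leftSchema = new TableSchema(
	new String[] {"a", "b"},
	new TypeInformation<?>[] {Types.INT, Types.STRING});
TableSchema rightSchema = new TableSchema(
	new String[] {"c"},
	new TypeInformation<?>[] {Types.BOOLEAN});
// calculateResultingSchema would concatenate them into (a INT, b STRING, c BOOLEAN),
// so the result's getFieldCount() is 2 + 1 = 3.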
 
Example 6
Source File: AliasOperationUtils.java    From flink with Apache License 2.0
/**
 * Creates a list of valid alias expressions. The resulting expressions might still contain
 * {@link UnresolvedReferenceExpression}s.
 *
 * @param aliases aliases to validate
 * @param child relational operation on top of which to apply the aliases
 * @return validated list of aliases
 */
public static List<Expression> createAliasList(List<Expression> aliases, QueryOperation child) {
	TableSchema childSchema = child.getTableSchema();

	if (aliases.size() > childSchema.getFieldCount()) {
		throw new ValidationException("Aliasing more fields than we actually have.");
	}

	List<ValueLiteralExpression> fieldAliases = aliases.stream()
		.map(f -> f.accept(aliasLiteralValidator))
		.collect(Collectors.toList());

	String[] childNames = childSchema.getFieldNames();
	return IntStream.range(0, childNames.length)
		.mapToObj(idx -> {
			UnresolvedReferenceExpression oldField = unresolvedRef(childNames[idx]);
			if (idx < fieldAliases.size()) {
				ValueLiteralExpression alias = fieldAliases.get(idx);
				return unresolvedCall(BuiltInFunctionDefinitions.AS, oldField, alias);
			} else {
				return oldField;
			}
		}).collect(Collectors.toList());
}
 
Example 7
Source File: CalculatedTableFactory.java    From flink with Apache License 2.0
private TableSchema adjustNames(
		TableSchema tableSchema,
		List<String> aliases,
		String functionName) {
	int aliasesSize = aliases.size();
	if (aliasesSize == 0) {
		return tableSchema;
	}

	int callArity = tableSchema.getFieldCount();
	if (callArity != aliasesSize) {
		throw new ValidationException(String.format(
			"List of column aliases must have same degree as table; " +
				"the returned table of function '%s' has " +
				"%d columns, whereas alias list has %d columns",
			functionName,
			callArity,
			aliasesSize));
	}

	return TableSchema.builder()
		.fields(aliases.toArray(new String[0]), tableSchema.getFieldDataTypes())
		.build();
}
 
Example 8
Source File: SelectTableSinkSchemaConverter.java    From flink with Apache License 2.0
/**
 * Converts time attributes (processing time / event time) to regular timestamps
 * and builds a new {@link TableSchema}.
 */
public static TableSchema convertTimeAttributeToRegularTimestamp(TableSchema tableSchema) {
	DataType[] dataTypes = tableSchema.getFieldDataTypes();
	String[] oldNames = tableSchema.getFieldNames();

	TableSchema.Builder builder = TableSchema.builder();
	for (int i = 0; i < tableSchema.getFieldCount(); i++) {
		DataType fieldType = dataTypes[i];
		String fieldName = oldNames[i];
		if (fieldType.getLogicalType() instanceof TimestampType) {
			TimestampType timestampType = (TimestampType) fieldType.getLogicalType();
			if (!timestampType.getKind().equals(TimestampKind.REGULAR)) {
				// converts `TIME ATTRIBUTE(ROWTIME)`/`TIME ATTRIBUTE(PROCTIME)` to `TIMESTAMP(3)`
				builder.field(fieldName, DataTypes.TIMESTAMP(3));
				continue;
			}
		}
		builder.field(fieldName, fieldType);
	}
	return builder.build();
}
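A hedged before/after sketch for this converter, constructing the rowtime attribute directly via TimestampType the way the CatalogTableSchemaResolver example further down does. Whether this converter class is publicly accessible in a given Flink version is an assumption here.

// Hypothetical input schema whose "ts" column is a rowtime attribute.
DataType rowtimeType = TypeConversions.fromLogicalToDataType(
	new TimestampType(true, TimestampKind.ROWTIME, 3));
TableSchema in = TableSchema.builder()
	.field("ts", rowtimeType)
	.field("v", DataTypes.INT())
	.build();

TableSchema out = SelectTableSinkSchemaConverter.convertTimeAttributeToRegularTimestamp(in);
// Both fields survive; "ts" should now be plain TIMESTAMP(3) instead of a time attribute.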
 
Example 9
Source File: LocalExecutor.java    From Flink-CEPplus with Apache License 2.0
private static TableSchema removeTimeAttributes(TableSchema schema) {
	final TableSchema.Builder builder = TableSchema.builder();
	for (int i = 0; i < schema.getFieldCount(); i++) {
		final TypeInformation<?> type = schema.getFieldTypes()[i];
		final TypeInformation<?> convertedType;
		if (FlinkTypeFactory.isTimeIndicatorType(type)) {
			convertedType = Types.SQL_TIMESTAMP;
		} else {
			convertedType = type;
		}
		builder.field(schema.getFieldNames()[i], convertedType);
	}
	return builder.build();
}
 
Example 10
Source File: SetOperationFactory.java    From flink with Apache License 2.0
private void validateSetOperation(
		SetQueryOperationType operationType,
		QueryOperation left,
		QueryOperation right) {
	TableSchema leftSchema = left.getTableSchema();
	int leftFieldCount = leftSchema.getFieldCount();
	TableSchema rightSchema = right.getTableSchema();
	int rightFieldCount = rightSchema.getFieldCount();

	if (leftFieldCount != rightFieldCount) {
		throw new ValidationException(
			format(
				"The %s operation on two tables of different column sizes: %d and %d is not supported",
				operationType.toString().toLowerCase(),
				leftFieldCount,
				rightFieldCount));
	}

	TypeInformation<?>[] leftFieldTypes = leftSchema.getFieldTypes();
	TypeInformation<?>[] rightFieldTypes = rightSchema.getFieldTypes();
	boolean sameSchema = IntStream.range(0, leftFieldCount)
		.allMatch(idx -> leftFieldTypes[idx].equals(rightFieldTypes[idx]));

	if (!sameSchema) {
		throw new ValidationException(
			format(
				"The %s operation on two tables of different schemas: %s and %s is not supported.",
				operationType.toString().toLowerCase(),
				leftSchema,
				rightSchema));
	}
}
 
Example 11
Source File: LocalExecutor.java    From flink with Apache License 2.0
private static TableSchema removeTimeAttributes(TableSchema schema) {
	final TableSchema.Builder builder = TableSchema.builder();
	for (int i = 0; i < schema.getFieldCount(); i++) {
		final DataType dataType = schema.getFieldDataTypes()[i];
		final DataType convertedType = DataTypeUtils.replaceLogicalType(
			dataType,
			LogicalTypeUtils.removeTimeAttributes(dataType.getLogicalType()));
		builder.field(schema.getFieldNames()[i], convertedType);
	}
	return builder.build();
}
 
Example 12
Source File: FlinkSqlInterrpeter.java    From zeppelin with Apache License 2.0
private void callDescribe(String name, InterpreterContext context) throws IOException {
  TableSchema schema = tbenv.scan(name).getSchema();
  StringBuilder builder = new StringBuilder();
  builder.append("Column\tType\n");
  for (int i = 0; i < schema.getFieldCount(); ++i) {
    builder.append(schema.getFieldName(i).get() + "\t" + schema.getFieldDataType(i).get() + "\n");
  }
  context.out.write("%table\n" + builder.toString());
}
 
Example 13
Source File: AbstractStreamSqlJob.java    From zeppelin with Apache License 2.0
private static TableSchema removeTimeAttributes(TableSchema schema) {
  final TableSchema.Builder builder = TableSchema.builder();
  for (int i = 0; i < schema.getFieldCount(); i++) {
    final TypeInformation<?> type = schema.getFieldTypes()[i];
    final TypeInformation<?> convertedType;
    if (FlinkTypeFactory.isTimeIndicatorType(type)) {
      convertedType = Types.SQL_TIMESTAMP;
    } else {
      convertedType = type;
    }
    builder.field(schema.getFieldNames()[i], convertedType);
  }
  return builder.build();
}
 
Example 14
Source File: CatalogTableSchemaResolver.java    From flink with Apache License 2.0
/**
 * Resolves the computed column types for the given schema.
 *
 * @param tableSchema Table schema to derive table field names and data types
 * @return the resolved TableSchema
 */
public TableSchema resolve(TableSchema tableSchema) {
	final String rowtime;
	if (!tableSchema.getWatermarkSpecs().isEmpty()) {
		// TODO: [FLINK-14473] we only support top-level rowtime attribute right now
		rowtime = tableSchema.getWatermarkSpecs().get(0).getRowtimeAttribute();
		if (rowtime.contains(".")) {
			throw new ValidationException(
					String.format("Nested field '%s' as rowtime attribute is not supported right now.", rowtime));
		}
	} else {
		rowtime = null;
	}

	String[] fieldNames = tableSchema.getFieldNames();
	DataType[] fieldTypes = tableSchema.getFieldDataTypes();

	TableSchema.Builder builder = TableSchema.builder();
	for (int i = 0; i < tableSchema.getFieldCount(); ++i) {
		TableColumn tableColumn = tableSchema.getTableColumns().get(i);
		DataType fieldType = fieldTypes[i];

		if (tableColumn.isGenerated()) {
			fieldType = resolveExpressionDataType(tableColumn.getExpr().get(), tableSchema);
			if (isProctime(fieldType)) {
				if (fieldNames[i].equals(rowtime)) {
					throw new TableException("Watermark can not be defined for a processing time attribute column.");
				}
			}
		}

		if (isStreamingMode && fieldNames[i].equals(rowtime)) {
			TimestampType originalType = (TimestampType) fieldType.getLogicalType();
			LogicalType rowtimeType = new TimestampType(
					originalType.isNullable(),
					TimestampKind.ROWTIME,
					originalType.getPrecision());
			fieldType = TypeConversions.fromLogicalToDataType(rowtimeType);
		}

		if (tableColumn.isGenerated()) {
			builder.field(fieldNames[i], fieldType, tableColumn.getExpr().get());
		} else {
			builder.field(fieldNames[i], fieldType);
		}
	}

	tableSchema.getWatermarkSpecs().forEach(builder::watermark);
	tableSchema.getPrimaryKey().ifPresent(
			pk -> builder.primaryKey(pk.getName(), pk.getColumns().toArray(new String[0])));
	return builder.build();
}
 
Example 15
Source File: CsvTableSourceFactoryBase.java    From flink with Apache License 2.0
protected CsvTableSource createTableSource(
		Boolean isStreaming,
		Map<String, String> properties) {

	DescriptorProperties params = new DescriptorProperties();
	params.putProperties(properties);

	// validate
	new FileSystemValidator().validate(params);
	new OldCsvValidator().validate(params);
	new SchemaValidator(isStreaming, false, false).validate(params);

	// build
	CsvTableSource.Builder csvTableSourceBuilder = new CsvTableSource.Builder();

	TableSchema tableSchema = TableSchemaUtils.getPhysicalSchema(params.getTableSchema(SCHEMA));

	// if a format schema is defined, it is used no matter whether derive-schema is set or not
	final boolean hasSchema = params.hasPrefix(FORMAT_FIELDS);
	if (hasSchema) {
		TableSchema formatSchema = params.getTableSchema(FORMAT_FIELDS);
		// the CsvTableSource needs some rework first
		// for now the schema must be equal to the encoding
		// Ignore conversion classes in DataType
		if (!getFieldLogicalTypes(formatSchema).equals(getFieldLogicalTypes(tableSchema))) {
			throw new TableException(String.format(
					"Encodings that differ from the schema are not supported yet for" +
							" CsvTableSource, format schema is '%s', but table schema is '%s'.",
					formatSchema,
					tableSchema));
		}
	}

	params.getOptionalString(CONNECTOR_PATH).ifPresent(csvTableSourceBuilder::path);
	params.getOptionalString(FORMAT_FIELD_DELIMITER).ifPresent(csvTableSourceBuilder::fieldDelimiter);
	params.getOptionalString(FORMAT_LINE_DELIMITER).ifPresent(csvTableSourceBuilder::lineDelimiter);

	for (int i = 0; i < tableSchema.getFieldCount(); ++i) {
		csvTableSourceBuilder.field(tableSchema.getFieldNames()[i], tableSchema.getFieldDataTypes()[i]);
	}
	params.getOptionalCharacter(FORMAT_QUOTE_CHARACTER).ifPresent(csvTableSourceBuilder::quoteCharacter);
	params.getOptionalString(FORMAT_COMMENT_PREFIX).ifPresent(csvTableSourceBuilder::commentPrefix);
	params.getOptionalBoolean(FORMAT_IGNORE_FIRST_LINE).ifPresent(flag -> {
		if (flag) {
			csvTableSourceBuilder.ignoreFirstLine();
		}
	});

	params.getOptionalBoolean(FORMAT_IGNORE_PARSE_ERRORS).ifPresent(flag -> {
		if (flag) {
			csvTableSourceBuilder.ignoreParseErrors();
		}
	});

	return csvTableSourceBuilder.build();
}
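For comparison, a hedged sketch of what this factory assembles, calling the same builder methods directly; the path, fields, and delimiter are invented.

// Hypothetical hand-built source using only the builder calls seen above.
CsvTableSource source = new CsvTableSource.Builder()
	.path("/tmp/input.csv")
	.field("id", DataTypes.INT())
	.field("name", DataTypes.STRING())
	.fieldDelimiter(",")
	.ignoreFirstLine()
	.build();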
 
Example 16
Source File: CsvTableSourceFactoryBase.java    From flink with Apache License 2.0
protected CsvTableSource createTableSource(
		Boolean isStreaming,
		Map<String, String> properties) {

	DescriptorProperties params = new DescriptorProperties();
	params.putProperties(properties);

	// validate
	new FileSystemValidator().validate(params);
	new OldCsvValidator().validate(params);
	new SchemaValidator(isStreaming, false, false).validate(params);

	// build
	CsvTableSource.Builder csvTableSourceBuilder = new CsvTableSource.Builder();

	TableSchema formatSchema = params.getTableSchema(FORMAT_FIELDS);
	TableSchema tableSchema = params.getTableSchema(SCHEMA);

	// the CsvTableSource needs some rework first
	// for now the schema must be equal to the encoding
	if (!formatSchema.equals(tableSchema)) {
		throw new TableException(
				"Encodings that differ from the schema are not supported yet for CsvTableSources.");
	}

	params.getOptionalString(CONNECTOR_PATH).ifPresent(csvTableSourceBuilder::path);
	params.getOptionalString(FORMAT_FIELD_DELIMITER).ifPresent(csvTableSourceBuilder::fieldDelimiter);
	params.getOptionalString(FORMAT_LINE_DELIMITER).ifPresent(csvTableSourceBuilder::lineDelimiter);

	for (int i = 0; i < formatSchema.getFieldCount(); ++i) {
		csvTableSourceBuilder.field(formatSchema.getFieldNames()[i], formatSchema.getFieldTypes()[i]);
	}
	params.getOptionalCharacter(FORMAT_QUOTE_CHARACTER).ifPresent(csvTableSourceBuilder::quoteCharacter);
	params.getOptionalString(FORMAT_COMMENT_PREFIX).ifPresent(csvTableSourceBuilder::commentPrefix);
	params.getOptionalBoolean(FORMAT_IGNORE_FIRST_LINE).ifPresent(flag -> {
		if (flag) {
			csvTableSourceBuilder.ignoreFirstLine();
		}
	});

	params.getOptionalBoolean(FORMAT_IGNORE_PARSE_ERRORS).ifPresent(flag -> {
		if (flag) {
			csvTableSourceBuilder.ignoreParseErrors();
		}
	});

	return csvTableSourceBuilder.build();
}