org.apache.flink.table.api.TableException Java Examples

The following examples show how to use org.apache.flink.table.api.TableException. The examples are extracted from open source projects; the source file and originating project are noted above each example.
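TableException extends RuntimeException, so none of the snippets below declare it in a throws clause. As a starting point, here is a minimal, self-contained sketch of throwing and catching it; the class name and message are illustrative, not taken from Flink:

import org.apache.flink.table.api.TableException;

public class TableExceptionDemo {

	public static void main(String[] args) {
		try {
			// Single-argument constructor; a (message, cause) overload also
			// appears in the examples below (e.g. Examples #6 and #25).
			throw new TableException("Unsupported node type DemoNode");
		} catch (TableException e) {
			// Unchecked, so catching is optional; planners usually let it bubble up.
			System.err.println("Planner error: " + e.getMessage());
		}
	}
}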
Example #1
Source File: SqlToOperationConverter.java    From flink with Apache License 2.0
/**
 * This is the main entry point for executing all kinds of DDL/DML {@code SqlNode}s. Each
 * supported SqlNode subclass has its own implementation in a #convert(type) method whose
 * 'type' argument is a subclass of {@code SqlNode}.
 *
 * @param flinkPlanner     FlinkPlannerImpl used to convert the SQL node to a rel node
 * @param sqlNode          SqlNode to execute on
 */
public static Operation convert(FlinkPlannerImpl flinkPlanner, SqlNode sqlNode) {
	// validate the query
	final SqlNode validated = flinkPlanner.validate(sqlNode);
	SqlToOperationConverter converter = new SqlToOperationConverter(flinkPlanner);
	if (validated instanceof SqlCreateTable) {
		return converter.convertCreateTable((SqlCreateTable) validated);
	} else if (validated instanceof SqlDropTable) {
		return converter.convertDropTable((SqlDropTable) validated);
	} else if (validated instanceof RichSqlInsert) {
		return converter.convertSqlInsert((RichSqlInsert) validated);
	} else if (validated.getKind().belongsTo(SqlKind.QUERY)) {
		return converter.convertSqlQuery(validated);
	} else {
		throw new TableException("Unsupported node type "
			+ validated.getClass().getSimpleName());
	}
}
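A hedged sketch of how a caller might guard this entry point; convertOrFail is a hypothetical name, and planner and parsed are assumed to come from the surrounding planner setup:

// Illustrative wrapper, not Flink API.
static Operation convertOrFail(FlinkPlannerImpl planner, SqlNode parsed) {
	try {
		return SqlToOperationConverter.convert(planner, parsed);
	} catch (TableException e) {
		// Raised for SqlNode kinds outside CREATE TABLE, DROP TABLE, INSERT and plain queries.
		throw new TableException("Statement not supported: " + parsed.getKind(), e);
	}
}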
 
Example #2
Source File: KafkaTableSourceSinkFactoryBase.java    From flink with Apache License 2.0
@SuppressWarnings("unchecked")
private Optional<FlinkKafkaPartitioner<Row>> getFlinkKafkaPartitioner(DescriptorProperties descriptorProperties) {
	return descriptorProperties
		.getOptionalString(CONNECTOR_SINK_PARTITIONER)
		.flatMap((String partitionerString) -> {
			switch (partitionerString) {
				case CONNECTOR_SINK_PARTITIONER_VALUE_FIXED:
					return Optional.of(new FlinkFixedPartitioner<>());
				case CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN:
					return Optional.empty();
				case CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM:
					final Class<? extends FlinkKafkaPartitioner> partitionerClass =
						descriptorProperties.getClass(CONNECTOR_SINK_PARTITIONER_CLASS, FlinkKafkaPartitioner.class);
					return Optional.of((FlinkKafkaPartitioner<Row>) InstantiationUtil.instantiate(partitionerClass));
				default:
					throw new TableException("Unsupported sink partitioner. Validator should have checked that.");
			}
		});
}
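When the property is absent, getOptionalString yields an empty Optional and the flatMap lambda never runs; the explicit Optional.empty() in the round-robin branch means Kafka's own partitioning is used. A plain-Java miniature of the string-to-strategy switch, with illustrative values in place of the connector's validator constants:

// Illustrative values only; the real constants live in the Kafka connector validator.
static Optional<String> strategyFor(String partitioner) {
	switch (partitioner) {
		case "fixed":
			return Optional.of("each Flink subtask writes to a single Kafka partition");
		case "round-robin":
			return Optional.empty(); // empty = let Kafka spread records itself
		default:
			throw new IllegalArgumentException("Unsupported sink partitioner: " + partitioner);
	}
}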
 
Example #3
Source File: CassandraAppendTableSink.java    From flink with Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
	if (!(dataStream.getType() instanceof RowTypeInfo)) {
		throw new TableException("No support for the type of the given DataStream: " + dataStream.getType());
	}

	CassandraRowSink sink = new CassandraRowSink(
		dataStream.getType().getArity(),
		cql,
		builder,
		CassandraSinkBaseConfig.newBuilder().build(),
		new NoOpCassandraFailureHandler());

	return dataStream
			.addSink(sink)
			.setParallelism(dataStream.getParallelism())
			.name(TableConnectorUtils.generateRuntimeName(this.getClass(), fieldNames));
}
 
Example #4
Source File: SqlToOperationConverter.java    From flink with Apache License 2.0
/**
 * This is the main entry point for executing all kinds of DDL/DML {@code SqlNode}s. Each
 * supported SqlNode subclass has its own implementation in a #convert(type) method whose
 * 'type' argument is a subclass of {@code SqlNode}.
 *
 * @param flinkPlanner     FlinkPlannerImpl used to convert the SQL node to a rel node
 * @param sqlNode          SqlNode to execute on
 */
public static Operation convert(FlinkPlannerImpl flinkPlanner, SqlNode sqlNode) {
	// validate the query
	final SqlNode validated = flinkPlanner.validate(sqlNode);
	SqlToOperationConverter converter = new SqlToOperationConverter(flinkPlanner);
	if (validated instanceof SqlCreateTable) {
		return converter.convertCreateTable((SqlCreateTable) validated);
	} else if (validated instanceof SqlDropTable) {
		return converter.convertDropTable((SqlDropTable) validated);
	} else if (validated instanceof RichSqlInsert) {
		return converter.convertSqlInsert((RichSqlInsert) validated);
	} else if (validated.getKind().belongsTo(SqlKind.QUERY)) {
		return converter.convertSqlQuery(validated);
	} else {
		throw new TableException("Unsupported node type "
			+ validated.getClass().getSimpleName());
	}
}
 
Example #5
Source File: RexNodeConverter.java    From flink with Apache License 2.0
/**
 * Extracts a value from a literal, including planner-specific instances such as {@link Decimal}.
 */
@SuppressWarnings("unchecked")
private static <T> T extractValue(ValueLiteralExpression literal, Class<T> clazz) {
	final Optional<Object> possibleObject = literal.getValueAs(Object.class);
	if (!possibleObject.isPresent()) {
		throw new TableException("Invalid literal.");
	}
	final Object object = possibleObject.get();

	if (clazz.equals(BigDecimal.class)) {
		final Optional<BigDecimal> possibleDecimal = literal.getValueAs(BigDecimal.class);
		if (possibleDecimal.isPresent()) {
			return (T) possibleDecimal.get();
		}
		if (object instanceof Decimal) {
			return (T) ((Decimal) object).toBigDecimal();
		}
	}

	return literal.getValueAs(clazz)
			.orElseThrow(() -> new TableException("Unsupported literal class: " + clazz));
}
 
Example #6
Source File: BuiltInFunctionDefinitions.java    From flink with Apache License 2.0
public static List<BuiltInFunctionDefinition> getDefinitions() {
	final Field[] fields = BuiltInFunctionDefinitions.class.getFields();
	final List<BuiltInFunctionDefinition> list = new ArrayList<>(fields.length);
	for (Field field : fields) {
		if (FunctionDefinition.class.isAssignableFrom(field.getType())) {
			try {
				final BuiltInFunctionDefinition funcDef = (BuiltInFunctionDefinition) field.get(BuiltInFunctionDefinitions.class);
				list.add(Preconditions.checkNotNull(funcDef));
			} catch (IllegalAccessException e) {
				throw new TableException(
					"The function definition for field " + field.getName() + " is not accessible.", e);
			}
		}
	}
	return list;
}
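Example #6 enumerates public static fields via reflection and fails fast if one is inaccessible. A self-contained miniature of that pattern; the demo class and field names are ours, not Flink's:

import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.List;

public class ReflectiveRegistryDemo {

	public static final String FOO = "foo";
	public static final String BAR = "bar";

	public static List<String> stringConstants() {
		final List<String> list = new ArrayList<>();
		for (Field field : ReflectiveRegistryDemo.class.getFields()) {
			if (String.class.isAssignableFrom(field.getType())) {
				try {
					// For static fields the instance argument is ignored, so null works too.
					list.add((String) field.get(null));
				} catch (IllegalAccessException e) {
					throw new IllegalStateException(
						"The field " + field.getName() + " is not accessible.", e);
				}
			}
		}
		return list;
	}

	public static void main(String[] args) {
		System.out.println(stringConstants()); // [foo, bar] (getFields() order is unspecified)
	}
}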
 
Example #7
Source File: ExecutionContext.java    From flink with Apache License 2.0
private static Executor lookupExecutor(
		Map<String, String> executorProperties,
		StreamExecutionEnvironment executionEnvironment) {
	try {
		ExecutorFactory executorFactory = ComponentFactoryService.find(ExecutorFactory.class, executorProperties);
		Method createMethod = executorFactory.getClass()
			.getMethod("create", Map.class, StreamExecutionEnvironment.class);

		return (Executor) createMethod.invoke(
			executorFactory,
			executorProperties,
			executionEnvironment);
	} catch (Exception e) {
		throw new TableException(
			"Could not instantiate the executor. Make sure a planner module is on the classpath",
			e);
	}
}
 
Example #8
Source File: ProjectionOperationFactory.java    From flink with Apache License 2.0
private Optional<String> extractNameFromGet(CallExpression call) {
	Expression child = call.getChildren().get(0);
	ValueLiteralExpression key = (ValueLiteralExpression) call.getChildren().get(1);

	final LogicalType keyType = key.getOutputDataType().getLogicalType();

	final String keySuffix;
	if (hasRoot(keyType, INTEGER)) {
		keySuffix = "$_" + key.getValueAs(Integer.class)
			.orElseThrow(() -> new TableException("Integer constant excepted."));
	} else {
		keySuffix = "$" + key.getValueAs(String.class)
			.orElseThrow(() -> new TableException("Integer constant excepted."));
	}
	return child.accept(this).map(p -> p + keySuffix);
}
 
Example #9
Source File: KafkaTableSourceSinkFactoryBase.java    From Flink-CEPplus with Apache License 2.0
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	final TableSchema schema = descriptorProperties.getTableSchema(SCHEMA());
	final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
	final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
	final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
		SchemaValidator.deriveRowtimeAttributes(descriptorProperties);

	// see also FLINK-9870
	if (proctime.isPresent() || !rowtimeAttributeDescriptors.isEmpty() ||
			checkForCustomFieldMapping(descriptorProperties, schema)) {
		throw new TableException("Time attributes and custom field mappings are not supported yet.");
	}

	return createKafkaTableSink(
		schema,
		topic,
		getKafkaProperties(descriptorProperties),
		getFlinkKafkaPartitioner(descriptorProperties),
		getSerializationSchema(properties));
}
 
Example #10
Source File: AggFunctionTestBase.java    From flink with Apache License 2.0
protected ACC accumulateValues(List<T> values)
		throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
	AggregateFunction<T, ACC> aggregator = getAggregator();
	ACC accumulator = getAggregator().createAccumulator();
	Method accumulateFunc = getAccumulateFunc();
	for (T value : values) {
		if (accumulateFunc.getParameterCount() == 1) {
			accumulateFunc.invoke(aggregator, (Object) accumulator);
		} else if (accumulateFunc.getParameterCount() == 2) {
			accumulateFunc.invoke(aggregator, (Object) accumulator, (Object) value);
		} else {
			throw new TableException("Unsupported now");
		}
	}
	return accumulator;
}
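The loop dispatches on Method.getParameterCount() because an accumulate function may or may not take a value argument next to the accumulator. A runnable miniature of that arity check, with demo types of our own:

import java.lang.reflect.Method;

public class ArityDispatchDemo {

	public void accumulate(StringBuilder acc) {
		acc.append('!');
	}

	public void accumulate(StringBuilder acc, String value) {
		acc.append(value);
	}

	public static void main(String[] args) throws ReflectiveOperationException {
		ArityDispatchDemo target = new ArityDispatchDemo();
		StringBuilder acc = new StringBuilder();
		Method func = ArityDispatchDemo.class.getMethod("accumulate", StringBuilder.class, String.class);
		if (func.getParameterCount() == 1) {
			func.invoke(target, acc);
		} else if (func.getParameterCount() == 2) {
			func.invoke(target, acc, "hello");
		}
		System.out.println(acc); // hello
	}
}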
 
Example #11
Source File: KafkaTableSourceSinkFactoryBase.java    From Flink-CEPplus with Apache License 2.0
@SuppressWarnings("unchecked")
private Optional<FlinkKafkaPartitioner<Row>> getFlinkKafkaPartitioner(DescriptorProperties descriptorProperties) {
	return descriptorProperties
		.getOptionalString(CONNECTOR_SINK_PARTITIONER)
		.flatMap((String partitionerString) -> {
			switch (partitionerString) {
				case CONNECTOR_SINK_PARTITIONER_VALUE_FIXED:
					return Optional.of(new FlinkFixedPartitioner<>());
				case CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN:
					return Optional.empty();
				case CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM:
					final Class<? extends FlinkKafkaPartitioner> partitionerClass =
						descriptorProperties.getClass(CONNECTOR_SINK_PARTITIONER_CLASS, FlinkKafkaPartitioner.class);
					return Optional.of((FlinkKafkaPartitioner<Row>) InstantiationUtil.instantiate(partitionerClass));
				default:
					throw new TableException("Unsupported sink partitioner. Validator should have checked that.");
			}
		});
}
 
Example #12
Source File: TableEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public void registerTableSink(String name, TableSink<?> configuredSink) {
	// validate
	if (configuredSink.getTableSchema().getFieldCount() == 0) {
		throw new TableException("Table schema cannot be empty.");
	}

	checkValidTableName(name);
	registerTableSinkInternal(name, configuredSink);
}
 
Example #13
Source File: StreamTableEnvironmentImpl.java    From flink with Apache License 2.0
public static StreamTableEnvironment create(
		StreamExecutionEnvironment executionEnvironment,
		EnvironmentSettings settings,
		TableConfig tableConfig) {

	if (!settings.isStreamingMode()) {
		throw new TableException(
			"StreamTableEnvironment can not run in batch mode for now, please use TableEnvironment.");
	}

	CatalogManager catalogManager = new CatalogManager(
		settings.getBuiltInCatalogName(),
		new GenericInMemoryCatalog(settings.getBuiltInCatalogName(), settings.getBuiltInDatabaseName()));

	FunctionCatalog functionCatalog = new FunctionCatalog(catalogManager);

	Map<String, String> executorProperties = settings.toExecutorProperties();
	Executor executor = lookupExecutor(executorProperties, executionEnvironment);

	Map<String, String> plannerProperties = settings.toPlannerProperties();
	Planner planner = ComponentFactoryService.find(PlannerFactory.class, plannerProperties)
		.create(plannerProperties, executor, tableConfig, functionCatalog, catalogManager);

	return new StreamTableEnvironmentImpl(
		catalogManager,
		functionCatalog,
		tableConfig,
		executionEnvironment,
		planner,
		executor,
		settings.isStreamingMode()
	);
}
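Application code normally reaches this factory through the public API rather than calling it directly. A hedged usage sketch, assuming the Flink 1.9-era EnvironmentSettings builder; the wrapper method name is ours:

// Sketch only; the Flink calls are the 1.9-era public API.
static StreamTableEnvironment createStreamingEnv(StreamExecutionEnvironment env) {
	EnvironmentSettings settings = EnvironmentSettings.newInstance()
		.inStreamingMode() // batch mode would hit the TableException above
		.build();
	return StreamTableEnvironment.create(env, settings);
}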
 
Example #14
Source File: TableEnvironmentImpl.java    From flink with Apache License 2.0
@Override
public String[] listTables() {
	String currentCatalogName = catalogManager.getCurrentCatalog();
	Optional<Catalog> currentCatalog = catalogManager.getCatalog(currentCatalogName);

	return currentCatalog.map(catalog -> {
		try {
			return catalog.listTables(catalogManager.getCurrentDatabase()).toArray(new String[0]);
		} catch (DatabaseNotExistException e) {
			throw new ValidationException("Current database does not exist", e);
		}
	}).orElseThrow(() ->
		new TableException(String.format("The current catalog %s does not exist.", currentCatalogName)));
}
 
Example #15
Source File: TableEnvironmentImpl.java    From flink with Apache License 2.0
protected void registerTableInternal(String name, CatalogBaseTable table) {
	try {
		checkValidTableName(name);
		ObjectPath path = new ObjectPath(catalogManager.getBuiltInDatabaseName(), name);
		Optional<Catalog> catalog = catalogManager.getCatalog(catalogManager.getBuiltInCatalogName());
		if (catalog.isPresent()) {
			catalog.get().createTable(
				path,
				table,
				false);
		}
	} catch (Exception e) {
		throw new TableException("Could not register table", e);
	}
}
 
Example #16
Source File: TableEnvironmentImpl.java    From flink with Apache License 2.0
private void replaceTableInternal(String name, CatalogBaseTable table) {
	try {
		ObjectPath path = new ObjectPath(catalogManager.getBuiltInDatabaseName(), name);
		Optional<Catalog> catalog = catalogManager.getCatalog(catalogManager.getBuiltInCatalogName());
		if (catalog.isPresent()) {
			catalog.get().alterTable(
				path,
				table,
				false);
		}
	} catch (Exception e) {
		throw new TableException("Could not register table", e);
	}
}
 
Example #17
Source File: AggFunctionTestBase.java    From flink with Apache License 2.0
protected void retractValues(ACC accumulator, List<T> values)
		throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
	AggregateFunction<T, ACC> aggregator = getAggregator();
	Method retractFunc = getRetractFunc();
	for (T value : values) {
		if (retractFunc.getParameterCount() == 1) {
			retractFunc.invoke(aggregator, (Object) accumulator);
		} else if (retractFunc.getParameterCount() == 2) {
			retractFunc.invoke(aggregator, (Object) accumulator, (Object) value);
		} else {
			throw new TableException("Unsupported now");
		}
	}
}
 
Example #18
Source File: ExpressionResolver.java    From flink with Apache License 2.0
private Expression resolveFieldsInSingleExpression(Expression expression) {
	List<Expression> expressions = ResolverRules.FIELD_RESOLVE.apply(
		Collections.singletonList(expression),
		new ExpressionResolverContext());

	if (expressions.size() != 1) {
		throw new TableException("Expected a single expression as a result. Got: " + expressions);
	}

	return expressions.get(0);
}
 
Example #19
Source File: FunctionLookup.java    From flink with Apache License 2.0
/**
 * Helper method for looking up a built-in function.
 */
default Result lookupBuiltInFunction(BuiltInFunctionDefinition definition) {
	return lookupFunction(definition.getName())
		.orElseThrow(() -> new TableException(
			String.format(
				"Required built-in function [%s] could not be found in any catalog.",
				definition.getName())
			)
		);
}
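The Optional-plus-orElseThrow lookup idiom above recurs throughout these examples. A self-contained miniature with a plain Map standing in for the catalog; all names are illustrative:

import java.util.Collections;
import java.util.Map;
import java.util.Optional;

public class LookupDemo {

	static String lookupOrFail(Map<String, String> catalog, String name) {
		return Optional.ofNullable(catalog.get(name))
			.orElseThrow(() -> new IllegalStateException(String.format(
				"Required built-in function [%s] could not be found in any catalog.", name)));
	}

	public static void main(String[] args) {
		Map<String, String> catalog = Collections.singletonMap("concat", "CONCAT");
		System.out.println(lookupOrFail(catalog, "concat")); // CONCAT
	}
}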
 
Example #20
Source File: LogicalTypeDuplicator.java    From flink with Apache License 2.0
@Override
public LogicalType visit(StructuredType structuredType) {
	final List<StructuredAttribute> attributes = structuredType.getAttributes().stream()
		.map(a -> {
			if (a.getDescription().isPresent()) {
				return new StructuredAttribute(
					a.getName(),
					a.getType().accept(this),
					a.getDescription().get());
			}
			return new StructuredAttribute(
				a.getName(),
				a.getType().accept(this));
		})
		.collect(Collectors.toList());
	final StructuredType.Builder builder = new StructuredType.Builder(
		structuredType.getObjectIdentifier(),
		attributes);
	builder.setNullable(structuredType.isNullable());
	builder.setFinal(structuredType.isFinal());
	builder.setInstantiable(structuredType.isInstantiable());
	builder.setComparision(structuredType.getComparision());
	structuredType.getSuperType().ifPresent(st -> {
		final LogicalType visited = st.accept(this);
		if (!(visited instanceof StructuredType)) {
			throw new TableException("Unexpected super type. Structured type expected but was: " + visited);
		}
		builder.setSuperType((StructuredType) visited);
	});
	structuredType.getDescription().ifPresent(builder::setDescription);
	structuredType.getImplementationClass().ifPresent(builder::setImplementationClass);
	return builder.build();
}
 
Example #21
Source File: ProjectionOperationFactory.java    From flink with Apache License 2.0
private String[] validateAndGetUniqueNames(List<ResolvedExpression> namedExpressions) {
	// we need to maintain field names order to match with types
	final Set<String> names = new LinkedHashSet<>();

	extractNames(namedExpressions).stream()
		.map(name -> name.orElseThrow(() -> new TableException("Could not name a field in a projection.")))
		.forEach(name -> {
			if (!names.add(name)) {
				throw new ValidationException("Ambiguous column name: " + name);
			}
		});

	return names.toArray(new String[0]);
}
 
Example #22
Source File: VarCharType.java    From flink with Apache License 2.0
@Override
public String asSerializableString() {
	if (length == EMPTY_LITERAL_LENGTH) {
		throw new TableException(
			"Zero-length character strings have no serializable string representation.");
	}
	return withNullability(FORMAT, length);
}
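For any positive length the method simply renders the type via the FORMAT template together with its nullability. A hedged usage sketch, assuming the public VarCharType(int) constructor from flink-table-common:

// Types created this way are nullable by default, so no NOT NULL suffix is rendered.
VarCharType varchar = new VarCharType(10);
System.out.println(varchar.asSerializableString()); // VARCHAR(10)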
 
Example #23
Source File: VarBinaryType.java    From flink with Apache License 2.0
@Override
public String asSerializableString() {
	if (length == EMPTY_LITERAL_LENGTH) {
		throw new TableException(
			"Zero-length binary strings have no serializable string representation.");
	}
	return withNullability(FORMAT, length);
}
 
Example #24
Source File: QueryOperationConverter.java    From flink with Apache License 2.0
@Override
public AggCall visit(CallExpression unresolvedCall) {
	if (unresolvedCall.getFunctionDefinition() == AS) {
		String aggregateName = extractValue(unresolvedCall.getChildren().get(1), String.class)
				.orElseThrow(() -> new TableException("Unexpected name."));

		Expression aggregate = unresolvedCall.getChildren().get(0);
		if (isFunctionOfKind(aggregate, AGGREGATE)) {
			return aggregate.accept(callResolver).accept(
					new AggCallVisitor(relBuilder, rexNodeConverter, aggregateName, false));
		}
	}
	throw new TableException("Expected named aggregate. Got: " + unresolvedCall);
}
 
Example #25
Source File: EncodingUtils.java    From flink with Apache License 2.0
public static byte[] md5(String string) {
	try {
		return MessageDigest.getInstance("MD5").digest(string.getBytes(UTF_8));
	} catch (NoSuchAlgorithmException e) {
		throw new TableException("Unsupported MD5 algorithm.", e);
	}
}
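The digest comes back as raw bytes; callers typically hex-encode it for display. A self-contained sketch combining the md5 pattern above with hex encoding; the class name is ours:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class Md5Demo {

	public static String md5Hex(String input) {
		try {
			byte[] digest = MessageDigest.getInstance("MD5")
				.digest(input.getBytes(StandardCharsets.UTF_8));
			StringBuilder hex = new StringBuilder(digest.length * 2);
			for (byte b : digest) {
				hex.append(String.format("%02x", b));
			}
			return hex.toString();
		} catch (NoSuchAlgorithmException e) {
			// Every compliant JVM ships MD5, so this branch is effectively unreachable.
			throw new IllegalStateException("Unsupported MD5 algorithm.", e);
		}
	}

	public static void main(String[] args) {
		System.out.println(md5Hex("flink")); // 32 lowercase hex characters
	}
}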
 
Example #26
Source File: TableSink.java    From flink with Apache License 2.0
/**
 * Returns the schema of the consumed table.
 *
 * @return The {@link TableSchema} of the consumed table.
 */
default TableSchema getTableSchema() {
	final String[] fieldNames = getFieldNames();
	final TypeInformation[] legacyFieldTypes = getFieldTypes();
	if (fieldNames == null || legacyFieldTypes == null) {
		throw new TableException("Table sink does not implement a table schema.");
	}
	return new TableSchema(fieldNames, legacyFieldTypes);
}
 
Example #27
Source File: TableSink.java    From flink with Apache License 2.0
/**
 * Returns the data type consumed by this {@link TableSink}.
 *
 * @return The data type expected by this {@link TableSink}.
 */
default DataType getConsumedDataType() {
	final TypeInformation<T> legacyType = getOutputType();
	if (legacyType == null) {
		throw new TableException("Table sink does not implement a consumed data type.");
	}
	return fromLegacyInfoToDataType(legacyType);
}
 
Example #28
Source File: ExpressionResolver.java    From flink with Apache License 2.0
@Override
protected ResolvedExpression defaultMethod(Expression expression) {
	if (expression instanceof ResolvedExpression) {
		return (ResolvedExpression) expression;
	}
	throw new TableException(
		"All expressions should have been resolved at this stage. Unexpected expression: " +
			expression);
}
 
Example #29
Source File: JavaTableEnvironmentITCase.java    From flink with Apache License 2.0
@Test(expected = TableException.class)
public void testCustomCalciteConfig() {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env, config());

	PlannerConfig cc = new CalciteConfigBuilder()
			.replaceLogicalOptRuleSet(RuleSets.ofList())
			.replacePhysicalOptRuleSet(RuleSets.ofList())
			.build();
	tableEnv.getConfig().setPlannerConfig(cc);

	DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.get3TupleDataSet(env);
	Table t = tableEnv.fromDataSet(ds);
	tableEnv.toDataSet(t, Row.class);
}
 
Example #30
Source File: JavaTableEnvironmentITCase.java    From flink with Apache License 2.0
@Test(expected = TableException.class)
public void testRegisterTableFromOtherEnv() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv1 = BatchTableEnvironment.create(env, config());
	BatchTableEnvironment tableEnv2 = BatchTableEnvironment.create(env, config());

	Table t = tableEnv1.fromDataSet(CollectionDataSets.get3TupleDataSet(env));
	// Must fail. Table is bound to different TableEnvironment.
	tableEnv2.registerTable("MyTable", t);
}