Java Code Examples for org.apache.flink.table.api.TableException

The following examples show how to use org.apache.flink.table.api.TableException. They are extracted from open source projects; the source project, source file, and license are noted above each example where available.
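
Before the individual examples, here is a minimal sketch of the two constructor forms of TableException that recur throughout this page: message only, and message plus cause. The helper names below are illustrative and not part of Flink.

import org.apache.flink.table.api.TableException;

public class TableExceptionSketch {

	// Message-only form, as used by the validation-style checks in the examples below.
	static void failUnsupported(String feature) {
		throw new TableException("Unsupported feature: " + feature);
	}

	// Message-plus-cause form, wrapping a checked exception, as the reflective examples below do.
	static Object instantiateOrFail(Class<?> clazz) {
		try {
			return clazz.getDeclaredConstructor().newInstance();
		} catch (Exception e) {
			throw new TableException("Could not instantiate " + clazz.getName() + ".", e);
		}
	}
}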
Example 1
@Override
public StreamTableSink<Row> createStreamTableSink(Map<String, String> properties) {
	final DescriptorProperties descriptorProperties = getValidatedProperties(properties);

	final TableSchema schema = descriptorProperties.getTableSchema(SCHEMA());
	final String topic = descriptorProperties.getString(CONNECTOR_TOPIC);
	final Optional<String> proctime = SchemaValidator.deriveProctimeAttribute(descriptorProperties);
	final List<RowtimeAttributeDescriptor> rowtimeAttributeDescriptors =
		SchemaValidator.deriveRowtimeAttributes(descriptorProperties);

	// see also FLINK-9870
	if (proctime.isPresent() || !rowtimeAttributeDescriptors.isEmpty() ||
			checkForCustomFieldMapping(descriptorProperties, schema)) {
		throw new TableException("Time attributes and custom field mappings are not supported yet.");
	}

	return createKafkaTableSink(
		schema,
		topic,
		getKafkaProperties(descriptorProperties),
		getFlinkKafkaPartitioner(descriptorProperties),
		getSerializationSchema(properties));
}
 
Example 2
@SuppressWarnings("unchecked")
private Optional<FlinkKafkaPartitioner<Row>> getFlinkKafkaPartitioner(DescriptorProperties descriptorProperties) {
	return descriptorProperties
		.getOptionalString(CONNECTOR_SINK_PARTITIONER)
		.flatMap((String partitionerString) -> {
			switch (partitionerString) {
				case CONNECTOR_SINK_PARTITIONER_VALUE_FIXED:
					return Optional.of(new FlinkFixedPartitioner<>());
				case CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN:
					return Optional.empty();
				case CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM:
					final Class<? extends FlinkKafkaPartitioner> partitionerClass =
						descriptorProperties.getClass(CONNECTOR_SINK_PARTITIONER_CLASS, FlinkKafkaPartitioner.class);
					return Optional.of((FlinkKafkaPartitioner<Row>) InstantiationUtil.instantiate(partitionerClass));
				default:
					throw new TableException("Unsupported sink partitioner. Validator should have checked that.");
			}
		});
}
 
Example 3
Source Project: flink   Source File: ProjectionOperationFactory.java    License: Apache License 2.0
private Optional<String> extractNameFromGet(CallExpression call) {
	Expression child = call.getChildren().get(0);
	ValueLiteralExpression key = (ValueLiteralExpression) call.getChildren().get(1);

	final LogicalType keyType = key.getOutputDataType().getLogicalType();

	final String keySuffix;
	if (hasRoot(keyType, INTEGER)) {
		keySuffix = "$_" + key.getValueAs(Integer.class)
			.orElseThrow(() -> new TableException("Integer constant excepted."));
	} else {
		keySuffix = "$" + key.getValueAs(String.class)
			.orElseThrow(() -> new TableException("Integer constant excepted."));
	}
	return child.accept(this).map(p -> p + keySuffix);
}
 
Example 4
Source Project: flink   Source File: ExecutionContext.java    License: Apache License 2.0
private static Executor lookupExecutor(
		Map<String, String> executorProperties,
		StreamExecutionEnvironment executionEnvironment) {
	try {
		ExecutorFactory executorFactory = ComponentFactoryService.find(ExecutorFactory.class, executorProperties);
		Method createMethod = executorFactory.getClass()
			.getMethod("create", Map.class, StreamExecutionEnvironment.class);

		return (Executor) createMethod.invoke(
			executorFactory,
			executorProperties,
			executionEnvironment);
	} catch (Exception e) {
		throw new TableException(
			"Could not instantiate the executor. Make sure a planner module is on the classpath",
			e);
	}
}
 
Example 5
Source Project: flink   Source File: RexNodeConverter.java    License: Apache License 2.0
/**
 * Extracts a value from a literal, including planner-specific instances such as {@link Decimal}.
 */
@SuppressWarnings("unchecked")
private static <T> T extractValue(ValueLiteralExpression literal, Class<T> clazz) {
	final Optional<Object> possibleObject = literal.getValueAs(Object.class);
	if (!possibleObject.isPresent()) {
		throw new TableException("Invalid literal.");
	}
	final Object object = possibleObject.get();

	if (clazz.equals(BigDecimal.class)) {
		final Optional<BigDecimal> possibleDecimal = literal.getValueAs(BigDecimal.class);
		if (possibleDecimal.isPresent()) {
			return (T) possibleDecimal.get();
		}
		if (object instanceof Decimal) {
			return (T) ((Decimal) object).toBigDecimal();
		}
	}

	return literal.getValueAs(clazz)
			.orElseThrow(() -> new TableException("Unsupported literal class: " + clazz));
}
 
Example 6
Source Project: flink   Source File: CassandraAppendTableSink.java    License: Apache License 2.0
@Override
public DataStreamSink<?> consumeDataStream(DataStream<Row> dataStream) {
	if (!(dataStream.getType() instanceof RowTypeInfo)) {
		throw new TableException("No support for the type of the given DataStream: " + dataStream.getType());
	}

	CassandraRowSink sink = new CassandraRowSink(
		dataStream.getType().getArity(),
		cql,
		builder,
		CassandraSinkBaseConfig.newBuilder().build(),
		new NoOpCassandraFailureHandler());

	return dataStream
			.addSink(sink)
			.setParallelism(dataStream.getParallelism())
			.name(TableConnectorUtils.generateRuntimeName(this.getClass(), fieldNames));

}
 
Example 7
Source Project: flink   Source File: KafkaTableSourceSinkFactoryBase.java    License: Apache License 2.0
@SuppressWarnings("unchecked")
private Optional<FlinkKafkaPartitioner<Row>> getFlinkKafkaPartitioner(DescriptorProperties descriptorProperties) {
	return descriptorProperties
		.getOptionalString(CONNECTOR_SINK_PARTITIONER)
		.flatMap((String partitionerString) -> {
			switch (partitionerString) {
				case CONNECTOR_SINK_PARTITIONER_VALUE_FIXED:
					return Optional.of(new FlinkFixedPartitioner<>());
				case CONNECTOR_SINK_PARTITIONER_VALUE_ROUND_ROBIN:
					return Optional.empty();
				case CONNECTOR_SINK_PARTITIONER_VALUE_CUSTOM:
					final Class<? extends FlinkKafkaPartitioner> partitionerClass =
						descriptorProperties.getClass(CONNECTOR_SINK_PARTITIONER_CLASS, FlinkKafkaPartitioner.class);
					return Optional.of((FlinkKafkaPartitioner<Row>) InstantiationUtil.instantiate(partitionerClass));
				default:
					throw new TableException("Unsupported sink partitioner. Validator should have checked that.");
			}
		});
}
 
Example 8
Source Project: flink   Source File: SqlToOperationConverter.java    License: Apache License 2.0
/**
 * This is the main entry point for executing all kinds of DDL/DML {@code SqlNode}s; each
 * SqlNode subtype has its own implementation in a #convert(type) method whose 'type' argument
 * is a subclass of {@code SqlNode}.
 *
 * @param flinkPlanner     FlinkPlannerImpl to convert sql node to rel node
 * @param sqlNode          SqlNode to execute on
 */
public static Operation convert(FlinkPlannerImpl flinkPlanner, SqlNode sqlNode) {
	// validate the query
	final SqlNode validated = flinkPlanner.validate(sqlNode);
	SqlToOperationConverter converter = new SqlToOperationConverter(flinkPlanner);
	if (validated instanceof SqlCreateTable) {
		return converter.convertCreateTable((SqlCreateTable) validated);
	} else if (validated instanceof SqlDropTable) {
		return converter.convertDropTable((SqlDropTable) validated);
	} else if (validated instanceof RichSqlInsert) {
		return converter.convertSqlInsert((RichSqlInsert) validated);
	} else if (validated.getKind().belongsTo(SqlKind.QUERY)) {
		return converter.convertSqlQuery(validated);
	} else {
		throw new TableException("Unsupported node type "
			+ validated.getClass().getSimpleName());
	}
}
 
Example 9
Source Project: flink   Source File: AggFunctionTestBase.java    License: Apache License 2.0
protected ACC accumulateValues(List<T> values)
		throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
	AggregateFunction<T, ACC> aggregator = getAggregator();
	ACC accumulator = getAggregator().createAccumulator();
	Method accumulateFunc = getAccumulateFunc();
	for (T value : values) {
		if (accumulateFunc.getParameterCount() == 1) {
			accumulateFunc.invoke(aggregator, (Object) accumulator);
		} else if (accumulateFunc.getParameterCount() == 2) {
			accumulateFunc.invoke(aggregator, (Object) accumulator, (Object) value);
		} else {
			throw new TableException("Unsupported now");
		}
	}
	return accumulator;
}
 
Example 10
Source Project: flink   Source File: SqlToOperationConverter.java    License: Apache License 2.0
/**
 * This is the main entry point for executing all kinds of DDL/DML {@code SqlNode}s; each
 * SqlNode subtype has its own implementation in a #convert(type) method whose 'type' argument
 * is a subclass of {@code SqlNode}.
 *
 * @param flinkPlanner     FlinkPlannerImpl to convert sql node to rel node
 * @param sqlNode          SqlNode to execute on
 */
public static Operation convert(FlinkPlannerImpl flinkPlanner, SqlNode sqlNode) {
	// validate the query
	final SqlNode validated = flinkPlanner.validate(sqlNode);
	SqlToOperationConverter converter = new SqlToOperationConverter(flinkPlanner);
	if (validated instanceof SqlCreateTable) {
		return converter.convertCreateTable((SqlCreateTable) validated);
	} else if (validated instanceof SqlDropTable) {
		return converter.convertDropTable((SqlDropTable) validated);
	} else if (validated instanceof RichSqlInsert) {
		return converter.convertSqlInsert((RichSqlInsert) validated);
	} else if (validated.getKind().belongsTo(SqlKind.QUERY)) {
		return converter.convertSqlQuery(validated);
	} else {
		throw new TableException("Unsupported node type "
			+ validated.getClass().getSimpleName());
	}
}
 
Example 11
Source Project: flink   Source File: BuiltInFunctionDefinitions.java    License: Apache License 2.0
public static List<BuiltInFunctionDefinition> getDefinitions() {
	final Field[] fields = BuiltInFunctionDefinitions.class.getFields();
	final List<BuiltInFunctionDefinition> list = new ArrayList<>(fields.length);
	for (Field field : fields) {
		if (FunctionDefinition.class.isAssignableFrom(field.getType())) {
			try {
				final BuiltInFunctionDefinition funcDef = (BuiltInFunctionDefinition) field.get(BuiltInFunctionDefinitions.class);
				list.add(Preconditions.checkNotNull(funcDef));
			} catch (IllegalAccessException e) {
				throw new TableException(
					"The function definition for field " + field.getName() + " is not accessible.", e);
			}
		}
	}
	return list;
}
 
Example 12
private StartupOptions getStartupOptions(
		DescriptorProperties descriptorProperties,
		String topic) {
	final Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
	final StartupMode startupMode = descriptorProperties
		.getOptionalString(CONNECTOR_STARTUP_MODE)
		.map(modeString -> {
			switch (modeString) {
				case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_EARLIEST:
					return StartupMode.EARLIEST;

				case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_LATEST:
					return StartupMode.LATEST;

				case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_GROUP_OFFSETS:
					return StartupMode.GROUP_OFFSETS;

				case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_SPECIFIC_OFFSETS:
					final List<Map<String, String>> offsetList = descriptorProperties.getFixedIndexedProperties(
						CONNECTOR_SPECIFIC_OFFSETS,
						Arrays.asList(CONNECTOR_SPECIFIC_OFFSETS_PARTITION, CONNECTOR_SPECIFIC_OFFSETS_OFFSET));
					offsetList.forEach(kv -> {
						final int partition = descriptorProperties.getInt(kv.get(CONNECTOR_SPECIFIC_OFFSETS_PARTITION));
						final long offset = descriptorProperties.getLong(kv.get(CONNECTOR_SPECIFIC_OFFSETS_OFFSET));
						final KafkaTopicPartition topicPartition = new KafkaTopicPartition(topic, partition);
						specificOffsets.put(topicPartition, offset);
					});
					return StartupMode.SPECIFIC_OFFSETS;
				default:
					throw new TableException("Unsupported startup mode. Validator should have checked that.");
			}
		}).orElse(StartupMode.GROUP_OFFSETS);
	final StartupOptions options = new StartupOptions();
	options.startupMode = startupMode;
	options.specificOffsets = specificOffsets;
	return options;
}
 
Example 13
Source Project: Flink-CEPplus   Source File: JavaTableEnvironmentITCase.java    License: Apache License 2.0
@Test(expected = TableException.class)
public void testRegisterExistingDatasetTable() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env, config());

	DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.get3TupleDataSet(env);
	tableEnv.registerDataSet("MyTable", ds);
	DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 =
			CollectionDataSets.getSmall5TupleDataSet(env);
	// Must fail. Name is already used for a different table.
	tableEnv.registerDataSet("MyTable", ds2);
}
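
The JUnit 4 tests on this page declare the expected failure with @Test(expected = TableException.class). As an alternative sketch, assuming JUnit 4.13+ (or JUnit 5) is on the test classpath so that assertThrows is available, the same test can make the failing call explicit:

import static org.junit.Assert.assertThrows;

@Test
public void testRegisterExistingDatasetTable() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env, config());

	DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.get3TupleDataSet(env);
	tableEnv.registerDataSet("MyTable", ds);
	DataSet<Tuple5<Integer, Long, Integer, String, Long>> ds2 =
			CollectionDataSets.getSmall5TupleDataSet(env);

	// Must fail. Name is already used for a different table.
	assertThrows(TableException.class, () -> tableEnv.registerDataSet("MyTable", ds2));
}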
 
Example 14
Source Project: Flink-CEPplus   Source File: JavaTableEnvironmentITCase.java    License: Apache License 2.0
@Test(expected = TableException.class)
public void testScanUnregisteredTable() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env, config());

	// Must fail. No table registered under that name.
	tableEnv.scan("nonRegisteredTable");
}
 
Example 15
Source Project: Flink-CEPplus   Source File: JavaTableEnvironmentITCase.java    License: Apache License 2.0
@Test(expected = TableException.class)
public void testIllegalName() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env, config());

	DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.get3TupleDataSet(env);
	Table t = tableEnv.fromDataSet(ds);
	// Must fail. Table name matches internal name pattern.
	tableEnv.registerTable("_DataSetTable_42", t);
}
 
Example 16
Source Project: Flink-CEPplus   Source File: JavaTableEnvironmentITCase.java    License: Apache License 2.0
@Test(expected = TableException.class)
public void testRegisterTableFromOtherEnv() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv1 = BatchTableEnvironment.create(env, config());
	BatchTableEnvironment tableEnv2 = BatchTableEnvironment.create(env, config());

	Table t = tableEnv1.fromDataSet(CollectionDataSets.get3TupleDataSet(env));
	// Must fail. Table is bound to different TableEnvironment.
	tableEnv2.registerTable("MyTable", t);
}
 
Example 17
Source Project: Flink-CEPplus   Source File: JavaTableEnvironmentITCase.java    License: Apache License 2.0
@Test(expected = TableException.class)
public void testGenericRow() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env, config());

	// use a null value to enforce GenericType
	DataSet<Row> dataSet = env.fromElements(Row.of(1, 2L, "Hello", null));
	assertTrue(dataSet.getType() instanceof GenericTypeInfo);
	assertTrue(dataSet.getType().getTypeClass().equals(Row.class));

	// Must fail. Cannot import DataSet<Row> with GenericTypeInfo.
	tableEnv.fromDataSet(dataSet);
}
 
Example 18
Source Project: Flink-CEPplus   Source File: JavaTableEnvironmentITCase.java    License: Apache License 2.0
@Test(expected = TableException.class)
public void testAsWithAmbiguousFields() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env, config());

	// Must fail. Specified field names are not unique.
	tableEnv.fromDataSet(CollectionDataSets.get3TupleDataSet(env), "a, b, b");
}
 
Example 19
Source Project: flink   Source File: AggregateOperationFactory.java    License: Apache License 2.0
/**
 * Converts an API class to a resolved window for planning with expressions already resolved.
 * It performs the following validations:
 * <ul>
 *     <li>The alias is represented with an unresolved reference</li>
 *     <li>The time attribute is a single field reference of a {@link TimeIndicatorTypeInfo}(stream),
 *     {@link SqlTimeTypeInfo}(batch), or {@link BasicTypeInfo#LONG_TYPE_INFO}(batch) type</li>
 *     <li>The size & slide are value literals of either {@link BasicTypeInfo#LONG_TYPE_INFO},
 *     or {@link TimeIntervalTypeInfo} type</li>
 *     <li>The size & slide are of the same type</li>
 *     <li>The gap is a value literal of a {@link TimeIntervalTypeInfo} type</li>
 * </ul>
 *
 * @param window window to resolve
 * @param resolver resolver to resolve potential unresolved field references
 * @return window with expressions resolved
 */
public ResolvedGroupWindow createResolvedWindow(GroupWindow window, ExpressionResolver resolver) {
	Expression alias = window.getAlias();

	if (!(alias instanceof UnresolvedReferenceExpression)) {
		throw new ValidationException("Only unresolved reference supported for alias of a group window.");
	}

	final String windowName = ((UnresolvedReferenceExpression) alias).getName();
	FieldReferenceExpression timeField = getValidatedTimeAttribute(window, resolver);

	if (window instanceof TumbleWithSizeOnTimeWithAlias) {
		return validateAndCreateTumbleWindow(
			(TumbleWithSizeOnTimeWithAlias) window,
			windowName,
			timeField);
	} else if (window instanceof SlideWithSizeAndSlideOnTimeWithAlias) {
		return validateAndCreateSlideWindow(
			(SlideWithSizeAndSlideOnTimeWithAlias) window,
			windowName,
			timeField);
	} else if (window instanceof SessionWithGapOnTimeWithAlias) {
		return validateAndCreateSessionWindow(
			(SessionWithGapOnTimeWithAlias) window,
			windowName,
			timeField);
	} else {
		throw new TableException("Unknown window type: " + window);
	}
}
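
For context, here is a sketch of the API-level window definitions that createResolvedWindow receives. The expression-string syntax below is assumed for the Flink version of these examples, and the field and alias names are illustrative.

import org.apache.flink.table.api.GroupWindow;
import org.apache.flink.table.api.Session;
import org.apache.flink.table.api.Slide;
import org.apache.flink.table.api.Tumble;

public class GroupWindowSketch {

	// Illustrative only: these builders produce the concrete GroupWindow subclasses checked above.
	static void defineWindows() {
		GroupWindow tumble = Tumble.over("10.minutes").on("rowtime").as("w");                  // TumbleWithSizeOnTimeWithAlias
		GroupWindow slide = Slide.over("10.minutes").every("5.minutes").on("rowtime").as("w"); // SlideWithSizeAndSlideOnTimeWithAlias
		GroupWindow session = Session.withGap("30.minutes").on("rowtime").as("w");             // SessionWithGapOnTimeWithAlias
	}
}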
 
Example 20
Source Project: Flink-CEPplus   Source File: JavaTableEnvironmentITCase.java    License: Apache License 2.0
@Test(expected = TableException.class)
public void testAsWithNonFieldReference2() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env, config());

	// Must fail. as() only allows field name expressions.
	tableEnv.fromDataSet(CollectionDataSets.get3TupleDataSet(env), "a as foo, b,  c");
}
 
Example 21
Source Project: Flink-CEPplus   Source File: JavaTableEnvironmentITCase.java    License: Apache License 2.0
@Test(expected = TableException.class)
public void testNonStaticClassInput() throws Exception {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env, config());

	// Must fail since class is not static
	tableEnv.fromDataSet(env.fromElements(new MyNonStatic()), "name");
}
 
Example 22
Source Project: Flink-CEPplus   Source File: JavaTableEnvironmentITCase.java    License: Apache License 2.0
@Test(expected = TableException.class)
public void testCustomCalciteConfig() {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env, config());

	CalciteConfig cc = new CalciteConfigBuilder()
			.replaceLogicalOptRuleSet(RuleSets.ofList())
			.replacePhysicalOptRuleSet(RuleSets.ofList())
			.build();
	tableEnv.getConfig().setCalciteConfig(cc);

	DataSet<Tuple3<Integer, Long, String>> ds = CollectionDataSets.get3TupleDataSet(env);
	Table t = tableEnv.fromDataSet(ds);
	tableEnv.toDataSet(t, Row.class);
}
 
Example 23
Source Project: Flink-CEPplus   Source File: EncodingUtils.java    License: Apache License 2.0
public static byte[] md5(String string) {
	try {
		return MessageDigest.getInstance("MD5").digest(string.getBytes(UTF_8));
	} catch (NoSuchAlgorithmException e) {
		throw new TableException("Unsupported MD5 algorithm.", e);
	}
}
 
Example 24
Source Project: flink   Source File: CsvTableSinkFactoryBase.java    License: Apache License 2.0
protected CsvTableSink createTableSink(
		Boolean isStreaming,
		Map<String, String> properties) {

	DescriptorProperties params = new DescriptorProperties();
	params.putProperties(properties);

	// validate
	new FileSystemValidator().validate(params);
	new OldCsvValidator().validate(params);
	new SchemaValidator(isStreaming, false, false).validate(params);

	// build
	TableSchema formatSchema = params.getTableSchema(FORMAT_FIELDS);
	TableSchema tableSchema = params.getTableSchema(SCHEMA);

	if (!formatSchema.equals(tableSchema)) {
		throw new TableException(
				"Encodings that differ from the schema are not supported yet for CsvTableSink.");
	}

	String path = params.getString(CONNECTOR_PATH);
	String fieldDelimiter = params.getOptionalString(FORMAT_FIELD_DELIMITER).orElse(",");

	CsvTableSink csvTableSink = new CsvTableSink(path, fieldDelimiter);

	return (CsvTableSink) csvTableSink.configure(formatSchema.getFieldNames(), formatSchema.getFieldTypes());
}
 
Example 25
Source Project: flink   Source File: KafkaTableSourceSinkFactoryBase.java    License: Apache License 2.0
private StartupOptions getStartupOptions(
		DescriptorProperties descriptorProperties,
		String topic) {
	final Map<KafkaTopicPartition, Long> specificOffsets = new HashMap<>();
	final StartupMode startupMode = descriptorProperties
		.getOptionalString(CONNECTOR_STARTUP_MODE)
		.map(modeString -> {
			switch (modeString) {
				case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_EARLIEST:
					return StartupMode.EARLIEST;

				case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_LATEST:
					return StartupMode.LATEST;

				case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_GROUP_OFFSETS:
					return StartupMode.GROUP_OFFSETS;

				case KafkaValidator.CONNECTOR_STARTUP_MODE_VALUE_SPECIFIC_OFFSETS:
					final List<Map<String, String>> offsetList = descriptorProperties.getFixedIndexedProperties(
						CONNECTOR_SPECIFIC_OFFSETS,
						Arrays.asList(CONNECTOR_SPECIFIC_OFFSETS_PARTITION, CONNECTOR_SPECIFIC_OFFSETS_OFFSET));
					offsetList.forEach(kv -> {
						final int partition = descriptorProperties.getInt(kv.get(CONNECTOR_SPECIFIC_OFFSETS_PARTITION));
						final long offset = descriptorProperties.getLong(kv.get(CONNECTOR_SPECIFIC_OFFSETS_OFFSET));
						final KafkaTopicPartition topicPartition = new KafkaTopicPartition(topic, partition);
						specificOffsets.put(topicPartition, offset);
					});
					return StartupMode.SPECIFIC_OFFSETS;
				default:
					throw new TableException("Unsupported startup mode. Validator should have checked that.");
			}
		}).orElse(StartupMode.GROUP_OFFSETS);
	final StartupOptions options = new StartupOptions();
	options.startupMode = startupMode;
	options.specificOffsets = specificOffsets;
	return options;
}
 
Example 26
Source Project: flink   Source File: SqlDateTimeUtils.java    License: Apache License 2.0
private static long convertExtract(TimeUnitRange range, long ts, LogicalType type, TimeZone tz) {
	TimeUnit startUnit = range.startUnit;
	long offset = tz.getOffset(ts);
	long utcTs = ts + offset;

	switch (startUnit) {
		case MILLENNIUM:
		case CENTURY:
		case YEAR:
		case QUARTER:
		case MONTH:
		case DAY:
		case DOW:
		case DOY:
		case WEEK:
			if (type instanceof TimestampType) {
				long d = divide(utcTs, TimeUnit.DAY.multiplier);
				return DateTimeUtils.unixDateExtract(range, d);
			} else if (type instanceof DateType) {
				return divide(utcTs, TimeUnit.DAY.multiplier);
			} else {
				// TODO support it
				throw new TableException(type + " is unsupported now.");
			}
		case DECADE:
			// TODO support it
			throw new TableException("DECADE is unsupported now.");
		case EPOCH:
			// TODO support it
			throw new TableException("EPOCH is unsupported now.");
		default:
			// fall through
	}

	long res = mod(utcTs, getFactory(startUnit));
	res = divide(res, startUnit.multiplier);
	return res;

}
 
Example 27
Source Project: flink   Source File: DatabaseCalciteSchema.java    License: Apache License 2.0
private Table convertCatalogTable(ObjectPath tablePath, CatalogTable table) {
	TableSource<?> tableSource;
	Optional<TableFactory> tableFactory = catalog.getTableFactory();
	if (tableFactory.isPresent()) {
		TableFactory tf = tableFactory.get();
		if (tf instanceof TableSourceFactory) {
			tableSource = ((TableSourceFactory) tf).createTableSource(tablePath, table);
		} else {
			throw new TableException(String.format("Cannot query a sink-only table. TableFactory provided by catalog %s must implement TableSourceFactory",
				catalog.getClass()));
		}
	} else {
		tableSource = TableFactoryUtil.findAndCreateTableSource(table);
	}

	if (!(tableSource instanceof StreamTableSource)) {
		throw new TableException("Catalog tables support only StreamTableSource and InputFormatTableSource");
	}

	return new TableSourceTable<>(
		tableSource,
		// This means the TableSource extends StreamTableSource; this is needed for the legacy
		// planner. The Blink planner should use the information that comes from the TableSource
		// itself to determine whether it is a streaming or batch source.
		isStreamingMode,
		FlinkStatistic.UNKNOWN()
	);
}
 
Example 28
Source Project: flink   Source File: PlanningConfigurationBuilder.java    License: Apache License 2.0
private FlinkSqlConformance getSqlConformance() {
	SqlDialect sqlDialect = tableConfig.getSqlDialect();
	switch (sqlDialect) {
		case HIVE:
			return FlinkSqlConformance.HIVE;
		case DEFAULT:
			return FlinkSqlConformance.DEFAULT;
		default:
			throw new TableException("Unsupported SQL dialect: " + sqlDialect);
	}
}
 
Example 29
Source Project: flink   Source File: QueryOperationConverter.java    License: Apache License 2.0
private LogicalWindow toLogicalWindow(ResolvedGroupWindow window) {
	TypeInformation<?> windowType = fromDataTypeToLegacyInfo(window.getTimeAttribute().getOutputDataType());
	WindowReference windowReference = new WindowReference(window.getAlias(), new Some<>(windowType));
	switch (window.getType()) {
		case SLIDE:
			return new SlidingGroupWindow(
				windowReference,
				expressionBridge.bridge(window.getTimeAttribute()),
				window.getSize().map(expressionBridge::bridge).get(),
				window.getSlide().map(expressionBridge::bridge).get()
			);
		case SESSION:
			return new SessionGroupWindow(
				windowReference,
				expressionBridge.bridge(window.getTimeAttribute()),
				window.getGap().map(expressionBridge::bridge).get()
			);
		case TUMBLE:
			return new TumblingGroupWindow(
				windowReference,
				expressionBridge.bridge(window.getTimeAttribute()),
				window.getSize().map(expressionBridge::bridge).get()
			);
		default:
			throw new TableException("Unknown window type");
	}
}
 
Example 30
Source Project: flink   Source File: TableFactoryUtil.java    License: Apache License 2.0
private static <T> TableSink<T> findAndCreateTableSink(Map<String, String> properties) {
	TableSink tableSink;
	try {
		tableSink = TableFactoryService
			.find(TableSinkFactory.class, properties)
			.createTableSink(properties);
	} catch (Throwable t) {
		throw new TableException("findAndCreateTableSink failed.", t);
	}

	return tableSink;
}
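
Since TableException is an unchecked RuntimeException, callers of a helper like findAndCreateTableSink above normally let it propagate. Below is a hedged, method-level sketch of catching it to add context; the method name, the tableName parameter, and the calling context are hypothetical.

private static <T> TableSink<T> createSinkOrExplain(String tableName, Map<String, String> properties) {
	try {
		return findAndCreateTableSink(properties);
	} catch (TableException e) {
		// Re-throw with extra context about which table the sink was being created for.
		throw new TableException("Could not create a sink for table '" + tableName + "'.", e);
	}
}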