org.apache.flink.table.api.TableConfig Java Examples

The following examples show how to use org.apache.flink.table.api.TableConfig. They are drawn from open-source projects; the originating source file and license are noted above each example.
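
Before the examples, a minimal, hedged sketch of what TableConfig is: a holder for table-program settings backed by a Flink Configuration. The option key shown is the one checked in Example #18 below; treat the exact string as an assumption.

	// Obtain the default configuration and set an option on the wrapped Configuration.
	TableConfig config = TableConfig.getDefault();
	// Planner and runtime options are read from this underlying Configuration.
	config.getConfiguration().setString("table.exec.disabled-operators", "HashJoin");
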
Example #1
Source File: OperationTreeBuilder.java    From flink with Apache License 2.0
private OperationTreeBuilder(
		TableConfig config,
		FunctionLookup functionLookup,
		DataTypeFactory typeFactory,
		TableReferenceLookup tableReferenceLookup,
		ProjectionOperationFactory projectionOperationFactory,
		SortOperationFactory sortOperationFactory,
		CalculatedTableFactory calculatedTableFactory,
		SetOperationFactory setOperationFactory,
		AggregateOperationFactory aggregateOperationFactory,
		JoinOperationFactory joinOperationFactory,
		ValuesOperationFactory valuesOperationFactory) {
	this.config = config;
	this.functionCatalog = functionLookup;
	this.typeFactory = typeFactory;
	this.tableReferenceLookup = tableReferenceLookup;
	this.projectionOperationFactory = projectionOperationFactory;
	this.sortOperationFactory = sortOperationFactory;
	this.calculatedTableFactory = calculatedTableFactory;
	this.setOperationFactory = setOperationFactory;
	this.aggregateOperationFactory = aggregateOperationFactory;
	this.joinOperationFactory = joinOperationFactory;
	this.valuesOperationFactory = valuesOperationFactory;
	this.lookupResolver = new LookupCallResolver(functionLookup);
}
 
Example #2
Source File: StreamTableEnvironmentImplTest.java    From flink with Apache License 2.0
private StreamTableEnvironmentImpl getStreamTableEnvironment(
		StreamExecutionEnvironment env,
		DataStreamSource<Integer> elements) {
	TableConfig config = new TableConfig();
	CatalogManager catalogManager = CatalogManagerMocks.createEmptyCatalogManager();
	ModuleManager moduleManager = new ModuleManager();
	return new StreamTableEnvironmentImpl(
		catalogManager,
		moduleManager,
		new FunctionCatalog(config, catalogManager, moduleManager),
		config,
		env,
		new TestPlanner(elements.getTransformation()),
		new ExecutorMock(),
		true
	);
}
 
Example #3
Source File: TableEnvFactory.java    From zeppelin with Apache License 2.0
public TableEnvironment createJavaFlinkBatchTableEnvironment() {
  try {
    Class<?> clazz = null;
    if (flinkVersion.isFlink110()) {
      clazz = Class
              .forName("org.apache.flink.table.api.java.internal.BatchTableEnvironmentImpl");
    } else {
      clazz = Class
              .forName("org.apache.flink.table.api.bridge.java.internal.BatchTableEnvironmentImpl");
    }

    Constructor<?> con = clazz.getConstructor(
            ExecutionEnvironment.class,
            TableConfig.class,
            CatalogManager.class,
            ModuleManager.class);
    return (TableEnvironment) con.newInstance(
            benv.getJavaEnv(),
            tblConfig,
            catalogManager,
            moduleManager);
  } catch (Throwable t) {
    throw new TableException("Create BatchTableEnvironment failed.", t);
  }
}
 
Example #4
Source File: ExecutionContext.java    From flink with Apache License 2.0
private TableConfig createTableConfig() {
	final TableConfig config = new TableConfig();
	config.addConfiguration(flinkConfig);
	Configuration conf = config.getConfiguration();
	environment.getConfiguration().asMap().forEach(conf::setString);
	ExecutionEntry execution = environment.getExecution();
	config.setIdleStateRetentionTime(
			Time.milliseconds(execution.getMinStateRetention()),
			Time.milliseconds(execution.getMaxStateRetention()));

	conf.set(CoreOptions.DEFAULT_PARALLELISM, execution.getParallelism());
	conf.set(PipelineOptions.MAX_PARALLELISM, execution.getMaxParallelism());
	conf.set(StreamPipelineOptions.TIME_CHARACTERISTIC, execution.getTimeCharacteristic());
	if (execution.getTimeCharacteristic() == TimeCharacteristic.EventTime) {
		conf.set(PipelineOptions.AUTO_WATERMARK_INTERVAL,
				Duration.ofMillis(execution.getPeriodicWatermarksInterval()));
	}

	setRestartStrategy(conf);
	return config;
}
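
For comparison, a self-contained sketch of applying similar settings directly, without the SQL-client Environment indirection; the values and the flinkConfig variable are illustrative assumptions.

	TableConfig config = new TableConfig();
	// Merge an existing flink-conf style Configuration into the table config.
	config.addConfiguration(flinkConfig);
	// Clean up idle state after it has been unused for 12 to 24 hours.
	config.setIdleStateRetentionTime(Time.hours(12), Time.hours(24));
	config.getConfiguration().set(CoreOptions.DEFAULT_PARALLELISM, 4);
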
 
Example #5
Source File: BatchExecutorTest.java    From flink with Apache License 2.0
public BatchExecutorTest() {
	batchExecutor = new BatchExecutor(LocalStreamEnvironment.getExecutionEnvironment());

	final Transformation testTransform = new LegacySourceTransformation<>(
		"MockTransform",
		new StreamSource<>(new SourceFunction<String>() {
			@Override
			public void run(SourceContext<String> ctx) {
			}

			@Override
			public void cancel() {
			}
		}),
		BasicTypeInfo.STRING_TYPE_INFO,
		1);
	Pipeline pipeline = batchExecutor.createPipeline(
		Collections.singletonList(testTransform), new TableConfig(), "Test Job");
	streamGraph = (StreamGraph) pipeline;
}
 
Example #6
Source File: KeySelectorUtil.java    From flink with Apache License 2.0
/**
 * Creates a RowDataKeySelector to extract keys from a DataStream whose type is RowDataTypeInfo.
 *
 * @param keyFields the positions of the key fields
 * @param rowType the type of the DataStream to extract keys from
 * @return a RowDataKeySelector that extracts keys from a DataStream whose type is RowDataTypeInfo
 */
public static RowDataKeySelector getRowDataSelector(int[] keyFields, RowDataTypeInfo rowType) {
	if (keyFields.length > 0) {
		LogicalType[] inputFieldTypes = rowType.getLogicalTypes();
		LogicalType[] keyFieldTypes = new LogicalType[keyFields.length];
		for (int i = 0; i < keyFields.length; ++i) {
			keyFieldTypes[i] = inputFieldTypes[keyFields[i]];
		}
		// do not provide field names for the result key type,
		// because we may have duplicate key fields and the field names may conflict
		RowType returnType = RowType.of(keyFieldTypes);
		RowType inputType = rowType.toRowType();
		GeneratedProjection generatedProjection = ProjectionCodeGenerator.generateProjection(
			CodeGeneratorContext.apply(new TableConfig()),
			"KeyProjection",
			inputType,
			returnType,
			keyFields);
		RowDataTypeInfo keyRowType = RowDataTypeInfo.of(returnType);
		return new BinaryRowDataKeySelector(keyRowType, generatedProjection);
	} else {
		return EmptyRowDataKeySelector.INSTANCE;
	}
}
 
Example #7
Source File: TableEnvFactory.java    From zeppelin with Apache License 2.0
public TableEnvironment createScalaFlinkBatchTableEnvironment() {
  try {
    Class<?> clazz = null;
    if (flinkVersion.isFlink110()) {
      clazz = Class
              .forName("org.apache.flink.table.api.scala.internal.BatchTableEnvironmentImpl");
    } else {
      clazz = Class
              .forName("org.apache.flink.table.api.bridge.scala.internal.BatchTableEnvironmentImpl");
    }
    Constructor<?> constructor = clazz
            .getConstructor(
                    org.apache.flink.api.scala.ExecutionEnvironment.class,
                    TableConfig.class,
                    CatalogManager.class,
                    ModuleManager.class);

    return (TableEnvironment)
            constructor.newInstance(benv, tblConfig, catalogManager, moduleManager);
  } catch (Exception e) {
    throw new TableException("Fail to createScalaFlinkBatchTableEnvironment", e);
  }
}
 
Example #8
Source File: KeySelectorUtil.java    From flink with Apache License 2.0
/**
 * Creates a BaseRowKeySelector to extract keys from a DataStream whose type is BaseRowTypeInfo.
 *
 * @param keyFields the positions of the key fields
 * @param rowType the type of the DataStream to extract keys from
 * @return a BaseRowKeySelector that extracts keys from a DataStream whose type is BaseRowTypeInfo
 */
public static BaseRowKeySelector getBaseRowSelector(int[] keyFields, BaseRowTypeInfo rowType) {
	if (keyFields.length > 0) {
		LogicalType[] inputFieldTypes = rowType.getLogicalTypes();
		String[] inputFieldNames = rowType.getFieldNames();
		LogicalType[] keyFieldTypes = new LogicalType[keyFields.length];
		String[] keyFieldNames = new String[keyFields.length];
		for (int i = 0; i < keyFields.length; ++i) {
			keyFieldTypes[i] = inputFieldTypes[keyFields[i]];
			keyFieldNames[i] = inputFieldNames[keyFields[i]];
		}
		RowType returnType = RowType.of(keyFieldTypes, keyFieldNames);
		RowType inputType = RowType.of(inputFieldTypes, rowType.getFieldNames());
		GeneratedProjection generatedProjection = ProjectionCodeGenerator.generateProjection(
			CodeGeneratorContext.apply(new TableConfig()),
			"KeyProjection",
			inputType,
			returnType, keyFields);
		BaseRowTypeInfo keyRowType = BaseRowTypeInfo.of(returnType);
		return new BinaryRowKeySelector(keyRowType, generatedProjection);
	} else {
		return NullBinaryRowKeySelector.INSTANCE;
	}
}
 
Example #9
Source File: PlanningConfigurationBuilder.java    From flink with Apache License 2.0
public PlanningConfigurationBuilder(
		TableConfig tableConfig,
		FunctionCatalog functionCatalog,
		CalciteSchema rootSchema,
		ExpressionBridge<PlannerExpression> expressionBridge) {
	this.tableConfig = tableConfig;
	this.functionCatalog = functionCatalog;

	// the converter is needed when calling temporal table functions from SQL, because
	// they reference a history table represented with a tree of table operations.
	this.context = Contexts.of(expressionBridge, tableConfig);

	this.planner = new VolcanoPlanner(costFactory, context);
	planner.setExecutor(new ExpressionReducer(tableConfig));
	planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

	this.expressionBridge = expressionBridge;

	this.rootSchema = rootSchema;
}
 
Example #10
Source File: TableEnvFactory.java    From zeppelin with Apache License 2.0
public TableEnvFactory(FlinkVersion flinkVersion,
                       FlinkShims flinkShims,
                       org.apache.flink.api.scala.ExecutionEnvironment env,
                       org.apache.flink.streaming.api.scala.StreamExecutionEnvironment senv,
                       TableConfig tblConfig,
                       CatalogManager catalogManager,
                       ModuleManager moduleManager,
                       FunctionCatalog flinkFunctionCatalog,
                       FunctionCatalog blinkFunctionCatalog) {
  this.flinkVersion = flinkVersion;
  this.flinkShims = flinkShims;
  this.benv = env;
  this.senv = senv;
  this.tblConfig = tblConfig;
  this.catalogManager = catalogManager;
  this.moduleManager = moduleManager;
  this.flinkFunctionCatalog = flinkFunctionCatalog;
  this.blinkFunctionCatalog = blinkFunctionCatalog;
}
 
Example #11
Source File: DatabaseCalciteSchemaTest.java    From flink with Apache License 2.0
@Test
public void testCatalogTable() throws TableAlreadyExistException, DatabaseNotExistException {
	GenericInMemoryCatalog catalog = new GenericInMemoryCatalog(catalogName, databaseName);
	CatalogManager catalogManager = CatalogManagerMocks.preparedCatalogManager()
		.defaultCatalog(catalogName, catalog)
		.build();
	catalogManager.setCatalogTableSchemaResolver(new CatalogTableSchemaResolver(new ParserMock(), true));

	DatabaseCalciteSchema calciteSchema = new DatabaseCalciteSchema(
		true,
		databaseName,
		catalogName,
		catalogManager,
		new TableConfig());

	catalog.createTable(new ObjectPath(databaseName, tableName), new TestCatalogBaseTable(), false);
	Table table = calciteSchema.getTable(tableName);

	assertThat(table, instanceOf(TableSourceTable.class));
	TableSourceTable tableSourceTable = (TableSourceTable) table;
	assertThat(tableSourceTable.tableSource(), instanceOf(TestExternalTableSource.class));
	assertThat(tableSourceTable.isStreamingMode(), is(true));
}
 
Example #12
Source File: GroupingSetsITCase.java    From flink with Apache License 2.0
@Before
public void setupTables() {
	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	tableEnv = BatchTableEnvironment.create(env, new TableConfig());

	DataSet<Tuple3<Integer, Long, String>> dataSet = CollectionDataSets.get3TupleDataSet(env);
	tableEnv.registerDataSet(TABLE_NAME, dataSet);

	MapOperator<Tuple3<Integer, Long, String>, Tuple3<Integer, Long, String>> dataSetWithNulls =
		dataSet.map(new MapFunction<Tuple3<Integer, Long, String>, Tuple3<Integer, Long, String>>() {

			@Override
			public Tuple3<Integer, Long, String> map(Tuple3<Integer, Long, String> value) throws Exception {
				if (value.f2.toLowerCase().contains("world")) {
					value.f2 = null;
				}
				return value;
			}
		});
	tableEnv.registerDataSet(TABLE_WITH_NULLS_NAME, dataSetWithNulls);
}
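
As a hedged follow-up, the kind of query this fixture exists for (the concrete test queries live in the rest of GroupingSetsITCase; this one is illustrative, using the default tuple field names f0/f1/f2):

	Table result = tableEnv.sqlQuery(
		"SELECT f1, f2, COUNT(f0) FROM " + TABLE_NAME +
		" GROUP BY GROUPING SETS ((f1), (f2))");
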
 
Example #13
Source File: TableEnvironmentImpl.java    From flink with Apache License 2.0
protected TableEnvironmentImpl(
		CatalogManager catalogManager,
		TableConfig tableConfig,
		Executor executor,
		FunctionCatalog functionCatalog,
		Planner planner,
		boolean isStreamingMode) {
	this.catalogManager = catalogManager;
	this.execEnv = executor;

	this.tableConfig = tableConfig;

	this.functionCatalog = functionCatalog;
	this.planner = planner;
	this.operationTreeBuilder = OperationTreeBuilder.create(
		functionCatalog,
		path -> {
			Optional<CatalogQueryOperation> catalogTableOperation = scanInternal(path);
			return catalogTableOperation.map(tableOperation -> new TableReferenceExpression(path, tableOperation));
		},
		isStreamingMode
	);
}
 
Example #14
Source File: LongHashJoinGeneratorTest.java    From flink with Apache License 2.0
@Override
public Object newOperator(long memorySize, HashJoinType type, boolean reverseJoinFunction) {
	RowType keyType = RowType.of(new IntType());
	Assert.assertTrue(LongHashJoinGenerator.support(type, keyType, new boolean[] {true}));
	return LongHashJoinGenerator.gen(
			new TableConfig(), type,
			keyType,
			RowType.of(new IntType(), new IntType()),
			RowType.of(new IntType(), new IntType()),
			new int[]{0},
			new int[]{0},
			20, 10000,
			reverseJoinFunction,
			new GeneratedJoinCondition(MyJoinCondition.class.getCanonicalName(), "", new Object[0])
	);
}
 
Example #15
Source File: PlannerContext.java    From flink with Apache License 2.0
public PlannerContext(
		TableConfig tableConfig,
		FunctionCatalog functionCatalog,
		CalciteSchema rootSchema,
		List<RelTraitDef> traitDefs) {
	this.tableConfig = tableConfig;
	this.functionCatalog = functionCatalog;
	this.context = new FlinkContextImpl(tableConfig, functionCatalog);
	this.rootSchema = rootSchema;
	this.traitDefs = traitDefs;
	// Create a framework config to initialize the RelOptCluster instance;
	// note that we can only use attributes that cannot be overwritten/configured
	// by the user.
	final FrameworkConfig frameworkConfig = createFrameworkConfig();

	RelOptPlanner planner = new VolcanoPlanner(frameworkConfig.getCostFactory(), frameworkConfig.getContext());
	planner.setExecutor(frameworkConfig.getExecutor());
	for (RelTraitDef traitDef : frameworkConfig.getTraitDefs()) {
		planner.addRelTraitDef(traitDef);
	}
	this.cluster = FlinkRelOptClusterFactory.create(planner, new RexBuilder(typeFactory));
}
 
Example #16
Source File: PlanningConfigurationBuilder.java    From flink with Apache License 2.0
public PlanningConfigurationBuilder(
		TableConfig tableConfig,
		FunctionCatalog functionCatalog,
		CalciteSchema rootSchema,
		ExpressionBridge<PlannerExpression> expressionBridge) {
	this.tableConfig = tableConfig;
	this.functionCatalog = functionCatalog;

	// the converter is needed when calling temporal table functions from SQL, because
	// they reference a history table represented with a tree of table operations
	this.context = Contexts.of(expressionBridge);

	this.planner = new VolcanoPlanner(costFactory, context);
	planner.setExecutor(new ExpressionReducer(tableConfig));
	planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

	this.expressionBridge = expressionBridge;

	this.rootSchema = rootSchema;
}
 
Example #17
Source File: ExecutionContext.java    From flink with Apache License 2.0
private static TableEnvironment createStreamTableEnvironment(
		StreamExecutionEnvironment env,
		EnvironmentSettings settings,
		TableConfig config,
		Executor executor,
		CatalogManager catalogManager,
		ModuleManager moduleManager,
		FunctionCatalog functionCatalog) {

	final Map<String, String> plannerProperties = settings.toPlannerProperties();
	final Planner planner = ComponentFactoryService.find(PlannerFactory.class, plannerProperties)
		.create(plannerProperties, executor, config, functionCatalog, catalogManager);

	return new StreamTableEnvironmentImpl(
		catalogManager,
		moduleManager,
		functionCatalog,
		config,
		env,
		planner,
		executor,
		settings.isStreamingMode());
}
 
Example #18
Source File: TableConfigUtils.java    From flink with Apache License 2.0
/**
 * Returns whether the given operator type is disabled.
 *
 * @param tableConfig TableConfig object
 * @param operatorType operator type to check
 * @return true if the given operator is disabled.
 */
public static boolean isOperatorDisabled(TableConfig tableConfig, OperatorType operatorType) {
	String value = tableConfig.getConfiguration().getString(TABLE_EXEC_DISABLED_OPERATORS);
	if (value == null) {
		return false;
	}
	String[] operators = value.split(",");
	Set<OperatorType> operatorSets = new HashSet<>();
	for (String operator : operators) {
		operator = operator.trim();
		if (operator.isEmpty()) {
			continue;
		}
		if (operator.equals("HashJoin")) {
			operatorSets.add(OperatorType.BroadcastHashJoin);
			operatorSets.add(OperatorType.ShuffleHashJoin);
		} else {
			operatorSets.add(OperatorType.valueOf(operator));
		}
	}
	return operatorSets.contains(operatorType);
}
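
A hedged usage sketch; the key string is assumed to match TABLE_EXEC_DISABLED_OPERATORS ("table.exec.disabled-operators" in the blink planner's ExecutionConfigOptions):

	TableConfig tableConfig = new TableConfig();
	tableConfig.getConfiguration().setString(
		"table.exec.disabled-operators", "SortMergeJoin, HashJoin");
	// "HashJoin" expands to both the broadcast and shuffle variants (see above),
	// so the following returns true.
	boolean disabled = TableConfigUtils.isOperatorDisabled(tableConfig, OperatorType.ShuffleHashJoin);
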
 
Example #19
Source File: OperationTreeBuilder.java    From flink with Apache License 2.0
public static OperationTreeBuilder create(
		TableConfig config,
		FunctionLookup functionCatalog,
		DataTypeFactory typeFactory,
		TableReferenceLookup tableReferenceLookup,
		boolean isStreamingMode) {
	return new OperationTreeBuilder(
		config,
		functionCatalog,
		typeFactory,
		tableReferenceLookup,
		new ProjectionOperationFactory(),
		new SortOperationFactory(isStreamingMode),
		new CalculatedTableFactory(),
		new SetOperationFactory(isStreamingMode),
		new AggregateOperationFactory(isStreamingMode),
		new JoinOperationFactory(),
		new ValuesOperationFactory()
	);
}
 
Example #20
Source File: TableEnvironmentImpl.java    From flink with Apache License 2.0
protected TableEnvironmentImpl(
		CatalogManager catalogManager,
		ModuleManager moduleManager,
		TableConfig tableConfig,
		Executor executor,
		FunctionCatalog functionCatalog,
		Planner planner,
		boolean isStreamingMode) {
	this.catalogManager = catalogManager;
	this.catalogManager.setCatalogTableSchemaResolver(
			new CatalogTableSchemaResolver(planner.getParser(), isStreamingMode));
	this.moduleManager = moduleManager;
	this.execEnv = executor;

	this.tableConfig = tableConfig;

	this.functionCatalog = functionCatalog;
	this.planner = planner;
	this.parser = planner.getParser();
	this.isStreamingMode = isStreamingMode;
	this.operationTreeBuilder = OperationTreeBuilder.create(
		tableConfig,
		functionCatalog.asLookup(parser::parseIdentifier),
		catalogManager.getDataTypeFactory(),
		path -> {
			try {
				UnresolvedIdentifier unresolvedIdentifier = parser.parseIdentifier(path);
				Optional<CatalogQueryOperation> catalogQueryOperation = scanInternal(unresolvedIdentifier);
				return catalogQueryOperation.map(t -> ApiExpressionUtils.tableRef(path, t));
			} catch (SqlParserException ex) {
				// The TableLookup is used during resolution of expressions and it actually might not be an
				// identifier of a table. It might be a reference to some other object such as column, local
				// reference etc. This method should return empty optional in such cases to fallback for other
				// identifiers resolution.
				return Optional.empty();
			}
		},
		isStreamingMode
	);
}
 
Example #21
Source File: FunctionCatalog.java    From flink with Apache License 2.0
public FunctionCatalog(
		TableConfig config,
		CatalogManager catalogManager,
		ModuleManager moduleManager) {
	this.config = checkNotNull(config).getConfiguration();
	this.catalogManager = checkNotNull(catalogManager);
	this.moduleManager = checkNotNull(moduleManager);
}
 
Example #22
Source File: ExpressionResolver.java    From flink with Apache License 2.0
/**
 * Creates a builder for an {@link ExpressionResolver}. Additional properties, such as a
 * {@link GroupWindow} or an {@link OverWindow}, can be added to the resolver, as can additional {@link ResolverRule}s.
 *
 * @param config general configuration
 * @param tableCatalog a way to lookup a table reference by name
 * @param functionLookup a way to lookup call by name
 * @param typeFactory a way to lookup and create data types
 * @param inputs inputs to use for field resolution
 * @return builder for resolver
 */
public static ExpressionResolverBuilder resolverFor(
		TableConfig config,
		TableReferenceLookup tableCatalog,
		FunctionLookup functionLookup,
		DataTypeFactory typeFactory,
		QueryOperation... inputs) {
	return new ExpressionResolverBuilder(
		inputs,
		config,
		tableCatalog,
		functionLookup,
		typeFactory);
}
 
Example #23
Source File: TableConfigUtils.java    From flink with Apache License 2.0
/**
 * Returns the configured time in milliseconds.
 *
 * @param tableConfig TableConfig object
 * @param config the config option to fetch
 * @return the time in milliseconds, or null if the option is not set
 */
public static Long getMillisecondFromConfigDuration(TableConfig tableConfig, ConfigOption<String> config) {
	String timeStr = tableConfig.getConfiguration().getString(config);
	if (timeStr != null) {
		Duration duration = Duration.create(timeStr);
		if (duration.isFinite()) {
			return duration.toMillis();
		} else {
			throw new IllegalArgumentException(config.key() + " must be finite.");
		}
	} else {
		return null;
	}
}
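
A usage sketch under the assumption that some ConfigOption&lt;String&gt; named OPTION holds a duration string that Scala's Duration.create can parse:

	TableConfig tableConfig = new TableConfig();
	// OPTION is a hypothetical ConfigOption<String> used for illustration.
	tableConfig.getConfiguration().setString(OPTION, "10 min");
	Long millis = TableConfigUtils.getMillisecondFromConfigDuration(tableConfig, OPTION);
	// millis == 600000L; an infinite duration throws IllegalArgumentException,
	// and an unset option yields null.
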
 
Example #24
Source File: StreamPlannerFactory.java    From flink with Apache License 2.0
@Override
public Planner create(
	Map<String, String> properties,
	Executor executor,
	TableConfig tableConfig,
	FunctionCatalog functionCatalog,
	CatalogManager catalogManager) {
	return new StreamPlanner(executor, tableConfig, functionCatalog, catalogManager);
}
 
Example #25
Source File: TableConfigUtils.java    From flink with Apache License 2.0
/**
 * Returns the aggregate phase strategy configuration.
 *
 * @param tableConfig TableConfig object
 * @return the aggregate phase strategy
 */
public static AggregatePhaseStrategy getAggPhaseStrategy(TableConfig tableConfig) {
	String aggPhaseConf = tableConfig.getConfiguration().getString(TABLE_OPTIMIZER_AGG_PHASE_STRATEGY).trim();
	if (aggPhaseConf.isEmpty()) {
		return AggregatePhaseStrategy.AUTO;
	} else {
		return AggregatePhaseStrategy.valueOf(aggPhaseConf);
	}
}
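
A hedged sketch; the key string is assumed to be "table.optimizer.agg-phase-strategy" (TABLE_OPTIMIZER_AGG_PHASE_STRATEGY):

	TableConfig tableConfig = new TableConfig();
	tableConfig.getConfiguration().setString("table.optimizer.agg-phase-strategy", "TWO_PHASE");
	// An empty value falls back to AggregatePhaseStrategy.AUTO.
	AggregatePhaseStrategy strategy = TableConfigUtils.getAggPhaseStrategy(tableConfig);
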
 
Example #26
Source File: ExecutorUtils.java    From flink with Apache License 2.0
/**
 * Sets batch properties for {@link StreamGraph}.
 */
public static void setBatchProperties(StreamGraph streamGraph, TableConfig tableConfig) {
	streamGraph.getStreamNodes().forEach(
			sn -> sn.setResources(ResourceSpec.UNKNOWN, ResourceSpec.UNKNOWN));
	streamGraph.setChaining(true);
	streamGraph.setAllVerticesInSameSlotSharingGroupByDefault(false);
	streamGraph.setScheduleMode(ScheduleMode.LAZY_FROM_SOURCES_WITH_BATCH_SLOT_REQUEST);
	streamGraph.setStateBackend(null);
	if (streamGraph.getCheckpointConfig().isCheckpointingEnabled()) {
		throw new IllegalArgumentException("Checkpoint is not supported for batch jobs.");
	}
	streamGraph.setGlobalDataExchangeMode(getGlobalDataExchangeMode(tableConfig));
}
 
Example #27
Source File: BatchTableEnvironment.java    From flink with Apache License 2.0
/**
 * Returns a {@link TableEnvironment} for a Java batch {@link ExecutionEnvironment} that works
 * with {@link DataSet}s.
 *
 * <p>A TableEnvironment can be used to:
 * <ul>
 *     <li>convert a {@link DataSet} to a {@link Table}</li>
 *     <li>register a {@link DataSet} in the {@link TableEnvironment}'s catalog</li>
 *     <li>register a {@link Table} in the {@link TableEnvironment}'s catalog</li>
 *     <li>scan a registered table to obtain a {@link Table}</li>
 *     <li>specify a SQL query on registered tables to obtain a {@link Table}</li>
 *     <li>convert a {@link Table} into a {@link DataSet}</li>
 *     <li>explain the AST and execution plan of a {@link Table}</li>
 * </ul>
 *
 * @param executionEnvironment The Java batch {@link ExecutionEnvironment} of the TableEnvironment.
 * @param tableConfig The configuration of the TableEnvironment.
 */
static BatchTableEnvironment create(ExecutionEnvironment executionEnvironment, TableConfig tableConfig) {
	try {
		// temporary solution until FLINK-15635 is fixed
		ClassLoader classLoader = Thread.currentThread().getContextClassLoader();

		ModuleManager moduleManager = new ModuleManager();

		String defaultCatalog = "default_catalog";
		CatalogManager catalogManager = CatalogManager.newBuilder()
			.classLoader(classLoader)
			.config(tableConfig.getConfiguration())
			.defaultCatalog(
				defaultCatalog,
				new GenericInMemoryCatalog(defaultCatalog, "default_database"))
			.executionConfig(executionEnvironment.getConfig())
			.build();

		Class<?> clazz = Class.forName("org.apache.flink.table.api.bridge.java.internal.BatchTableEnvironmentImpl");
		Constructor<?> con = clazz.getConstructor(
			ExecutionEnvironment.class,
			TableConfig.class,
			CatalogManager.class,
			ModuleManager.class);
		return (BatchTableEnvironment) con.newInstance(executionEnvironment, tableConfig, catalogManager, moduleManager);
	} catch (Throwable t) {
		throw new TableException("Create BatchTableEnvironment failed.", t);
	}
}
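
Example #12 above shows this factory in use; condensed:

	ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
	BatchTableEnvironment tableEnv = BatchTableEnvironment.create(env, new TableConfig());
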
 
Example #28
Source File: ValuesOperationTreeBuilderTest.java    From flink with Apache License 2.0
public OperationTreeBuilder getTreeBuilder() {
	return OperationTreeBuilder.create(
		new TableConfig(),
		new FunctionLookupMock(Collections.emptyMap()),
		new DataTypeFactoryMock(),
		name -> Optional.empty(), // do not support
		true
	);
}
 
Example #29
Source File: ExecutionContext.java    From flink with Apache License 2.0 5 votes vote down vote up
private static TableEnvironment createStreamTableEnvironment(
		StreamExecutionEnvironment env,
		EnvironmentSettings settings,
		Executor executor) {

	final TableConfig config = TableConfig.getDefault();

	final CatalogManager catalogManager = new CatalogManager(
		settings.getBuiltInCatalogName(),
		new GenericInMemoryCatalog(settings.getBuiltInCatalogName(), settings.getBuiltInDatabaseName()));

	final FunctionCatalog functionCatalog = new FunctionCatalog(catalogManager);

	final Map<String, String> plannerProperties = settings.toPlannerProperties();
	final Planner planner = ComponentFactoryService.find(PlannerFactory.class, plannerProperties)
		.create(plannerProperties, executor, config, functionCatalog, catalogManager);

	return new StreamTableEnvironmentImpl(
		catalogManager,
		functionCatalog,
		config,
		env,
		planner,
		executor,
		settings.isStreamingMode()
	);
}
 
Example #30
Source File: TableEnvironmentExample1.java    From flink-learning with Apache License 2.0
public static void main(String[] args) {
    //streaming job
    StreamTableEnvironment.create(StreamExecutionEnvironment.getExecutionEnvironment());
    //batch job
    BatchTableEnvironment.create(ExecutionEnvironment.getExecutionEnvironment());
    //use EnvironmentSettings
    StreamTableEnvironment.create(StreamExecutionEnvironment.getExecutionEnvironment(), EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build());
    //use table config
    StreamTableEnvironment.create(StreamExecutionEnvironment.getExecutionEnvironment(), TableConfig.getDefault());
}