org.apache.calcite.sql.util.ChainedSqlOperatorTable Java Examples

The following examples show how to use org.apache.calcite.sql.util.ChainedSqlOperatorTable. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: PlanningConfigurationBuilder.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Returns the operator table for this environment including a custom Calcite configuration.
 *
 * <p>The built-in table chains the basic operators with the operators derived from
 * the function catalog. A user-supplied table from {@link CalciteConfig} either
 * replaces the built-in table entirely or is chained behind it.
 */
private SqlOperatorTable getSqlOperatorTable(CalciteConfig calciteConfig, FunctionCatalog functionCatalog) {
	SqlOperatorTable builtinTable = ChainedSqlOperatorTable.of(
		new BasicOperatorTable(),
		new FunctionCatalogOperatorTable(functionCatalog, typeFactory)
	);

	return JavaScalaConversionUtil.toJava(calciteConfig.sqlOperatorTable())
		.map(customTable -> calciteConfig.replacesSqlOperatorTable()
			? customTable
			: ChainedSqlOperatorTable.of(builtinTable, customTable))
		.orElse(builtinTable);
}
 
Example #2
Source File: SqlConverter.java    From Bats with Apache License 2.0 6 votes vote down vote up
/**
 * Wires up the Drill SQL conversion pipeline for a single query: parser
 * configuration, catalog reader, operator table and validator are all derived
 * from the supplied per-query context.
 *
 * @param context per-query context providing planner settings, schemas,
 *                function registry and session state
 */
public SqlConverter(QueryContext context) {
  this.settings = context.getPlannerSettings();
  this.util = context;
  this.functions = context.getFunctionRegistry();
  this.parserConfig = new DrillParserConfig(settings);
  this.sqlToRelConverterConfig = new SqlToRelConverterConfig();
  this.isInnerQuery = false;
  this.typeFactory = new JavaTypeFactoryImpl(DRILL_TYPE_SYSTEM);
  this.defaultSchema =  context.getNewDefaultSchema();
  this.rootSchema = rootSchema(defaultSchema);
  this.temporarySchema = context.getConfig().getString(ExecConstants.DEFAULT_TEMPORARY_WORKSPACE);
  this.session = context.getSession();
  this.drillConfig = context.getConfig();
  this.catalog = new DrillCalciteCatalogReader(
      rootSchema,
      parserConfig.caseSensitive(),
      DynamicSchema.from(defaultSchema).path(null),
      typeFactory,
      drillConfig,
      session);
  // Operator lookup chains Drill's own operator table with the catalog reader,
  // which resolves functions registered in the schema.
  this.opTab = new ChainedSqlOperatorTable(Arrays.asList(context.getDrillOperatorTable(), catalog));
  // Null cost factory means "use Calcite's default costing".
  this.costFactory = (settings.useDefaultCosting()) ? null : new DrillCostBase.DrillCostFactory();
  this.validator = new DrillValidator(opTab, catalog, typeFactory, parserConfig.conformance());
  // Expand identifiers to fully-qualified references during validation.
  validator.setIdentifierExpansion(true);
  // NOTE(review): cluster appears to be initialized later, outside this
  // constructor — confirm against the rest of the class.
  cluster = null;
}
 
Example #3
Source File: PlanningConfigurationBuilder.java    From flink with Apache License 2.0 6 votes vote down vote up
/**
 * Returns the operator table for this environment including a custom Calcite configuration.
 *
 * <p>When the configuration declares a replacement table, it is used on its own;
 * otherwise any configured table is chained behind the default one.
 */
private SqlOperatorTable getSqlOperatorTable(CalciteConfig calciteConfig, FunctionCatalog functionCatalog) {
	SqlOperatorTable defaultTable = ChainedSqlOperatorTable.of(
		new BasicOperatorTable(),
		new FunctionCatalogOperatorTable(functionCatalog, typeFactory)
	);

	return JavaScalaConversionUtil.toJava(calciteConfig.sqlOperatorTable())
		.map(userTable -> {
			if (calciteConfig.replacesSqlOperatorTable()) {
				return userTable;
			}
			return ChainedSqlOperatorTable.of(defaultTable, userTable);
		})
		.orElse(defaultTable);
}
 
Example #4
Source File: SqlWorker.java    From quark with Apache License 2.0 6 votes vote down vote up
/**
 * Builds a Calcite {@link Planner} whose operator table chains the standard
 * operators, the Hive operators and the catalog reader.
 *
 * @param context query context supplying the default schema
 */
private Planner buildPlanner(QueryContext context) {
  final List<RelTraitDef> planTraits = new ArrayList<RelTraitDef>();
  planTraits.add(ConventionTraitDef.INSTANCE);
  planTraits.add(RelCollationTraitDef.INSTANCE);

  final SqlParser.Config parserConfig = SqlParser.configBuilder()
      .setQuotedCasing(Casing.UNCHANGED)
      .setUnquotedCasing(Casing.TO_UPPER)
      .setQuoting(Quoting.DOUBLE_QUOTE)
      .build();
  final ChainedSqlOperatorTable operators =
      new ChainedSqlOperatorTable(
          ImmutableList.of(
              SqlStdOperatorTable.instance(),
              HiveSqlOperatorTable.instance(),
              catalogReader));
  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .parserConfig(parserConfig)
      .defaultSchema(context.getDefaultSchema())
      .operatorTable(operators)
      .traitDefs(planTraits)
      .convertletTable(StandardConvertletTable.INSTANCE)
      .programs(getPrograms())
      .typeSystem(RelDataTypeSystem.DEFAULT)
      .build();
  return Frameworks.getPlanner(config);
}
 
Example #5
Source File: PlannerTest.java    From calcite with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that a user-defined aggregate function supplied through a chained
 * operator table is recognized by the validator, and that aggregate queries
 * still reject ungrouped expressions.
 */
@Test void testValidateUserDefinedAggregate() throws Exception {
  final SqlStdOperatorTable stdOpTab = SqlStdOperatorTable.instance();
  // Chain the standard operators with a single-entry table holding MY_COUNT.
  SqlOperatorTable opTab =
      ChainedSqlOperatorTable.of(stdOpTab,
          new ListSqlOperatorTable(
              ImmutableList.of(new MyCountAggFunction())));
  final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .defaultSchema(
          CalciteAssert.addSchema(rootSchema, CalciteAssert.SchemaSpec.HR))
      .operatorTable(opTab)
      .build();
  final Planner planner = Frameworks.getPlanner(config);
  SqlNode parse =
      planner.parse("select \"deptno\", my_count(\"empid\") from \"emps\"\n"
          + "group by \"deptno\"");
  assertThat(Util.toLinux(parse.toString()),
      equalTo("SELECT `deptno`, `MY_COUNT`(`empid`)\n"
          + "FROM `emps`\n"
          + "GROUP BY `deptno`"));

  // MY_COUNT is recognized as an aggregate function, and therefore it is OK
  // that its argument empid is not in the GROUP BY clause.
  SqlNode validate = planner.validate(parse);
  assertThat(validate, notNullValue());

  // The presence of an aggregate function in the SELECT clause causes it
  // to become an aggregate query. Non-aggregate expressions become illegal.
  planner.close();
  planner.reset();
  parse = planner.parse("select \"deptno\", count(1) from \"emps\"");
  try {
    validate = planner.validate(parse);
    fail("expected exception, got " + validate);
  } catch (ValidationException e) {
    assertThat(e.getCause().getCause().getMessage(),
        containsString("Expression 'deptno' is not being grouped"));
  }
}
 
Example #6
Source File: PlannerImpl.java    From calcite with Apache License 2.0 5 votes vote down vote up
/**
 * Creates the SQL validator for this planner. Operator lookup consults the
 * configured operator table chained with the catalog reader, so that
 * schema-defined functions are resolvable during validation.
 */
private SqlValidator createSqlValidator(CalciteCatalogReader catalogReader) {
  return new CalciteSqlValidator(
      ChainedSqlOperatorTable.of(operatorTable, catalogReader),
      catalogReader,
      typeFactory,
      sqlValidatorConfig
          .withIdentifierExpansion(true)
          .withSqlConformance(connectionConfig.conformance())
          .withDefaultNullCollation(connectionConfig.defaultNullCollation())
          .withLenientOperatorLookup(connectionConfig.lenientOperatorLookup()));
}
 
Example #7
Source File: PlannerContext.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Returns the built-in operator table chained with the operator table derived
 * from the external function catalog for this environment.
 *
 * <p>NOTE(review): the catalog-derived table is listed first; presumably this
 * gives catalog functions lookup precedence over the built-ins — confirm
 * against Calcite's chained-table lookup semantics.
 */
private SqlOperatorTable getBuiltinSqlOperatorTable() {
	return ChainedSqlOperatorTable.of(
			new FunctionCatalogOperatorTable(
					context.getFunctionCatalog(),
					context.getCatalogManager().getDataTypeFactory(),
					typeFactory),
			FlinkSqlOperatorTable.instance());
}
 
Example #8
Source File: PlannerContext.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Returns the operator table for this environment including a custom Calcite configuration.
 *
 * <p>A configured table either replaces the built-in table or is chained behind it;
 * with no configured table the built-in table is used as-is.
 */
private SqlOperatorTable getSqlOperatorTable(CalciteConfig calciteConfig) {
	return JavaScalaConversionUtil.<SqlOperatorTable>toJava(calciteConfig.getSqlOperatorTable())
			.map(customTable -> calciteConfig.replacesSqlOperatorTable()
					? customTable
					: ChainedSqlOperatorTable.of(getBuiltinSqlOperatorTable(), customTable))
			.orElseGet(this::getBuiltinSqlOperatorTable);
}
 
Example #9
Source File: TestCompilerUtils.java    From streamline with Apache License 2.0 5 votes vote down vote up
/**
 * Compiles the given SQL statement against a dummy streaming table and returns
 * the schema together with the resulting relational tree.
 *
 * @param sql the SQL text to parse, validate and convert
 */
public static CalciteState sqlOverDummyTable(String sql)
        throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl(RelDataTypeSystem.DEFAULT);

    // Dummy table: an int key plus two string columns, exposed under two names.
    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("ID", SqlTypeName.INTEGER)
            .field("NAME", typeFactory.createType(String.class))
            .field("ADDR", typeFactory.createType(String.class))
            .build();
    Table table = streamableTable.stream();
    schema.add("FOO", table);
    schema.add("BAR", table);
    schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));

    // Chain the standard operators with a catalog reader over the schema so
    // schema-registered functions (e.g. MYPLUS) can be resolved.
    SqlOperatorTable catalogReader = new CalciteCatalogReader(
            CalciteSchema.from(schema), false,
            Collections.<String>emptyList(), typeFactory);
    List<SqlOperatorTable> operatorTables = new ArrayList<>();
    operatorTables.add(SqlStdOperatorTable.instance());
    operatorTables.add(catalogReader);

    FrameworkConfig config = Frameworks.newConfigBuilder()
            .defaultSchema(schema)
            .operatorTable(new ChainedSqlOperatorTable(operatorTables))
            .build();
    Planner planner = Frameworks.getPlanner(config);
    SqlNode parsed = planner.parse(sql);
    SqlNode validated = planner.validate(parsed);
    RelNode tree = planner.convert(validated);
    System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}
 
Example #10
Source File: StreamlineSqlImpl.java    From streamline with Apache License 2.0 5 votes vote down vote up
/**
 * Builds the Calcite framework configuration. When user-defined functions are
 * registered, the standard operator table is chained with a catalog reader
 * over the schema so those functions resolve; otherwise the plain default
 * configuration is used.
 */
private FrameworkConfig buildFrameWorkConfig() {
  if (!hasUdf) {
    return Frameworks.newConfigBuilder().defaultSchema(schema).build();
  }
  List<SqlOperatorTable> operatorTables = new ArrayList<>();
  operatorTables.add(SqlStdOperatorTable.instance());
  operatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema),
                                              false,
                                              Collections.<String>emptyList(), typeFactory));
  return Frameworks.newConfigBuilder()
          .defaultSchema(schema)
          .operatorTable(new ChainedSqlOperatorTable(operatorTables))
          .build();
}
 
Example #11
Source File: SQLExecEnvironment.java    From attic-apex-malhar with Apache License 2.0 5 votes vote down vote up
/**
 * Builds the Calcite framework configuration used to parse SQL and generate a
 * relational tree: MySQL lexing rules, plus the standard operator table
 * chained with a catalog reader over the registered schema.
 *
 * @return FrameworkConfig
 */
private FrameworkConfig buildFrameWorkConfig()
{
  SqlOperatorTable catalogReader =
    new CalciteCatalogReader(CalciteSchema.from(schema), false, Collections.<String>emptyList(), typeFactory);
  List<SqlOperatorTable> operatorTables = new ArrayList<>();
  operatorTables.add(SqlStdOperatorTable.instance());
  operatorTables.add(catalogReader);
  return Frameworks.newConfigBuilder()
    .defaultSchema(schema)
    .parserConfig(SqlParser.configBuilder().setLex(Lex.MYSQL).build())
    .operatorTable(new ChainedSqlOperatorTable(operatorTables))
    .build();
}
 
Example #12
Source File: SqlConverter.java    From dremio-oss with Apache License 2.0 5 votes vote down vote up
/**
 * Builds the Dremio SQL conversion pipeline for a top-level (outer) query:
 * parser configuration, catalog reader, operator table, validator,
 * materialization substitution and the planner cluster.
 *
 * @param settings planner settings controlling costing and parsing
 * @param operatorTable base operator table, chained in front of the catalog reader
 * @param functionContext function execution context
 * @param materializationProvider source of acceleration materializations
 * @param functions function implementation registry
 * @param session user session; must not be null
 * @param observer observer notified of planning events
 * @param catalog catalog used for table and function resolution
 * @param factory creates the substitution provider for accelerations
 * @param config sabot configuration
 * @param scanResult classpath scan result
 */
public SqlConverter(
    final PlannerSettings settings,
    final SqlOperatorTable operatorTable,
    final FunctionContext functionContext,
    final MaterializationDescriptorProvider materializationProvider,
    final FunctionImplementationRegistry functions,
    final UserSession session,
    final AttemptObserver observer,
    final Catalog catalog,
    final SubstitutionProviderFactory factory,
    final SabotConfig config,
    final ScanResult scanResult
    ) {
  // Outer query: nesting starts at zero; inner queries are created elsewhere.
  this.nestingLevel = 0;
  this.flattenCounter = new FlattenOpCounter();
  this.observer = observer;
  this.settings = settings;
  this.functionContext = functionContext;
  this.functions = functions;
  this.session = Preconditions.checkNotNull(session, "user session is required");
  this.parserConfig = ParserConfig.newInstance(session, settings);
  this.isInnerQuery = false;
  this.typeFactory = JavaTypeFactoryImpl.INSTANCE;
  this.catalogReader = new DremioCatalogReader(catalog, typeFactory);
  // Operator lookup chains the supplied table with the catalog reader so
  // catalog-registered functions are resolvable during validation.
  this.opTab = new ChainedSqlOperatorTable(ImmutableList.<SqlOperatorTable>of(operatorTable, this.catalogReader));
  // Null cost factory means "use Calcite's default costing".
  this.costFactory = (settings.useDefaultCosting()) ? null : new DremioCost.Factory();
  this.validator = new SqlValidatorImpl(flattenCounter, opTab, this.catalogReader, typeFactory, DremioSqlConformance.INSTANCE);
  // Expand identifiers to fully-qualified references during validation.
  validator.setIdentifierExpansion(true);
  this.materializations = new MaterializationList(this, session, materializationProvider);
  this.substitutions = AccelerationAwareSubstitutionProvider.of(factory.getSubstitutionProvider(config, materializations, this.settings.options));
  this.planner = DremioVolcanoPlanner.of(this);
  this.cluster = RelOptCluster.create(planner, new DremioRexBuilder(typeFactory));
  this.cluster.setMetadataProvider(DefaultRelMetadataProvider.INSTANCE);
  this.viewExpansionContext = new ViewExpansionContext(session.getCredentials().getUserName());
  this.config = config;
  this.scanResult = scanResult;
}
 
Example #13
Source File: SQLAnalyzerFactory.java    From dremio-oss with Apache License 2.0 5 votes vote down vote up
/**
 * Factory method to create the SQLAnalyzer using the appropriate implementation of SqlValidatorWithHints.
 *
 * If createForSqlSuggestions is true, construct a SqlAdvisorValidator instance,
 * otherwise construct a SqlValidatorImpl instance. Inject this into the constructor
 * for a SQLAnalyzer object.
 *
 * @param username user on whose behalf the catalog and views are resolved
 * @param sabotContext global context supplying the catalog service and function registries
 * @param context default schema path segments, or null for no default schema
 * @param createForSqlSuggestions whether the validator should support SQL suggestion hints
 * @param projectOptionManager project-level options layered into the option manager
 * @return SQLAnalyzer instance
 */
public static SQLAnalyzer createSQLAnalyzer(final String username,
                                            final SabotContext sabotContext,
                                            final List<String> context,
                                            final boolean createForSqlSuggestions,
                                            ProjectOptionManager projectOptionManager) {
  final ViewExpansionContext viewExpansionContext = new ViewExpansionContext(username);
  // Options are layered: defaults, then eagerly-cached project options, then
  // per-query options.
  final OptionManager optionManager = OptionManagerWrapper.Builder.newBuilder()
    .withOptionManager(new DefaultOptionManager(sabotContext.getOptionValidatorListing()))
    .withOptionManager(new EagerCachingOptionManager(projectOptionManager))
    .withOptionManager(new QueryOptionManager(sabotContext.getOptionValidatorListing()))
    .build();
  final NamespaceKey defaultSchemaPath = context == null ? null : new NamespaceKey(context);

  final SchemaConfig newSchemaConfig = SchemaConfig.newBuilder(username)
    .defaultSchema(defaultSchemaPath)
    .optionManager(optionManager)
    .setViewExpansionContext(viewExpansionContext)
    .build();

  Catalog catalog = sabotContext.getCatalogService()
      .getCatalog(MetadataRequestOptions.of(newSchemaConfig));
  JavaTypeFactory typeFactory = JavaTypeFactoryImpl.INSTANCE;
  DremioCatalogReader catalogReader = new DremioCatalogReader(catalog, typeFactory);

  // Pick the decimal-aware function registry when decimal v2 is enabled.
  FunctionImplementationRegistry functionImplementationRegistry = optionManager.getOption
    (PlannerSettings.ENABLE_DECIMAL_V2_KEY).getBoolVal() ? sabotContext.getDecimalFunctionImplementationRegistry()
      : sabotContext.getFunctionImplementationRegistry();
  // Chain the Dremio operator table with the catalog reader so catalog
  // functions resolve during validation.
  OperatorTable opTable = new OperatorTable(functionImplementationRegistry);
  SqlOperatorTable chainedOpTable =  new ChainedSqlOperatorTable(ImmutableList.<SqlOperatorTable>of(opTable, catalogReader));

  // Create the appropriate implementation depending on intended use of the validator.
  SqlValidatorWithHints validator =
    createForSqlSuggestions ?
      new SqlAdvisorValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE) :
      SqlValidatorUtil.newValidator(chainedOpTable, catalogReader, typeFactory, DremioSqlConformance.INSTANCE);

  return new SQLAnalyzer(validator);
}
 
Example #14
Source File: PlannerImpl.java    From Mycat2 with GNU General Public License v3.0 5 votes vote down vote up
/**
 * Creates the SQL validator used by this planner. The configured operator
 * table is chained with the catalog reader so schema-defined functions can
 * be resolved during validation.
 */
private SqlValidator createSqlValidator(CalciteCatalogReader catalogReader) {
    final SqlOperatorTable operators =
            ChainedSqlOperatorTable.of(operatorTable, catalogReader);
    final SqlValidator.Config validatorConfig = sqlValidatorConfig
            .withDefaultNullCollation(connectionConfig.defaultNullCollation())
            .withLenientOperatorLookup(connectionConfig.lenientOperatorLookup())
            .withSqlConformance(connectionConfig.conformance())
            .withIdentifierExpansion(true);
    return new CalciteSqlValidator(operators, catalogReader, typeFactory, validatorConfig);
}
 
Example #15
Source File: HiveTableEnv.java    From marble with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a Hive-flavored table environment: Hive operators chained in front
 * of the standard operators, a Hive SQL parser with case-insensitive
 * Java-style lexing, Hive convertlets, and a Hive-aware Rex executor.
 *
 * @return a fully configured {@code TableEnv} with the {@code explode} UDTF registered
 */
public static TableEnv getTableEnv() {
    TableConfig tableConfig = new TableConfig();
    // Hive operators are listed first in the chain, ahead of the standard table.
    tableConfig.setSqlOperatorTable(
        ChainedSqlOperatorTable.of(HiveSqlOperatorTable.instance(),
            SqlStdOperatorTable.instance()));
    tableConfig.setSqlParserConfig(SqlParser
        .configBuilder()
        .setLex(Lex.JAVA).setCaseSensitive(false).setConformance(
            SqlConformanceEnum.HIVE)
        .setParserFactory(HiveSqlParserImpl.FACTORY)
        .build());
//    tableConfig.setRelDataTypeSystem(new HiveTypeSystemImpl());
    // Keep the connection config's case-sensitivity consistent with the parser.
    Properties prop = new Properties();
    prop.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName(),
        String.valueOf(tableConfig.getSqlParserConfig().caseSensitive()));
    tableConfig.setCalciteConnectionConfig(
        new CalciteConnectionConfigImpl(prop));
    tableConfig.setConvertletTable(new HiveConvertletTable());
    RexExecutor rexExecutor = new HiveRexExecutorImpl(
        Schemas.createDataContext(null, null));
    tableConfig.setRexExecutor(rexExecutor);
    TableEnv tableEnv = new HiveTableEnv(tableConfig);
    // Register the Hive "explode" table function (UDTF).
    tableEnv.addFunction("", "explode",
        "org.apache.calcite.adapter.hive.udtf.UDTFExplode", "eval");
    return tableEnv;
  }
 
Example #16
Source File: PlannerContext.java    From flink with Apache License 2.0 5 votes vote down vote up
/**
 * Returns the operator table for this environment including a custom Calcite configuration.
 *
 * <p>A configured table either replaces the built-in table or is chained behind it;
 * with no configured table the built-in table is used as-is.
 */
private SqlOperatorTable getSqlOperatorTable(CalciteConfig calciteConfig, FunctionCatalog functionCatalog) {
	return JavaScalaConversionUtil.toJava(calciteConfig.getSqlOperatorTable())
			.map(customTable -> calciteConfig.replacesSqlOperatorTable()
					? customTable
					: ChainedSqlOperatorTable.of(getBuiltinSqlOperatorTable(functionCatalog), customTable))
			.orElseGet(() -> getBuiltinSqlOperatorTable(functionCatalog));
}
 
Example #17
Source File: QueryPlanner.java    From samza with Apache License 2.0 4 votes vote down vote up
/**
 * Plans a Samza SQL query: parses, validates and converts it into a relational
 * tree using a Calcite planner whose operator table chains the Samza built-in
 * operators with the table of user-defined functions.
 *
 * @param query SQL statement text
 * @return root of the validated relational plan
 * @throws SamzaException wrapping any parse, validation or conversion failure
 */
public RelRoot plan(String query) {
  // try-with-resources: the in-memory Calcite JDBC connection was previously
  // never closed, leaking a connection on every planned query.
  try (Connection connection = DriverManager.getConnection("jdbc:calcite:")) {
    CalciteConnection calciteConnection = connection.unwrap(CalciteConnection.class);
    SchemaPlus rootSchema = calciteConnection.getRootSchema();
    registerSourceSchemas(rootSchema);

    List<SamzaSqlScalarFunctionImpl> samzaSqlFunctions = udfMetadata.stream()
        .map(SamzaSqlScalarFunctionImpl::new)
        .collect(Collectors.toList());

    final List<RelTraitDef> traitDefs = new ArrayList<>();
    traitDefs.add(ConventionTraitDef.INSTANCE);
    traitDefs.add(RelCollationTraitDef.INSTANCE);

    // Built-in operators first, then the UDF table, chained for lookup.
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(new SamzaSqlOperatorTable());
    sqlOperatorTables.add(new SamzaSqlUdfOperatorTable(samzaSqlFunctions));

    // Using lenient so that !=,%,- are allowed.
    FrameworkConfig frameworkConfig = Frameworks.newConfigBuilder()
        .parserConfig(SqlParser.configBuilder()
            .setLex(Lex.JAVA)
            .setConformance(SqlConformanceEnum.LENIENT)
            .setCaseSensitive(false) // Make Udfs case insensitive
            .build())
        .defaultSchema(rootSchema)
        .operatorTable(new ChainedSqlOperatorTable(sqlOperatorTables))
        .sqlToRelConverterConfig(SqlToRelConverter.Config.DEFAULT)
        .traitDefs(traitDefs)
        .context(Contexts.EMPTY_CONTEXT)
        .costFactory(null)
        .build();
    Planner planner = Frameworks.getPlanner(frameworkConfig);

    SqlNode sql = planner.parse(query);
    SqlNode validatedSql = planner.validate(sql);
    RelRoot relRoot = planner.rel(validatedSql);
    LOG.info("query plan:\n" + RelOptUtil.toString(relRoot.rel, SqlExplainLevel.ALL_ATTRIBUTES));
    return relRoot;
  } catch (Exception e) {
    String errorMsg = SamzaSqlValidator.formatErrorString(query, e);
    LOG.error(errorMsg, e);
    throw new SamzaException(errorMsg, e);
  }
}
 
Example #18
Source File: TestCompilerUtils.java    From streamline with Apache License 2.0 4 votes vote down vote up
/**
 * Compiles the given SQL statement against a dummy table containing nested
 * map- and array-typed columns, and returns the schema plus the resulting
 * relational tree.
 *
 * @param sql the SQL text to parse, validate and convert
 */
public static CalciteState sqlOverNestedTable(String sql)
        throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl
            (RelDataTypeSystem.DEFAULT);

    // Columns: ID int, MAPFIELD map<varchar,int>, NESTEDMAPFIELD
    // map<varchar, map<varchar,int>>, ARRAYFIELD int array — all nullable.
    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("ID", SqlTypeName.INTEGER)
            .field("MAPFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createMapType(
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.INTEGER), true))
                            , true))
            .field("NESTEDMAPFIELD",
                    typeFactory.createTypeWithNullability(
                        typeFactory.createMapType(
                                typeFactory.createTypeWithNullability(
                                        typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                typeFactory.createTypeWithNullability(
                                        typeFactory.createMapType(
                                                typeFactory.createTypeWithNullability(
                                                        typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                                typeFactory.createTypeWithNullability(
                                                        typeFactory.createSqlType(SqlTypeName.INTEGER), true))
                                        , true))
                                    , true))
            .field("ARRAYFIELD", typeFactory.createTypeWithNullability(
                    typeFactory.createArrayType(
                        typeFactory.createTypeWithNullability(
                            typeFactory.createSqlType(SqlTypeName.INTEGER), true), -1L)
                    , true))
            .build();
    Table table = streamableTable.stream();
    schema.add("FOO", table);
    schema.add("BAR", table);
    schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));
    // Chain the standard operators with a catalog reader over the schema so
    // schema-registered functions (e.g. MYPLUS) can be resolved.
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema),
                                                   false,
                                                   Collections.<String>emptyList(), typeFactory));
    SqlOperatorTable chainedSqlOperatorTable = new ChainedSqlOperatorTable(sqlOperatorTables);
    FrameworkConfig config = Frameworks.newConfigBuilder().defaultSchema(
            schema).operatorTable(chainedSqlOperatorTable).build();
    Planner planner = Frameworks.getPlanner(config);
    SqlNode parse = planner.parse(sql);
    SqlNode validate = planner.validate(parse);
    RelNode tree = planner.convert(validate);
    System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}
 
Example #19
Source File: TableEnv.java    From marble with Apache License 2.0 4 votes vote down vote up
/**
 * Creates a table environment from the given configuration, falling back to a
 * default for every setting the configuration leaves unset: parser config,
 * operator table, connection config, type system, convertlet table and Rex
 * executor.
 *
 * @param tableConfig configuration holder; individual entries may be null
 * @throws RuntimeException wrapping any failure during setup
 */
public TableEnv(TableConfig tableConfig) {
  try {
    this.tableConfig = tableConfig;
    // Default parser: case-insensitive.
    SqlParser.Config sqlParserConfig = tableConfig.getSqlParserConfig()
        != null ? tableConfig.getSqlParserConfig() : SqlParser
        .configBuilder().setCaseSensitive(false)
        .build();
    // Default operator table: the standard operators.
    SqlOperatorTable sqlStdOperatorTable = tableConfig
        .getSqlOperatorTable()
        != null
        ? tableConfig.getSqlOperatorTable()
        : ChainedSqlOperatorTable.of(SqlStdOperatorTable.instance());
    CalciteConnectionConfig calciteConnectionConfig = tableConfig
        .getCalciteConnectionConfig()
        != null
        ? tableConfig.getCalciteConnectionConfig()
        : createDefaultConnectionConfig(sqlParserConfig);
    // Type system: explicit config wins, then the connection config's, then
    // Calcite's default.
    RelDataTypeSystem typeSystem = tableConfig.getRelDataTypeSystem() != null
        ? tableConfig.getRelDataTypeSystem()
        : calciteConnectionConfig.typeSystem(RelDataTypeSystem.class,
            RelDataTypeSystem.DEFAULT);
    SqlRexConvertletTable convertletTable = tableConfig
        .getConvertletTable()
        != null
        ? tableConfig
        .getConvertletTable()
        : StandardConvertletTable.INSTANCE;
    RexExecutor rexExecutor = tableConfig.getRexExecutor() != null
        ? tableConfig.getRexExecutor()
        : RexUtil.EXECUTOR;
    this.calciteCatalogReader = new CalciteCatalogReader(
        CalciteSchema.from(rootSchema),
        CalciteSchema.from(rootSchema).path(null),
        new JavaTypeFactoryImpl(typeSystem),
        calciteConnectionConfig);
    // The final operator table chains the chosen table with the catalog reader
    // so schema-registered functions are resolvable.
    this.frameworkConfig = createFrameworkConfig(sqlParserConfig,
        ChainedSqlOperatorTable.of(sqlStdOperatorTable,
            calciteCatalogReader), convertletTable,
        calciteConnectionConfig, typeSystem, rexExecutor);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Example #20
Source File: PlannerContext.java    From flink with Apache License 2.0 4 votes vote down vote up
/**
 * Returns the built-in operator table chained with the operator table derived
 * from the external function catalog for this environment.
 *
 * <p>NOTE(review): the catalog-derived table is listed first; presumably this
 * gives catalog functions lookup precedence over the built-ins — confirm
 * against Calcite's chained-table lookup semantics.
 */
private SqlOperatorTable getBuiltinSqlOperatorTable(FunctionCatalog functionCatalog) {
	return ChainedSqlOperatorTable.of(
			new FunctionCatalogOperatorTable(functionCatalog, typeFactory),
			FlinkSqlOperatorTable.instance());
}