org.apache.calcite.prepare.CalciteCatalogReader Java Examples

The following examples show how to use org.apache.calcite.prepare.CalciteCatalogReader. Each example is taken from an open-source project; the original project and source file are named above each snippet.
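
Before the individual examples, the sketch below (not taken from any of the projects listed; the empty root schema and default connection properties are assumptions for illustration) shows the four-argument constructor that Examples #2, #10 and #13 use:

private static CalciteCatalogReader createEmptySchemaCatalogReader() {
  // Hypothetical helper: build a CalciteCatalogReader over an empty root schema.
  SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  RelDataTypeFactory typeFactory = new JavaTypeFactoryImpl();
  return new CalciteCatalogReader(
      CalciteSchema.from(rootSchema),                      // root schema used for name resolution
      Collections.emptyList(),                             // default schema path (the root itself)
      typeFactory,
      new CalciteConnectionConfigImpl(new Properties()));  // default connection settings
}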
Example #1
Source File: SqlExprToRexConverterImpl.java    From flink with Apache License 2.0
/**
 * Creates a catalog reader that contains a single {@link Table} with the temporary table name
 * and the specified {@code rowType}.
 *
 * @param lenientCaseSensitivity if true, ignore the parser's case-sensitivity setting and
 *                               match identifiers case-insensitively
 * @param config                 framework config that supplies the parser configuration
 * @param typeFactory            the Flink type factory
 * @param rowType                table row type
 * @return the {@link CalciteCatalogReader} instance
 */
private static CalciteCatalogReader createSingleTableCatalogReader(
		boolean lenientCaseSensitivity,
		FrameworkConfig config,
		FlinkTypeFactory typeFactory,
		RelDataType rowType) {

	// connection properties
	boolean caseSensitive = !lenientCaseSensitivity && config.getParserConfig().caseSensitive();
	Properties properties = new Properties();
	properties.put(
		CalciteConnectionProperty.CASE_SENSITIVE.camelName(),
		String.valueOf(caseSensitive));
	CalciteConnectionConfig connectionConfig = new CalciteConnectionConfigImpl(properties);

	// prepare root schema
	final RowTypeSpecifiedTable table = new RowTypeSpecifiedTable(rowType);
	final Map<String, Table> tableMap = Collections.singletonMap(TEMPORARY_TABLE_NAME, table);
	CalciteSchema schema = CalciteSchemaBuilder.asRootSchema(new TableSpecifiedSchema(tableMap));

	return new FlinkCalciteCatalogReader(
		schema,
		new ArrayList<>(new ArrayList<>()),
		typeFactory,
		connectionConfig);
}
 
Example #2
Source File: AbstractMaterializedViewTest.java    From calcite with Apache License 2.0
private RelNode toRel(RelOptCluster cluster, SchemaPlus rootSchema,
    SchemaPlus defaultSchema, String sql) throws SqlParseException {
  final SqlParser parser = SqlParser.create(sql, SqlParser.Config.DEFAULT);
  final SqlNode parsed = parser.parseStmt();

  final CalciteCatalogReader catalogReader = new CalciteCatalogReader(
      CalciteSchema.from(rootSchema),
      CalciteSchema.from(defaultSchema).path(null),
      new JavaTypeFactoryImpl(), new CalciteConnectionConfigImpl(new Properties()));

  final SqlValidator validator = new ValidatorForTest(SqlStdOperatorTable.instance(),
      catalogReader, new JavaTypeFactoryImpl(), SqlConformanceEnum.DEFAULT);
  final SqlNode validated = validator.validate(parsed);
  final SqlToRelConverter.Config config = SqlToRelConverter.configBuilder()
      .withTrimUnusedFields(true)
      .withExpand(true)
      .withDecorrelationEnabled(true)
      .build();
  final SqlToRelConverter converter = new SqlToRelConverter(
      (rowType, queryString, schemaPath, viewPath) -> {
        throw new UnsupportedOperationException("cannot expand view");
      }, validator, catalogReader, cluster, StandardConvertletTable.INSTANCE, config);
  return converter.convertQuery(validated, false, true).rel;
}
 
Example #3
Source File: PlannerImpl.java    From Mycat2 with GNU General Public License v3.0
@Override
public RelRoot expandView(RelDataType rowType, String queryString,
                          List<String> schemaPath, List<String> viewPath) {
    if (planner == null) {
        ready();
    }
    SqlParser parser = SqlParser.create(queryString, parserConfig);
    SqlNode sqlNode;
    try {
        sqlNode = parser.parseQuery();
    } catch (SqlParseException e) {
        throw new RuntimeException("parse failed", e);
    }

    final CalciteCatalogReader catalogReader =
            createCatalogReader().withSchemaPath(schemaPath);
    final SqlValidator validator = createSqlValidator(catalogReader);

    final RexBuilder rexBuilder = createRexBuilder();
    final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
    final SqlToRelConverter.Config config = SqlToRelConverter
            .configBuilder()
            .withConfig(sqlToRelConverterConfig)
            .withTrimUnusedFields(false)
            .build();
    final SqlToRelConverter sqlToRelConverter =
            new SqlToRelConverter(this, validator,
                    catalogReader, cluster, convertletTable, config);

    final RelRoot root =
            sqlToRelConverter.convertQuery(sqlNode, true, false);
    final RelRoot root2 =
            root.withRel(sqlToRelConverter.flattenTypes(root.rel, true));
    final RelBuilder relBuilder =
            config.getRelBuilderFactory().create(cluster, null);
    return root2.withRel(
            RelDecorrelator.decorrelateQuery(root.rel, relBuilder));
}
 
Example #4
Source File: PlannerImpl.java    From Mycat2 with GNU General Public License v3.0
private SqlValidator createSqlValidator(CalciteCatalogReader catalogReader) {
    final SqlOperatorTable opTab =
            ChainedSqlOperatorTable.of(operatorTable, catalogReader);
    return new CalciteSqlValidator(opTab,
            catalogReader,
            typeFactory,
            sqlValidatorConfig
                    .withDefaultNullCollation(connectionConfig.defaultNullCollation())
                    .withLenientOperatorLookup(connectionConfig.lenientOperatorLookup())
                    .withSqlConformance(connectionConfig.conformance())
                    .withIdentifierExpansion(true));
}
 
Example #5
Source File: SQLExecEnvironment.java    From attic-apex-malhar with Apache License 2.0
/**
 * Builds a Calcite framework configuration that Calcite uses to parse SQL and generate
 * a relational tree out of it.
 * @return FrameworkConfig
 */
private FrameworkConfig buildFrameWorkConfig()
{
  List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
  sqlOperatorTables.add(SqlStdOperatorTable.instance());
  sqlOperatorTables
    .add(new CalciteCatalogReader(CalciteSchema.from(schema), false, Collections.<String>emptyList(), typeFactory));
  return Frameworks.newConfigBuilder().defaultSchema(schema)
    .parserConfig(SqlParser.configBuilder().setLex(Lex.MYSQL).build())
    .operatorTable(new ChainedSqlOperatorTable(sqlOperatorTables)).build();
}
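
A FrameworkConfig built this way is normally handed to a Planner for parsing, validation and conversion; the sketch below (not part of the Apex Malhar source, mirroring the pattern Example #7 shows in full) illustrates that follow-up step:

// Hypothetical follow-up: feed the config built above to a Planner.
private RelNode planQuery(String sql) throws SqlParseException,
    ValidationException, RelConversionException {
  Planner planner = Frameworks.getPlanner(buildFrameWorkConfig());
  SqlNode parsed = planner.parse(sql);
  SqlNode validated = planner.validate(parsed);
  return planner.convert(validated);
}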
 
Example #6
Source File: StreamlineSqlImpl.java    From streamline with Apache License 2.0
private FrameworkConfig buildFrameWorkConfig() {
  if (hasUdf) {
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema),
                                                   false,
                                                   Collections.<String>emptyList(), typeFactory));
    return Frameworks.newConfigBuilder().defaultSchema(schema)
            .operatorTable(new ChainedSqlOperatorTable(sqlOperatorTables)).build();
  } else {
    return Frameworks.newConfigBuilder().defaultSchema(schema).build();
  }
}
 
Example #7
Source File: TestCompilerUtils.java    From streamline with Apache License 2.0
public static CalciteState sqlOverDummyTable(String sql)
        throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl
            (RelDataTypeSystem.DEFAULT);
    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("ID", SqlTypeName.INTEGER)
            .field("NAME", typeFactory.createType(String.class))
            .field("ADDR", typeFactory.createType(String.class))
            .build();
    Table table = streamableTable.stream();
    schema.add("FOO", table);
    schema.add("BAR", table);
    schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));

    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema),
            false,
            Collections.<String>emptyList(), typeFactory));
    SqlOperatorTable chainedSqlOperatorTable = new ChainedSqlOperatorTable(sqlOperatorTables);
    FrameworkConfig config = Frameworks.newConfigBuilder().defaultSchema(
            schema).operatorTable(chainedSqlOperatorTable).build();
    Planner planner = Frameworks.getPlanner(config);
    SqlNode parse = planner.parse(sql);
    SqlNode validate = planner.validate(parse);
    RelNode tree = planner.convert(validate);
    System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}
 
Example #8
Source File: QuarkTileTable.java    From quark with Apache License 2.0
public QuarkTileTable(QuarkTile quarkTile, CalciteCatalogReader calciteCatalogReader,
                      RelDataType relDataType, Path path, QuarkTable backingTable) {
  this.quarkTile = quarkTile;
  this.backingTable = backingTable;
  this.relOptTable = RelOptTableImpl.create(
      calciteCatalogReader,
      relDataType,
      this,
      path);
}
 
Example #9
Source File: SqlWorker.java    From quark with Apache License 2.0
private CalciteCatalogReader createCatalogReader(QueryContext context) {
  return new CalciteCatalogReader(
      CalciteSchema.from(context.getRootSchema()),
      false,
      context.getDefaultSchemaPath(),
      context.getTypeFactory());
}
 
Example #10
Source File: ContextSqlValidator.java    From calcite with Apache License 2.0
private static CalciteCatalogReader getCatalogReader(
    CalcitePrepare.Context context, boolean mutable) {
  return new CalciteCatalogReader(
      mutable ? context.getMutableRootSchema() : context.getRootSchema(),
      ImmutableList.of(),
      context.getTypeFactory(),
      new CalciteConnectionConfigImpl(new Properties()));
}
 
Example #11
Source File: SqlValidatorUtil.java    From calcite with Apache License 2.0
/**
 * Creates a catalog reader that contains a single {@link Table} with temporary table name
 * and specified {@code rowType}.
 *
 * <p>Make this method public so that other systems can also use it.
 *
 * @param caseSensitive whether to match case sensitively
 * @param tableName     table name to register with
 * @param typeFactory   type factory
 * @param rowType       table row type
 * @return the {@link CalciteCatalogReader} instance
 */
public static CalciteCatalogReader createSingleTableCatalogReader(
    boolean caseSensitive,
    String tableName,
    RelDataTypeFactory typeFactory,
    RelDataType rowType) {
  // connection properties
  Properties properties = new Properties();
  properties.put(
      CalciteConnectionProperty.CASE_SENSITIVE.camelName(),
      String.valueOf(caseSensitive));
  CalciteConnectionConfig connectionConfig = new CalciteConnectionConfigImpl(properties);

  // prepare root schema
  final ExplicitRowTypeTable table = new ExplicitRowTypeTable(rowType);
  final Map<String, Table> tableMap = Collections.singletonMap(tableName, table);
  CalciteSchema schema = CalciteSchema.createRootSchema(
      false,
      false,
      "",
      new ExplicitTableSchema(tableMap));

  return new CalciteCatalogReader(
      schema,
      new ArrayList<>(new ArrayList<>()),
      typeFactory,
      connectionConfig);
}
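
The Javadoc above notes that this helper is public so other systems can reuse it. The sketch below (not from the Calcite source; the table name "T" and its two columns are made up) shows one way to call it and then resolve the registered table:

// Hypothetical usage of the helper above.
static Prepare.PreparingTable resolveSingleTable() {
  RelDataTypeFactory typeFactory = new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);
  RelDataType rowType = typeFactory.builder()
      .add("ID", SqlTypeName.INTEGER)
      .add("NAME", SqlTypeName.VARCHAR)
      .build();
  CalciteCatalogReader reader =
      SqlValidatorUtil.createSingleTableCatalogReader(false, "T", typeFactory, rowType);
  return reader.getTable(Collections.singletonList("T"));
}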
 
Example #12
Source File: TraitPropagationTest.java    From calcite with Apache License 2.0
private static RelNode run(PropAction action, RuleSet rules)
    throws Exception {

  FrameworkConfig config = Frameworks.newConfigBuilder()
      .ruleSets(rules).build();

  final Properties info = new Properties();
  final Connection connection = DriverManager
      .getConnection("jdbc:calcite:", info);
  final CalciteServerStatement statement = connection
      .createStatement().unwrap(CalciteServerStatement.class);
  final CalcitePrepare.Context prepareContext =
        statement.createPrepareContext();
  final JavaTypeFactory typeFactory = prepareContext.getTypeFactory();
  CalciteCatalogReader catalogReader =
        new CalciteCatalogReader(prepareContext.getRootSchema(),
            prepareContext.getDefaultSchemaPath(),
            typeFactory,
            prepareContext.config());
  final RexBuilder rexBuilder = new RexBuilder(typeFactory);
  final RelOptPlanner planner = new VolcanoPlanner(config.getCostFactory(),
      config.getContext());

  // set up rules before we generate cluster
  planner.clearRelTraitDefs();
  planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

  planner.clear();
  for (RelOptRule r : rules) {
    planner.addRule(r);
  }

  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  return action.apply(cluster, catalogReader,
      prepareContext.getRootSchema().plus());
}
 
Example #13
Source File: TableEnv.java    From marble with Apache License 2.0
public TableEnv(TableConfig tableConfig) {
  try {
    this.tableConfig = tableConfig;
    SqlParser.Config sqlParserConfig = tableConfig.getSqlParserConfig()
        != null ? tableConfig.getSqlParserConfig() : SqlParser
        .configBuilder().setCaseSensitive(false)
        .build();
    SqlOperatorTable sqlStdOperatorTable = tableConfig
        .getSqlOperatorTable()
        != null
        ? tableConfig.getSqlOperatorTable()
        : ChainedSqlOperatorTable.of(SqlStdOperatorTable.instance());
    CalciteConnectionConfig calciteConnectionConfig = tableConfig
        .getCalciteConnectionConfig()
        != null
        ? tableConfig.getCalciteConnectionConfig()
        : createDefaultConnectionConfig(sqlParserConfig);
    RelDataTypeSystem typeSystem = tableConfig.getRelDataTypeSystem() != null
        ? tableConfig.getRelDataTypeSystem()
        : calciteConnectionConfig.typeSystem(RelDataTypeSystem.class,
            RelDataTypeSystem.DEFAULT);
    SqlRexConvertletTable convertletTable = tableConfig
        .getConvertletTable()
        != null
        ? tableConfig
        .getConvertletTable()
        : StandardConvertletTable.INSTANCE;
    RexExecutor rexExecutor = tableConfig.getRexExecutor() != null
        ? tableConfig.getRexExecutor()
        : RexUtil.EXECUTOR;
    this.calciteCatalogReader = new CalciteCatalogReader(
        CalciteSchema.from(rootSchema),
        CalciteSchema.from(rootSchema).path(null),
        new JavaTypeFactoryImpl(typeSystem),
        calciteConnectionConfig);
    this.frameworkConfig = createFrameworkConfig(sqlParserConfig,
        ChainedSqlOperatorTable.of(sqlStdOperatorTable,
            calciteCatalogReader), convertletTable,
        calciteConnectionConfig, typeSystem, rexExecutor);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
 
Example #14
Source File: PlannerImpl.java    From Mycat2 with GNU General Public License v3.0
public CalciteCatalogReader createCatalogReader() {
    return createCalciteCatalogReader();
}
 
Example #15
Source File: CalciteSqlValidator.java    From Mycat2 with GNU General Public License v3.0
CalciteSqlValidator(SqlOperatorTable opTab,
                    CalciteCatalogReader catalogReader, JavaTypeFactory typeFactory,
                    Config config) {
  super(opTab, catalogReader, typeFactory, config);
}
 
Example #16
Source File: TestCompilerUtils.java    From streamline with Apache License 2.0
public static CalciteState sqlOverNestedTable(String sql)
        throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl
            (RelDataTypeSystem.DEFAULT);

    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("ID", SqlTypeName.INTEGER)
            .field("MAPFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createMapType(
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.INTEGER), true))
                            , true))
            .field("NESTEDMAPFIELD",
                    typeFactory.createTypeWithNullability(
                        typeFactory.createMapType(
                                typeFactory.createTypeWithNullability(
                                        typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                typeFactory.createTypeWithNullability(
                                        typeFactory.createMapType(
                                                typeFactory.createTypeWithNullability(
                                                        typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                                typeFactory.createTypeWithNullability(
                                                        typeFactory.createSqlType(SqlTypeName.INTEGER), true))
                                        , true))
                                    , true))
            .field("ARRAYFIELD", typeFactory.createTypeWithNullability(
                    typeFactory.createArrayType(
                        typeFactory.createTypeWithNullability(
                            typeFactory.createSqlType(SqlTypeName.INTEGER), true), -1L)
                    , true))
            .build();
    Table table = streamableTable.stream();
    schema.add("FOO", table);
    schema.add("BAR", table);
    schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema),
                                                   false,
                                                   Collections.<String>emptyList(), typeFactory));
    SqlOperatorTable chainedSqlOperatorTable = new ChainedSqlOperatorTable(sqlOperatorTables);
    FrameworkConfig config = Frameworks.newConfigBuilder().defaultSchema(
            schema).operatorTable(chainedSqlOperatorTable).build();
    Planner planner = Frameworks.getPlanner(config);
    SqlNode parse = planner.parse(sql);
    SqlNode validate = planner.validate(parse);
    RelNode tree = planner.convert(validate);
    System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}