Java Code Examples for org.apache.calcite.jdbc.CalciteSchema

The following examples show how to use org.apache.calcite.jdbc.CalciteSchema. They are extracted from open source projects; the source project, source file, and license are noted above each example.
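Before the project examples, here is a minimal, self-contained sketch of the CalciteSchema API itself. It is not taken from any of the projects below; the SALES/EMP schema, table, and column names are invented for illustration, and it assumes a recent Calcite release where createRootSchema, plus(), getSubSchema and getTable have the signatures used in the examples on this page.

import org.apache.calcite.jdbc.CalciteSchema;
import org.apache.calcite.rel.type.RelDataType;
import org.apache.calcite.rel.type.RelDataTypeFactory;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.schema.impl.AbstractSchema;
import org.apache.calcite.schema.impl.AbstractTable;
import org.apache.calcite.sql.type.SqlTypeName;

public class CalciteSchemaSketch {
	public static void main(String[] args) {
		// Root schema without the "metadata" schema and without caching.
		CalciteSchema rootSchema = CalciteSchema.createRootSchema(false, false);

		// Register a sub-schema and a simple two-column table via the SchemaPlus facade.
		SchemaPlus sales = rootSchema.plus().add("SALES", new AbstractSchema());
		sales.add("EMP", new AbstractTable() {
			@Override public RelDataType getRowType(RelDataTypeFactory typeFactory) {
				return typeFactory.builder()
					.add("ID", SqlTypeName.INTEGER)
					.add("NAME", SqlTypeName.VARCHAR)
					.build();
			}
		});

		// Look the table up again through the CalciteSchema view of the same tree.
		CalciteSchema salesSchema = rootSchema.getSubSchema("SALES", true);
		CalciteSchema.TableEntry entry = salesSchema.getTable("EMP", true);
		System.out.println(entry == null ? "EMP not found" : "found " + entry.getTable());
	}
}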
Example 1
Source Project: Flink-CEPplus   Source File: SqlValidatorImpl.java    License: Apache License 2.0
private Table findTable(CalciteSchema schema, String tableName, boolean caseSensitive) {
	CalciteSchema.TableEntry entry = schema.getTable(tableName, caseSensitive);
	if (entry != null) {
		return entry.getTable();
	}

	// Check sub schemas
	for (CalciteSchema subSchema : schema.getSubSchemaMap().values()) {
		Table table = findTable(subSchema, tableName, caseSensitive);
		if (table != null) {
			return table;
		}
	}

	return null;
}
 
Example 2
Source Project: Flink-CEPplus   Source File: SqlValidatorImpl.java    License: Apache License 2.0
/**
 * Given a table alias, find the corresponding {@link Table} associated with it
 * */
private Table findTable(String alias) {
	List<String> names = null;
	if (tableScope == null) {
		// no tables to find
		return null;
	}

	for (ScopeChild child : tableScope.children) {
		if (catalogReader.nameMatcher().matches(child.name, alias)) {
			names = ((SqlIdentifier) child.namespace.getNode()).names;
			break;
		}
	}
	if (names == null || names.size() == 0) {
		return null;
	} else if (names.size() == 1) {
		return findTable(catalogReader.getRootSchema(), names.get(0),
			catalogReader.nameMatcher().isCaseSensitive());
	}

	CalciteSchema.TableEntry entry =
		SqlValidatorUtil.getTableEntry(catalogReader, names);

	return entry == null ? null : entry.getTable();
}
 
Example 3
Source Project: flink   Source File: PlanningConfigurationBuilder.java    License: Apache License 2.0
public PlanningConfigurationBuilder(
		TableConfig tableConfig,
		FunctionCatalog functionCatalog,
		CalciteSchema rootSchema,
		ExpressionBridge<PlannerExpression> expressionBridge) {
	this.tableConfig = tableConfig;
	this.functionCatalog = functionCatalog;

	// the converter is needed when calling temporal table functions from SQL, because
	// they reference a history table represented with a tree of table operations
	this.context = Contexts.of(expressionBridge);

	this.planner = new VolcanoPlanner(costFactory, context);
	planner.setExecutor(new ExpressionReducer(tableConfig));
	planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

	this.expressionBridge = expressionBridge;

	this.rootSchema = rootSchema;
}
 
Example 4
Source Project: calcite   Source File: Schemas.java    License: Apache License 2.0
/** Analyzes a view. For use within Calcite only. */
public static CalcitePrepare.AnalyzeViewResult analyzeView(
    final CalciteConnection connection, final CalciteSchema schema,
    final List<String> schemaPath, final String viewSql,
    List<String> viewPath, boolean fail) {
  final CalcitePrepare prepare = CalcitePrepare.DEFAULT_FACTORY.apply();
  final ImmutableMap<CalciteConnectionProperty, String> propValues =
      ImmutableMap.of();
  final CalcitePrepare.Context context =
      makeContext(connection, schema, schemaPath, viewPath, propValues);
  CalcitePrepare.Dummy.push(context);
  try {
    return prepare.analyzeView(context, viewSql, fail);
  } finally {
    CalcitePrepare.Dummy.pop(context);
  }
}
 
Example 5
Source Project: calcite   Source File: LatticeTest.java    License: Apache License 2.0
/** Tests some of the properties of the {@link Lattice} data structure. */
@Test void testLattice() throws Exception {
  modelWithLattice("star",
      "select 1 from \"foodmart\".\"sales_fact_1997\" as s\n"
          + "join \"foodmart\".\"product\" as p using (\"product_id\")\n"
          + "join \"foodmart\".\"time_by_day\" as t on t.\"time_id\" = s.\"time_id\"")
      .doWithConnection(c -> {
        final SchemaPlus schema = c.getRootSchema();
        final SchemaPlus adhoc = schema.getSubSchema("adhoc");
        assertThat(adhoc.getTableNames().contains("EMPLOYEES"), is(true));
        final Map.Entry<String, CalciteSchema.LatticeEntry> entry =
            adhoc.unwrap(CalciteSchema.class).getLatticeMap().firstEntry();
        final Lattice lattice = entry.getValue().getLattice();
        assertThat(lattice.firstColumn("S"), is(0));
        assertThat(lattice.firstColumn("P"), is(8));
        assertThat(lattice.firstColumn("T"), is(23));
        assertThat(lattice.firstColumn("PC"), is(-1));
        assertThat(lattice.defaultMeasures.size(), is(1));
        assertThat(lattice.rootNode.descendants.size(), is(3));
      });
}
 
Example 6
Source Project: quark   Source File: SqlWorker.java    License: Apache License 2.0
private void populateMaterializationsAndLattice(
    QuarkMaterializeCluster.RelOptPlannerHolder plannerHolder,
    CalciteSchema rootSchema) {
  if (materializations == null) {
    materializations =
        MaterializationService.instance().query(rootSchema);
  }
  Materializer materializer = new Materializer(materializations);

  materializer.populateMaterializations(context.getPrepareContext(), plannerHolder);

  List<CalciteSchema.LatticeEntry> lattices = Schemas.getLatticeEntries(rootSchema);

  for (CalciteSchema.LatticeEntry lattice : lattices) {
    final CalciteSchema.TableEntry starTable = lattice.getStarTable();
    final JavaTypeFactory typeFactory = context.getTypeFactory();
    final RelOptTableImpl starRelOptTable =
        RelOptTableImpl.create(catalogReader,
            starTable.getTable().getRowType(typeFactory), starTable, null);
    plannerHolder.getPlanner().addLattice(
        new RelOptLattice(lattice.getLattice(), starRelOptTable));
  }
}
 
Example 7
Source Project: flink   Source File: PlanningConfigurationBuilder.java    License: Apache License 2.0
public PlanningConfigurationBuilder(
		TableConfig tableConfig,
		FunctionCatalog functionCatalog,
		CalciteSchema rootSchema,
		ExpressionBridge<PlannerExpression> expressionBridge) {
	this.tableConfig = tableConfig;
	this.functionCatalog = functionCatalog;

	// the converter is needed when calling temporal table functions from SQL, because
	// they reference a history table represented with a tree of table operations.
	this.context = Contexts.of(expressionBridge, tableConfig);

	this.planner = new VolcanoPlanner(costFactory, context);
	planner.setExecutor(new ExpressionReducer(tableConfig));
	planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

	this.expressionBridge = expressionBridge;

	this.rootSchema = rootSchema;
}
 
Example 8
Source Project: flink   Source File: SqlExprToRexConverterImpl.java    License: Apache License 2.0
/**
 * Creates a catalog reader that contains a single {@link Table} with temporary table name
 * and specified {@code rowType}.
 *
 * @param rowType     table row type
 * @return the {@link CalciteCatalogReader} instance
 */
private static CalciteCatalogReader createSingleTableCatalogReader(
		boolean lenientCaseSensitivity,
		FrameworkConfig config,
		FlinkTypeFactory typeFactory,
		RelDataType rowType) {

	// connection properties
	boolean caseSensitive = !lenientCaseSensitivity && config.getParserConfig().caseSensitive();
	Properties properties = new Properties();
	properties.put(
		CalciteConnectionProperty.CASE_SENSITIVE.camelName(),
		String.valueOf(caseSensitive));
	CalciteConnectionConfig connectionConfig = new CalciteConnectionConfigImpl(properties);

	// prepare root schema
	final RowTypeSpecifiedTable table = new RowTypeSpecifiedTable(rowType);
	final Map<String, Table> tableMap = Collections.singletonMap(TEMPORARY_TABLE_NAME, table);
	CalciteSchema schema = CalciteSchemaBuilder.asRootSchema(new TableSpecifiedSchema(tableMap));

	return new FlinkCalciteCatalogReader(
		schema,
		new ArrayList<>(new ArrayList<>()),
		typeFactory,
		connectionConfig);
}
 
Example 9
Source Project: flink   Source File: FlinkCalciteCatalogReader.java    License: Apache License 2.0
public FlinkCalciteCatalogReader(
	CalciteSchema rootSchema,
	List<List<String>> defaultSchemas,
	RelDataTypeFactory typeFactory,
	CalciteConnectionConfig config) {

	super(
		rootSchema,
		SqlNameMatchers.withCaseSensitive(config != null && config.caseSensitive()),
			Stream.concat(
				defaultSchemas.stream(),
				Stream.of(Collections.<String>emptyList())
			).collect(Collectors.toList()),
		typeFactory,
		config);
}
 
Example 10
Source Project: flink   Source File: PlannerContext.java    License: Apache License 2.0
private FlinkCalciteCatalogReader createCatalogReader(
		boolean lenientCaseSensitivity,
		String currentCatalog,
		String currentDatabase) {
	SqlParser.Config sqlParserConfig = getSqlParserConfig();
	final boolean caseSensitive;
	if (lenientCaseSensitivity) {
		caseSensitive = false;
	} else {
		caseSensitive = sqlParserConfig.caseSensitive();
	}

	SqlParser.Config newSqlParserConfig = SqlParser.configBuilder(sqlParserConfig)
			.setCaseSensitive(caseSensitive)
			.build();

	SchemaPlus rootSchema = getRootSchema(this.rootSchema.plus());
	return new FlinkCalciteCatalogReader(
			CalciteSchema.from(rootSchema),
			asList(
					asList(currentCatalog, currentDatabase),
					singletonList(currentCatalog)
			),
			typeFactory,
			CalciteConfig$.MODULE$.connectionConfig(newSqlParserConfig));
}
 
Example 11
Source Project: calcite   Source File: Schemas.java    License: Apache License 2.0
public static CalciteSchema.FunctionEntry resolve(
    RelDataTypeFactory typeFactory,
    String name,
    Collection<CalciteSchema.FunctionEntry> functionEntries,
    List<RelDataType> argumentTypes) {
  final List<CalciteSchema.FunctionEntry> matches = new ArrayList<>();
  for (CalciteSchema.FunctionEntry entry : functionEntries) {
    if (matches(typeFactory, entry.getFunction(), argumentTypes)) {
      matches.add(entry);
    }
  }
  switch (matches.size()) {
  case 0:
    return null;
  case 1:
    return matches.get(0);
  default:
    throw new RuntimeException("More than one match for " + name
        + " with arguments " + argumentTypes);
  }
}
 
Example 12
Source Project: calcite   Source File: ServerDdlExecutor.java    License: Apache License 2.0
/** Executes a {@code CREATE TYPE} command. */
public void execute(SqlCreateType create,
    CalcitePrepare.Context context) {
  final Pair<CalciteSchema, String> pair = schema(context, true, create.name);
  final SqlValidator validator = validator(context, false);
  pair.left.add(pair.right, typeFactory -> {
    if (create.dataType != null) {
      return create.dataType.deriveType(validator);
    } else {
      final RelDataTypeFactory.Builder builder = typeFactory.builder();
      for (SqlNode def : create.attributeDefs) {
        final SqlAttributeDefinition attributeDef =
            (SqlAttributeDefinition) def;
        final SqlDataTypeSpec typeSpec = attributeDef.dataType;
        final RelDataType type = typeSpec.deriveType(validator);
        builder.add(attributeDef.name.getSimple(), type);
      }
      return builder.build();
    }
  });
}
 
Example 13
Source Project: calcite   Source File: ServerDdlExecutor.java    License: Apache License 2.0
/** Executes a {@code CREATE VIEW} command. */
public void execute(SqlCreateView create,
    CalcitePrepare.Context context) {
  final Pair<CalciteSchema, String> pair =
      schema(context, true, create.name);
  final SchemaPlus schemaPlus = pair.left.plus();
  for (Function function : schemaPlus.getFunctions(pair.right)) {
    if (function.getParameters().isEmpty()) {
      if (!create.getReplace()) {
        throw SqlUtil.newContextException(create.name.getParserPosition(),
            RESOURCE.viewExists(pair.right));
      }
      pair.left.removeFunction(pair.right);
    }
  }
  final SqlNode q = renameColumns(create.columnList, create.query);
  final String sql = q.toSqlString(CalciteSqlDialect.DEFAULT).getSql();
  final ViewTableMacro viewTableMacro =
      ViewTable.viewMacro(schemaPlus, sql, pair.left.path(null),
          context.getObjectPath(), false);
  final TranslatableTable x = viewTableMacro.apply(ImmutableList.of());
  Util.discard(x);
  schemaPlus.add(pair.right, viewTableMacro);
}
 
Example 14
Source Project: calcite   Source File: Lattice.java    License: Apache License 2.0
/** Creates a Builder based upon a mutable node. */
Builder(LatticeSpace space, CalciteSchema schema,
    MutableNode mutableNode) {
  this.rootSchema = schema;

  final Fixer fixer = new Fixer();
  fixer.fixUp(mutableNode);

  final LatticeRootNode node0 = new LatticeRootNode(space, mutableNode);
  final LatticeRootNode node1 = space.nodeMap.get(node0.digest);
  final LatticeRootNode node;
  if (node1 != null) {
    node = node1;
  } else {
    node = node0;
    space.nodeMap.put(node0.digest, node0);
  }

  this.rootNode = node;
  baseColumns = fixer.columnList.build();
  columnsByAlias = fixer.columnAliasList.build();
}
 
Example 15
Source Project: calcite   Source File: AbstractMaterializedViewTest.java    License: Apache License 2.0 6 votes vote down vote up
private RelNode toRel(RelOptCluster cluster, SchemaPlus rootSchema,
    SchemaPlus defaultSchema, String sql) throws SqlParseException {
  final SqlParser parser = SqlParser.create(sql, SqlParser.Config.DEFAULT);
  final SqlNode parsed = parser.parseStmt();

  final CalciteCatalogReader catalogReader = new CalciteCatalogReader(
      CalciteSchema.from(rootSchema),
      CalciteSchema.from(defaultSchema).path(null),
      new JavaTypeFactoryImpl(), new CalciteConnectionConfigImpl(new Properties()));

  final SqlValidator validator = new ValidatorForTest(SqlStdOperatorTable.instance(),
      catalogReader, new JavaTypeFactoryImpl(), SqlConformanceEnum.DEFAULT);
  final SqlNode validated = validator.validate(parsed);
  final SqlToRelConverter.Config config = SqlToRelConverter.configBuilder()
      .withTrimUnusedFields(true)
      .withExpand(true)
      .withDecorrelationEnabled(true)
      .build();
  final SqlToRelConverter converter = new SqlToRelConverter(
      (rowType, queryString, schemaPath, viewPath) -> {
        throw new UnsupportedOperationException("cannot expand view");
      }, validator, catalogReader, cluster, StandardConvertletTable.INSTANCE, config);
  return converter.convertQuery(validated, false, true).rel;
}
 
Example 16
Source Project: calcite   Source File: CalciteMaterializer.java    License: Apache License 2.0
/** Converts a relational expression to use a
 * {@link org.apache.calcite.schema.impl.StarTable} defined in {@code schema}.
 * Uses the first star table that fits. */
private Iterable<Callback> useStar(CalciteSchema schema, RelNode queryRel) {
  List<CalciteSchema.TableEntry> starTables =
      Schemas.getStarTables(schema.root());
  if (starTables.isEmpty()) {
    // Don't waste effort converting to leaf-join form.
    return ImmutableList.of();
  }
  final List<Callback> list = new ArrayList<>();
  final RelNode rel2 =
      RelOptMaterialization.toLeafJoinForm(queryRel);
  for (CalciteSchema.TableEntry starTable : starTables) {
    final Table table = starTable.getTable();
    assert table instanceof StarTable;
    RelOptTableImpl starRelOptTable =
        RelOptTableImpl.create(catalogReader, table.getRowType(typeFactory),
            starTable, null);
    final RelNode rel3 =
        RelOptMaterialization.tryUseStar(rel2, starRelOptTable);
    if (rel3 != null) {
      list.add(new Callback(rel3, starTable, starRelOptTable));
    }
  }
  return list;
}
 
Example 17
Source Project: calcite   Source File: CalcitePrepareImpl.java    License: Apache License 2.0
protected void populateMaterializations(Context context,
    RelOptCluster cluster, Prepare.Materialization materialization) {
  // REVIEW: initialize queryRel and tableRel inside MaterializationService,
  // not here?
  try {
    final CalciteSchema schema = materialization.materializedTable.schema;
    CalciteCatalogReader catalogReader =
        new CalciteCatalogReader(
            schema.root(),
            materialization.viewSchemaPath,
            context.getTypeFactory(),
            context.config());
    final CalciteMaterializer materializer =
        new CalciteMaterializer(this, context, catalogReader, schema,
            cluster, createConvertletTable());
    materializer.populate(materialization);
  } catch (Exception e) {
    throw new RuntimeException("While populating materialization "
        + materialization.materializedTable.path(), e);
  }
}
 
Example 18
Source Project: calcite   Source File: CalcitePrepareImpl.java    License: Apache License 2.0
CalcitePreparingStmt(CalcitePrepareImpl prepare,
    Context context,
    CatalogReader catalogReader,
    RelDataTypeFactory typeFactory,
    CalciteSchema schema,
    EnumerableRel.Prefer prefer,
    RelOptCluster cluster,
    Convention resultConvention,
    SqlRexConvertletTable convertletTable) {
  super(context, catalogReader, resultConvention);
  this.prepare = prepare;
  this.schema = schema;
  this.prefer = prefer;
  this.cluster = cluster;
  this.planner = cluster.getPlanner();
  this.rexBuilder = cluster.getRexBuilder();
  this.typeFactory = typeFactory;
  this.convertletTable = convertletTable;
}
 
Example 19
Source Project: calcite   Source File: RelOptTableImpl.java    License: Apache License 2.0
public <T> T unwrap(Class<T> clazz) {
  if (clazz.isInstance(this)) {
    return clazz.cast(this);
  }
  if (clazz.isInstance(table)) {
    return clazz.cast(table);
  }
  if (table instanceof Wrapper) {
    final T t = ((Wrapper) table).unwrap(clazz);
    if (t != null) {
      return t;
    }
  }
  if (clazz == CalciteSchema.class) {
    return clazz.cast(
        Schemas.subSchema(((CalciteCatalogReader) schema).rootSchema,
            Util.skipLast(getQualifiedName())));
  }
  return null;
}
 
Example 20
Source Project: calcite   Source File: CalciteCatalogReader.java    License: Apache License 2.0
public Prepare.PreparingTable getTable(final List<String> names) {
  // First look in the default schema, if any.
  // If not found, look in the root schema.
  CalciteSchema.TableEntry entry = SqlValidatorUtil.getTableEntry(this, names);
  if (entry != null) {
    final Table table = entry.getTable();
    if (table instanceof Wrapper) {
      final Prepare.PreparingTable relOptTable =
          ((Wrapper) table).unwrap(Prepare.PreparingTable.class);
      if (relOptTable != null) {
        return relOptTable;
      }
    }
    return RelOptTableImpl.create(this,
        table.getRowType(typeFactory), entry, null);
  }
  return null;
}
 
Example 21
Source Project: calcite   Source File: CalciteCatalogReader.java    License: Apache License 2.0
/** Creates an operator table that contains functions in the given class.
 *
 * @see ModelHandler#addFunctions */
public static SqlOperatorTable operatorTable(String className) {
  // Dummy schema to collect the functions
  final CalciteSchema schema =
      CalciteSchema.createRootSchema(false, false);
  ModelHandler.addFunctions(schema.plus(), null, ImmutableList.of(),
      className, "*", true);

  // The following is technical debt; see [CALCITE-2082] Remove
  // RelDataTypeFactory argument from SqlUserDefinedAggFunction constructor
  final SqlTypeFactoryImpl typeFactory =
      new SqlTypeFactoryImpl(RelDataTypeSystem.DEFAULT);

  final ListSqlOperatorTable table = new ListSqlOperatorTable();
  for (String name : schema.getFunctionNames()) {
    for (Function function : schema.getFunctions(name, true)) {
      final SqlIdentifier id = new SqlIdentifier(name, SqlParserPos.ZERO);
      table.add(
          toOp(typeFactory, id, function));
    }
  }
  return table;
}
 
Example 22
Source Project: kareldb   Source File: Driver.java    License: Apache License 2.0
/**
 * Creates an internal connection.
 */
CalciteConnection connect(CalciteSchema rootSchema,
                          JavaTypeFactory typeFactory, Properties properties) {
    return (CalciteConnection) ((CalciteFactory) factory)
        .newConnection(this, factory, CONNECT_STRING_PREFIX, properties,
            rootSchema, typeFactory);
}
 
Example 23
Source Project: flink   Source File: CatalogReader.java    License: Apache License 2.0
public CatalogReader(
		CalciteSchema rootSchema,
		List<List<String>> defaultSchema,
		RelDataTypeFactory typeFactory,
		CalciteConnectionConfig config) {
	super(rootSchema,
		SqlNameMatchers.withCaseSensitive(config != null && config.caseSensitive()),
		Stream.concat(
			defaultSchema.stream(),
			Stream.of(Collections.<String>emptyList())
		).collect(Collectors.toList()),
		typeFactory,
		config);
}
 
Example 24
Source Project: calcite   Source File: Schemas.java    License: Apache License 2.0
/** Generates a table name that is unique within the given schema. */
public static String uniqueTableName(CalciteSchema schema, String base) {
  String t = Objects.requireNonNull(base);
  for (int x = 0; schema.getTable(t, true) != null; x++) {
    t = base + x;
  }
  return t;
}
 
Example 25
Source Project: flink   Source File: FlinkCalciteCatalogReaderTest.java    License: Apache License 2.0
@Before
public void init() {
	rootSchemaPlus = CalciteSchema.createRootSchema(true, false).plus();
	Properties prop = new Properties();
	prop.setProperty(CalciteConnectionProperty.CASE_SENSITIVE.camelName(), "false");
	CalciteConnectionConfigImpl calciteConnConfig = new CalciteConnectionConfigImpl(prop);
	catalogReader = new FlinkCalciteCatalogReader(
		CalciteSchema.from(rootSchemaPlus),
		Collections.emptyList(),
		typeFactory,
		calciteConnConfig);
}
 
Example 26
public static SchemaPlus getSchema(MycatDBClientBased based) {
    SchemaPlus plus = CalciteSchema.createRootSchema(true).plus();
    MycatDBClientBasedConfig config = based.config();
    for (Map.Entry<String, SchemaHandler> stringConcurrentHashMapEntry : config.getSchemaMap().entrySet()) {
        SchemaPlus schemaPlus = plus.add(stringConcurrentHashMapEntry.getKey(), new AbstractSchema());
        for (Map.Entry<String, TableHandler> entry : stringConcurrentHashMapEntry.getValue().logicTables().entrySet()) {
            TableHandler logicTable = entry.getValue();
            MycatLogicTable mycatLogicTable = new MycatLogicTable(logicTable);
            schemaPlus.add(entry.getKey(), mycatLogicTable);
        }
    }
    config.getReflectiveSchemas().forEach((key, value) -> plus.add(key, new MycatReflectiveSchema(value)));
    return plus;
}
 
Example 27
Source Project: calcite   Source File: Frameworks.java    License: Apache License 2.0
/**
 * Initializes a container then calls user-specified code with a planner.
 *
 * @param action Callback containing user-specified code
 * @param config FrameworkConfig to use for planner action.
 * @return Return value from action
 */
public static <R> R withPlanner(final PlannerAction<R> action,
    final FrameworkConfig config) {
  return withPrepare(config,
      (cluster, relOptSchema, rootSchema, statement) -> {
        final CalciteSchema schema =
            CalciteSchema.from(
                Util.first(config.getDefaultSchema(), rootSchema));
        return action.apply(cluster, relOptSchema, schema.root().plus());
      });
}
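For context, a minimal sketch of how this helper might be invoked; the FrameworkConfig and the single-column row type built inside the lambda are assumptions for illustration, not code from the Calcite project.

// Usage sketch: withPlanner runs the lambda with a freshly initialized planner and cluster.
FrameworkConfig config = Frameworks.newConfigBuilder()
    .defaultSchema(Frameworks.createRootSchema(true))
    .build();
RelDataType rowType = Frameworks.withPlanner(
    (cluster, relOptSchema, rootSchema) ->
        cluster.getTypeFactory().builder()
            .add("I", SqlTypeName.INTEGER)
            .build(),
    config);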
 
Example 28
Source Project: streamline   Source File: TestCompilerUtils.java    License: Apache License 2.0
public static CalciteState sqlOverDummyTable(String sql)
        throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl
            (RelDataTypeSystem.DEFAULT);
    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("ID", SqlTypeName.INTEGER)
            .field("NAME", typeFactory.createType(String.class))
            .field("ADDR", typeFactory.createType(String.class))
            .build();
    Table table = streamableTable.stream();
    schema.add("FOO", table);
    schema.add("BAR", table);
    schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));

    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema),
            false,
            Collections.<String>emptyList(), typeFactory));
    SqlOperatorTable chainedSqlOperatorTable = new ChainedSqlOperatorTable(sqlOperatorTables);
    FrameworkConfig config = Frameworks.newConfigBuilder().defaultSchema(
            schema).operatorTable(chainedSqlOperatorTable).build();
    Planner planner = Frameworks.getPlanner(config);
    SqlNode parse = planner.parse(sql);
    SqlNode validate = planner.validate(parse);
    RelNode tree = planner.convert(validate);
    System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}
 
Example 29
Source Project: quark   Source File: QuarkMetaImpl.java    License: Apache License 2.0 5 votes vote down vote up
Enumerable<MetaCatalog> catalogs() {
  return Linq4j.asEnumerable(
      CalciteSchema.from(getConnection().getRootSchema()).getSubSchemaMap().values())
      .select(
          new Function1<CalciteSchema, MetaCatalog>() {
            public MetaCatalog apply(CalciteSchema calciteSchema) {
              return new MetaCatalog(calciteSchema.getName());
            }
          });
}
 
Example 30
Source Project: quark   Source File: QuarkViewTable.java    License: Apache License 2.0
public QuarkViewTable(QuarkSchema schema,
                      String name,
                      RelOptTableImpl relOptTable,
                      QuarkTable backupTable,
                      CalciteSchema tableSchema) {
  super(schema, name, backupTable.getColumns());
  this.backUpRelOptTable = relOptTable;
  this.backupTable = backupTable;
  this.backupTableSchema = tableSchema;
}