Java Code Examples for org.apache.calcite.schema.SchemaPlus#add()

The following examples show how to use org.apache.calcite.schema.SchemaPlus#add(). Each example is taken from an open-source project; the source file, project, and license are noted above it.
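Before the project examples, here is a minimal, self-contained sketch (assuming calcite-core is on the classpath) of two commonly used overloads: add(String, Schema), which registers a sub-schema and returns its SchemaPlus wrapper, and add(String, Function), which registers a user-defined function; further overloads accept a Table or a type definition, as the examples below show. The class and member names SchemaPlusAddSketch, MyFunctions, and PLUS_ONE are illustrative only and do not come from any of the projects below.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

import org.apache.calcite.jdbc.CalciteConnection;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.schema.impl.AbstractSchema;
import org.apache.calcite.schema.impl.ScalarFunctionImpl;

public class SchemaPlusAddSketch {

  /** Illustrative UDF holder; any public static method can be wrapped. */
  public static class MyFunctions {
    public static int plusOne(int x) {
      return x + 1;
    }
  }

  public static void main(String[] args) throws SQLException {
    // A model-less Calcite connection starts with an empty root schema.
    try (Connection connection = DriverManager.getConnection("jdbc:calcite:")) {
      CalciteConnection calciteConnection =
          connection.unwrap(CalciteConnection.class);
      SchemaPlus rootSchema = calciteConnection.getRootSchema();

      // add(String, Schema) registers a sub-schema and returns its SchemaPlus
      // wrapper, which can in turn receive tables and functions.
      SchemaPlus s = rootSchema.add("s", new AbstractSchema());

      // add(String, Function) registers a user-defined function in the schema.
      s.add("PLUS_ONE", ScalarFunctionImpl.create(MyFunctions.class, "plusOne"));

      // Both objects are now visible to queries on this connection,
      // e.g. select "s"."PLUS_ONE"(41).
    }
  }
}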
Example 1
Source File: ModelHandler.java    From calcite with Apache License 2.0
public void visit(JsonJdbcSchema jsonSchema) {
  checkRequiredAttributes(jsonSchema, "name");
  final SchemaPlus parentSchema = currentMutableSchema("jdbc schema");
  final DataSource dataSource =
      JdbcSchema.dataSource(jsonSchema.jdbcUrl,
          jsonSchema.jdbcDriver,
          jsonSchema.jdbcUser,
          jsonSchema.jdbcPassword);
  final JdbcSchema schema;
  if (jsonSchema.sqlDialectFactory == null || jsonSchema.sqlDialectFactory.isEmpty()) {
    schema =
        JdbcSchema.create(parentSchema, jsonSchema.name, dataSource,
            jsonSchema.jdbcCatalog, jsonSchema.jdbcSchema);
  } else {
    SqlDialectFactory factory = AvaticaUtils.instantiatePlugin(
        SqlDialectFactory.class, jsonSchema.sqlDialectFactory);
    schema =
        JdbcSchema.create(parentSchema, jsonSchema.name, dataSource,
            factory, jsonSchema.jdbcCatalog, jsonSchema.jdbcSchema);
  }
  final SchemaPlus schemaPlus = parentSchema.add(jsonSchema.name, schema);
  populateSchema(jsonSchema, schemaPlus);
}
 
Example 2
Source File: SortRemoveRuleTest.java    From calcite with Apache License 2.0
/**
 * The default schema that is used in these tests provides tables sorted on the primary key. Due
 * to this, scan operators always come with a {@link org.apache.calcite.rel.RelCollation} trait.
 */
private RelNode transform(String sql, RuleSet prepareRules) throws Exception {
  final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  final SchemaPlus defSchema = rootSchema.add("hr", new HrClusteredSchema());
  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .parserConfig(SqlParser.Config.DEFAULT)
      .defaultSchema(defSchema)
      .traitDefs(ConventionTraitDef.INSTANCE, RelCollationTraitDef.INSTANCE)
      .programs(
          Programs.of(prepareRules),
          Programs.ofRules(SortRemoveRule.INSTANCE))
      .build();
  Planner planner = Frameworks.getPlanner(config);
  SqlNode parse = planner.parse(sql);
  SqlNode validate = planner.validate(parse);
  RelRoot planRoot = planner.rel(validate);
  RelNode planBefore = planRoot.rel;
  RelTraitSet desiredTraits = planBefore.getTraitSet()
      .replace(EnumerableConvention.INSTANCE);
  RelNode planAfter = planner.transform(0, desiredTraits, planBefore);
  return planner.transform(1, desiredTraits, planAfter);
}
 
Example 3
Source File: QueryPlanner.java    From samza with Apache License 2.0
private void registerSourceSchemas(SchemaPlus rootSchema) {
  RelSchemaConverter relSchemaConverter = new RelSchemaConverter();

  for (SqlIOConfig ssc : systemStreamConfigBySource.values()) {
    SchemaPlus previousLevelSchema = rootSchema;
    List<String> sourceParts = ssc.getSourceParts();
    RelSchemaProvider relSchemaProvider = relSchemaProviders.get(ssc.getSource());

    for (int sourcePartIndex = 0; sourcePartIndex < sourceParts.size(); sourcePartIndex++) {
      String sourcePart = sourceParts.get(sourcePartIndex);
      if (sourcePartIndex < sourceParts.size() - 1) {
        SchemaPlus sourcePartSchema = previousLevelSchema.getSubSchema(sourcePart);
        if (sourcePartSchema == null) {
          sourcePartSchema = previousLevelSchema.add(sourcePart, new AbstractSchema());
        }
        previousLevelSchema = sourcePartSchema;
      } else {
        // If the source part is the last one, then fetch the schema corresponding to the stream and register.
        RelDataType relationalSchema = getSourceRelSchema(relSchemaProvider, relSchemaConverter);
        previousLevelSchema.add(sourcePart, createTableFromRelSchema(relationalSchema));
        break;
      }
    }
  }
}
 
Example 4
Source File: CalciteSolrDriver.java    From lucene-solr with Apache License 2.0
@Override
public Connection connect(String url, Properties info) throws SQLException {
  if(!this.acceptsURL(url)) {
    return null;
  }

  Connection connection = super.connect(url, info);
  CalciteConnection calciteConnection = (CalciteConnection) connection;
  final SchemaPlus rootSchema = calciteConnection.getRootSchema();

  String schemaName = info.getProperty("zk");
  if(schemaName == null) {
    throw new SQLException("zk must be set");
  }
  final SolrSchema solrSchema = new SolrSchema(info, solrClientCache);
  rootSchema.add(schemaName, solrSchema);

  // Set the default schema
  calciteConnection.setSchema(schemaName);
  return calciteConnection;
}
 
Example 5
Source File: ModelHandler.java    From calcite with Apache License 2.0
public void visit(JsonCustomTable jsonTable) {
  try {
    checkRequiredAttributes(jsonTable, "name", "factory");
    final SchemaPlus schema = currentMutableSchema("table");
    final TableFactory tableFactory =
        AvaticaUtils.instantiatePlugin(TableFactory.class,
            jsonTable.factory);
    final Table table =
        tableFactory.create(schema, jsonTable.name,
            operandMap(null, jsonTable.operand), null);
    for (JsonColumn column : jsonTable.columns) {
      column.accept(this);
    }
    schema.add(jsonTable.name, table);
  } catch (Exception e) {
    throw new RuntimeException("Error instantiating " + jsonTable, e);
  }
}
 
Example 6
Source File: MultiJdbcSchemaJoinTest.java    From calcite with Apache License 2.0
private Connection setup() throws SQLException {
  // Create a jdbc database & table
  final String db = TempDb.INSTANCE.getUrl();
  Connection c1 = DriverManager.getConnection(db, "", "");
  Statement stmt1 = c1.createStatement();
  // This is a table we can join with the emps from the hr schema
  stmt1.execute("create table table1(id integer not null primary key, "
      + "field1 varchar(10))");
  stmt1.execute("insert into table1 values(100, 'foo')");
  stmt1.execute("insert into table1 values(200, 'bar')");
  c1.close();

  // Make a Calcite schema with both a jdbc schema and a non-jdbc schema
  Connection connection = DriverManager.getConnection("jdbc:calcite:");
  CalciteConnection calciteConnection =
      connection.unwrap(CalciteConnection.class);
  SchemaPlus rootSchema = calciteConnection.getRootSchema();
  rootSchema.add("DB",
      JdbcSchema.create(rootSchema, "DB",
          JdbcSchema.dataSource(db, "org.hsqldb.jdbcDriver", "", ""),
          null, null));
  rootSchema.add("hr", new ReflectiveSchema(new JdbcTest.HrSchema()));
  return connection;
}
 
Example 7
Source File: LealoneStoragePlugin.java    From Bats with Apache License 2.0
void setHolder(SchemaPlus plusOfThis) {
    for (String s : getSubSchemaNames()) {
        CapitalizingJdbcSchema inner = getSubSchema(s);
        SchemaPlus holder = plusOfThis.add(s, inner);
        inner.setHolder(holder);
    }
}
 
Example 8
Source File: ModelHandler.java    From calcite with Apache License 2.0
public void visit(JsonView jsonView) {
  try {
    checkRequiredAttributes(jsonView, "name");
    final SchemaPlus schema = currentMutableSchema("view");
    final List<String> path = Util.first(jsonView.path, currentSchemaPath());
    final List<String> viewPath = ImmutableList.<String>builder().addAll(path)
        .add(jsonView.name).build();
    schema.add(jsonView.name,
        ViewTable.viewMacro(schema, jsonView.getSql(), path, viewPath,
            jsonView.modifiable));
  } catch (Exception e) {
    throw new RuntimeException("Error instantiating " + jsonView, e);
  }
}
 
Example 9
Source File: SplunkDriver.java    From calcite with Apache License 2.0
@Override public Connection connect(String url, Properties info)
    throws SQLException {
  Connection connection = super.connect(url, info);
  CalciteConnection calciteConnection = (CalciteConnection) connection;
  SplunkConnection splunkConnection;
  try {
    String url1 = info.getProperty("url");
    if (url1 == null) {
      throw new IllegalArgumentException(
          "Must specify 'url' property");
    }
    if (url1.equals("mock")) {
      splunkConnection = new MockSplunkConnection();
    } else {
      String user = info.getProperty("user");
      if (user == null) {
        throw new IllegalArgumentException(
            "Must specify 'user' property");
      }
      String password = info.getProperty("password");
      if (password == null) {
        throw new IllegalArgumentException(
            "Must specify 'password' property");
      }
      URL url2 = new URL(url1);
      splunkConnection = new SplunkConnectionImpl(url2, user, password);
    }
  } catch (Exception e) {
    throw new SQLException("Cannot connect", e);
  }
  final SchemaPlus rootSchema = calciteConnection.getRootSchema();
  rootSchema.add("splunk", new SplunkSchema(splunkConnection));

  return connection;
}
 
Example 10
Source File: TableFunctionTest.java    From calcite with Apache License 2.0
/** As {@link #testScannableTableFunction()} but with named parameters. */
@Test void testScannableTableFunctionWithNamedParameters()
    throws SQLException, ClassNotFoundException {
  Connection connection = DriverManager.getConnection("jdbc:calcite:");
  CalciteConnection calciteConnection =
      connection.unwrap(CalciteConnection.class);
  SchemaPlus rootSchema = calciteConnection.getRootSchema();
  SchemaPlus schema = rootSchema.add("s", new AbstractSchema());
  final TableFunction table = TableFunctionImpl.create(Smalls.MAZE2_METHOD);
  schema.add("Maze", table);
  final String sql = "select *\n"
      + "from table(\"s\".\"Maze\"(5, 3, 1))";
  final Statement statement = connection.createStatement();
  ResultSet resultSet = statement.executeQuery(sql);
  final String result = "S=abcde\n"
      + "S=xyz\n";
  assertThat(CalciteAssert.toString(resultSet),
      is(result + "S=generate2(w=5, h=3, s=1)\n"));

  final String sql2 = "select *\n"
      + "from table(\"s\".\"Maze\"(WIDTH => 5, HEIGHT => 3, SEED => 1))";
  resultSet = statement.executeQuery(sql2);
  assertThat(CalciteAssert.toString(resultSet),
      is(result + "S=generate2(w=5, h=3, s=1)\n"));

  final String sql3 = "select *\n"
      + "from table(\"s\".\"Maze\"(HEIGHT => 3, WIDTH => 5))";
  resultSet = statement.executeQuery(sql3);
  assertThat(CalciteAssert.toString(resultSet),
      is(result + "S=generate2(w=5, h=3, s=null)\n"));
  connection.close();
}
 
Example 11
Source File: MetricsSqlQueryService.java    From nifi with Apache License 2.0
private CachedStatement buildCachedStatement(final String sql, final ReportingContext context) throws Exception {

    final CalciteConnection connection = createConnection();
    final SchemaPlus rootSchema = createRootSchema(connection);

    final ConnectionStatusTable connectionStatusTable = new ConnectionStatusTable(context, getLogger());
    rootSchema.add("CONNECTION_STATUS", connectionStatusTable);
    if (context.isAnalyticsEnabled()) {
        final ConnectionStatusPredictionsTable connectionStatusPredictionsTable = new ConnectionStatusPredictionsTable(context, getLogger());
        rootSchema.add("CONNECTION_STATUS_PREDICTIONS", connectionStatusPredictionsTable);
    } else {
        getLogger().debug("Analytics is not enabled, CONNECTION_STATUS_PREDICTIONS table is not available for querying");
    }
    final ProcessorStatusTable processorStatusTable = new ProcessorStatusTable(context, getLogger());
    rootSchema.add("PROCESSOR_STATUS", processorStatusTable);
    final ProcessGroupStatusTable processGroupStatusTable = new ProcessGroupStatusTable(context, getLogger());
    rootSchema.add("PROCESS_GROUP_STATUS", processGroupStatusTable);
    final JvmMetricsTable jvmMetricsTable = new JvmMetricsTable(context, getLogger());
    rootSchema.add("JVM_METRICS", jvmMetricsTable);
    final BulletinTable bulletinTable = new BulletinTable(context, getLogger());
    rootSchema.add("BULLETINS", bulletinTable);
    final ProvenanceTable provenanceTable = new ProvenanceTable(context, getLogger());
    rootSchema.add("PROVENANCE", provenanceTable);

    rootSchema.setCacheEnabled(false);

    final PreparedStatement stmt = connection.prepareStatement(sql);
    return new CachedStatement(stmt, connection);
}
 
Example 12
Source File: CloneSchema.java    From calcite with Apache License 2.0
public Schema create(
    SchemaPlus parentSchema,
    String name,
    Map<String, Object> operand) {
  SchemaPlus schema =
      parentSchema.add(name,
          JdbcSchema.create(parentSchema, name + "$source", operand));
  return new CloneSchema(schema);
}
 
Example 13
Source File: GeodeAllDataTypesTest.java    From calcite with Apache License 2.0
private CalciteAssert.ConnectionFactory newConnectionFactory() {
  return new CalciteAssert.ConnectionFactory() {
    @Override public Connection createConnection() throws SQLException {
      final Connection connection = DriverManager.getConnection("jdbc:calcite:lex=JAVA");
      final SchemaPlus root = connection.unwrap(CalciteConnection.class).getRootSchema();

      root.add("geode",
          new GeodeSchema(
              POLICY.cache(),
              Collections.singleton("allDataTypesRegion")));

      return connection;
    }
  };
}
 
Example 14
Source File: ExceptionMessageTest.java    From calcite with Apache License 2.0
@BeforeEach
public void setUp() throws SQLException {
  Connection connection = DriverManager.getConnection("jdbc:calcite:");
  CalciteConnection calciteConnection =
      connection.unwrap(CalciteConnection.class);
  SchemaPlus rootSchema = calciteConnection.getRootSchema();
  rootSchema.add("test", new ReflectiveSchema(new TestSchema()));
  calciteConnection.setSchema("test");
  this.conn = calciteConnection;
}
 
Example 15
Source File: LookupOperatorOverloadsTest.java    From calcite with Apache License 2.0
private void checkInternal(boolean caseSensitive) throws SQLException {
  final SqlNameMatcher nameMatcher =
      SqlNameMatchers.withCaseSensitive(caseSensitive);
  final String schemaName = "MySchema";
  final String funcName = "MyFUNC";
  final String anotherName = "AnotherFunc";

  try (Connection connection = DriverManager.getConnection("jdbc:calcite:")) {
    CalciteConnection calciteConnection =
        connection.unwrap(CalciteConnection.class);
    SchemaPlus rootSchema = calciteConnection.getRootSchema();
    SchemaPlus schema = rootSchema.add(schemaName, new AbstractSchema());
    final TableFunction table = TableFunctionImpl.create(Smalls.MAZE_METHOD);
    schema.add(funcName, table);
    schema.add(anotherName, table);
    final TableFunction table2 =
        TableFunctionImpl.create(Smalls.MAZE3_METHOD);
    schema.add(funcName, table2);

    final CalciteServerStatement statement =
        connection.createStatement().unwrap(CalciteServerStatement.class);
    final CalcitePrepare.Context prepareContext =
        statement.createPrepareContext();
    final JavaTypeFactory typeFactory = prepareContext.getTypeFactory();
    CalciteCatalogReader reader =
        new CalciteCatalogReader(prepareContext.getRootSchema(),
            ImmutableList.of(), typeFactory, prepareContext.config());

    final List<SqlOperator> operatorList = new ArrayList<>();
    SqlIdentifier myFuncIdentifier =
        new SqlIdentifier(Lists.newArrayList(schemaName, funcName), null,
            SqlParserPos.ZERO, null);
    reader.lookupOperatorOverloads(myFuncIdentifier,
        SqlFunctionCategory.USER_DEFINED_TABLE_FUNCTION, SqlSyntax.FUNCTION,
        operatorList, nameMatcher);
    checkFunctionType(2, funcName, operatorList);

    operatorList.clear();
    reader.lookupOperatorOverloads(myFuncIdentifier,
        SqlFunctionCategory.USER_DEFINED_FUNCTION, SqlSyntax.FUNCTION,
        operatorList, nameMatcher);
    checkFunctionType(0, null, operatorList);

    operatorList.clear();
    SqlIdentifier anotherFuncIdentifier =
        new SqlIdentifier(Lists.newArrayList(schemaName, anotherName), null,
            SqlParserPos.ZERO, null);
    reader.lookupOperatorOverloads(anotherFuncIdentifier,
        SqlFunctionCategory.USER_DEFINED_TABLE_FUNCTION, SqlSyntax.FUNCTION,
        operatorList, nameMatcher);
    checkFunctionType(1, anotherName, operatorList);
  }
}
 
Example 16
Source File: SystemTablePlugin.java    From Bats with Apache License 2.0
@Override
public void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) {
  parent.add(schema.getName(), schema);
}
 
Example 17
Source File: SelectMapper.java    From Alink with Apache License 2.0
public static void registerFlinkBuiltInFunctions(SchemaPlus schema) {
    BiConsumer<String, Method> addScalarFunctionConsumer =
        (k, v) -> schema.add(k, ScalarFunctionImpl.create(v));

    addScalarFunctionConsumer.accept("LOG2", MathFunctions.LOG2);
    addScalarFunctionConsumer.accept("LOG2", MathFunctions.LOG2_DEC);
    addScalarFunctionConsumer.accept("LOG", MathFunctions.LOG);
    addScalarFunctionConsumer.accept("LOG", MathFunctions.LOG_DEC);
    addScalarFunctionConsumer.accept("LOG", MathFunctions.LOG_WITH_BASE);
    addScalarFunctionConsumer.accept("LOG", MathFunctions.LOG_WITH_BASE_DEC_DOU);
    addScalarFunctionConsumer.accept("LOG", MathFunctions.LOG_WITH_BASE_DOU_DEC);
    addScalarFunctionConsumer.accept("LOG", MathFunctions.LOG_WITH_BASE_DEC_DEC);
    addScalarFunctionConsumer.accept("SINH", MathFunctions.SINH);
    addScalarFunctionConsumer.accept("SINH", MathFunctions.SINH_DEC);
    addScalarFunctionConsumer.accept("COSH", MathFunctions.COSH);
    addScalarFunctionConsumer.accept("COSH", MathFunctions.COSH_DEC);
    addScalarFunctionConsumer.accept("TANH", MathFunctions.TANH);
    addScalarFunctionConsumer.accept("TANH", MathFunctions.TANH_DEC);
    addScalarFunctionConsumer.accept("UUID", MathFunctions.UUID);
    addScalarFunctionConsumer.accept("BIN", MathFunctions.BIN);
    addScalarFunctionConsumer.accept("HEX", MathFunctions.HEX_LONG);
    addScalarFunctionConsumer.accept("HEX", MathFunctions.HEX_STRING);

    addScalarFunctionConsumer.accept("FROM_BASE64", StringFunctions.FROMBASE64);
    addScalarFunctionConsumer.accept("TO_BASE64", StringFunctions.TOBASE64);
    addScalarFunctionConsumer.accept("LPAD", StringFunctions.LPAD);
    addScalarFunctionConsumer.accept("RPAD", StringFunctions.RPAD);
    addScalarFunctionConsumer.accept("REGEXP_REPLACE", StringFunctions.REGEXP_REPLACE);
    addScalarFunctionConsumer.accept("REGEXP_EXTRACT", StringFunctions.REGEXP_EXTRACT);

    addScalarFunctionConsumer.accept("LTRIM", BuiltInMethod.LTRIM.method);
    addScalarFunctionConsumer.accept("RTRIM", BuiltInMethod.RTRIM.method);

    addScalarFunctionConsumer.accept("MD5", StringFunctions.MD5);
    addScalarFunctionConsumer.accept("SHA1", StringFunctions.SHA1);
    addScalarFunctionConsumer.accept("SHA224", StringFunctions.SHA224);
    addScalarFunctionConsumer.accept("SHA256", StringFunctions.SHA256);
    addScalarFunctionConsumer.accept("SHA384", StringFunctions.SHA384);
    addScalarFunctionConsumer.accept("SHA512", StringFunctions.SHA512);
    addScalarFunctionConsumer.accept("SHA2", StringFunctions.SHA2);
}
 
Example 18
Source File: TableFunctionTest.java    From calcite with Apache License 2.0
/** As {@link #testScannableTableFunctionWithNamedParameters()} but with multiple
 * table functions registered under the same name. */
@Test void testMultipleScannableTableFunctionWithNamedParameters()
    throws SQLException, ClassNotFoundException {
  try (Connection connection = DriverManager.getConnection("jdbc:calcite:");
       Statement statement = connection.createStatement()) {
    CalciteConnection calciteConnection =
        connection.unwrap(CalciteConnection.class);
    SchemaPlus rootSchema = calciteConnection.getRootSchema();
    SchemaPlus schema = rootSchema.add("s", new AbstractSchema());
    final TableFunction table1 = TableFunctionImpl.create(Smalls.MAZE_METHOD);
    schema.add("Maze", table1);
    final TableFunction table2 = TableFunctionImpl.create(Smalls.MAZE2_METHOD);
    schema.add("Maze", table2);
    final TableFunction table3 = TableFunctionImpl.create(Smalls.MAZE3_METHOD);
    schema.add("Maze", table3);
    final String sql = "select *\n"
        + "from table(\"s\".\"Maze\"(5, 3, 1))";
    ResultSet resultSet = statement.executeQuery(sql);
    final String result = "S=abcde\n"
        + "S=xyz\n";
    assertThat(CalciteAssert.toString(resultSet),
        is(result + "S=generate(w=5, h=3, s=1)\n"));

    final String sql2 = "select *\n"
        + "from table(\"s\".\"Maze\"(WIDTH => 5, HEIGHT => 3, SEED => 1))";
    resultSet = statement.executeQuery(sql2);
    assertThat(CalciteAssert.toString(resultSet),
        is(result + "S=generate2(w=5, h=3, s=1)\n"));

    final String sql3 = "select *\n"
        + "from table(\"s\".\"Maze\"(HEIGHT => 3, WIDTH => 5))";
    resultSet = statement.executeQuery(sql3);
    assertThat(CalciteAssert.toString(resultSet),
        is(result + "S=generate2(w=5, h=3, s=null)\n"));

    final String sql4 = "select *\n"
        + "from table(\"s\".\"Maze\"(FOO => 'a'))";
    resultSet = statement.executeQuery(sql4);
    assertThat(CalciteAssert.toString(resultSet),
        is(result + "S=generate3(foo=a)\n"));
  }
}
 
Example 19
Source File: InfoSchemaStoragePlugin.java    From Bats with Apache License 2.0
@Override
public void registerSchemas(SchemaConfig schemaConfig, SchemaPlus parent) {
  ISchema s = new ISchema(this);
  parent.add(s.getName(), s);
}
 
Example 20
Source File: TestCompilerUtils.java    From streamline with Apache License 2.0
public static CalciteState sqlOverNestedTable(String sql)
        throws RelConversionException, ValidationException, SqlParseException {
    SchemaPlus schema = Frameworks.createRootSchema(true);
    JavaTypeFactory typeFactory = new JavaTypeFactoryImpl
            (RelDataTypeSystem.DEFAULT);

    StreamableTable streamableTable = new CompilerUtil.TableBuilderInfo(typeFactory)
            .field("ID", SqlTypeName.INTEGER)
            .field("MAPFIELD",
                    typeFactory.createTypeWithNullability(
                            typeFactory.createMapType(
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                    typeFactory.createTypeWithNullability(
                                            typeFactory.createSqlType(SqlTypeName.INTEGER), true))
                            , true))
            .field("NESTEDMAPFIELD",
                    typeFactory.createTypeWithNullability(
                        typeFactory.createMapType(
                                typeFactory.createTypeWithNullability(
                                        typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                typeFactory.createTypeWithNullability(
                                        typeFactory.createMapType(
                                                typeFactory.createTypeWithNullability(
                                                        typeFactory.createSqlType(SqlTypeName.VARCHAR), true),
                                                typeFactory.createTypeWithNullability(
                                                        typeFactory.createSqlType(SqlTypeName.INTEGER), true))
                                        , true))
                                    , true))
            .field("ARRAYFIELD", typeFactory.createTypeWithNullability(
                    typeFactory.createArrayType(
                        typeFactory.createTypeWithNullability(
                            typeFactory.createSqlType(SqlTypeName.INTEGER), true), -1L)
                    , true))
            .build();
    Table table = streamableTable.stream();
    schema.add("FOO", table);
    schema.add("BAR", table);
    schema.add("MYPLUS", ScalarFunctionImpl.create(MyPlus.class, "eval"));
    List<SqlOperatorTable> sqlOperatorTables = new ArrayList<>();
    sqlOperatorTables.add(SqlStdOperatorTable.instance());
    sqlOperatorTables.add(new CalciteCatalogReader(CalciteSchema.from(schema),
                                                   false,
                                                   Collections.<String>emptyList(), typeFactory));
    SqlOperatorTable chainedSqlOperatorTable = new ChainedSqlOperatorTable(sqlOperatorTables);
    FrameworkConfig config = Frameworks.newConfigBuilder().defaultSchema(
            schema).operatorTable(chainedSqlOperatorTable).build();
    Planner planner = Frameworks.getPlanner(config);
    SqlNode parse = planner.parse(sql);
    SqlNode validate = planner.validate(parse);
    RelNode tree = planner.convert(validate);
    System.out.println(RelOptUtil.toString(tree, SqlExplainLevel.ALL_ATTRIBUTES));
    return new CalciteState(schema, tree);
}