org.apache.calcite.tools.RuleSet Java Examples

The following examples show how to use org.apache.calcite.tools.RuleSet. Each example is taken from an open-source project; the source file, originating project, and license are noted above the code.
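Most of the examples below follow the same basic pattern: collect planner rules into an immutable RuleSet with RuleSets.ofList, wrap it in a Program via Programs.of, and run that program against a RelNode with a target trait set. The snippet below is a minimal sketch of that flow; the helper name applyRules is made up for illustration, and the input RelNode is assumed to come from elsewhere (for example from a Frameworks Planner, as in Example #6).

import java.util.Collections;

import org.apache.calcite.adapter.enumerable.EnumerableConvention;
import org.apache.calcite.adapter.enumerable.EnumerableRules;
import org.apache.calcite.plan.RelTraitSet;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.tools.Program;
import org.apache.calcite.tools.Programs;
import org.apache.calcite.tools.RuleSet;
import org.apache.calcite.tools.RuleSets;

/** Minimal sketch: apply a RuleSet to an existing logical plan. */
static RelNode applyRules(RelNode rel) {
  // 1. Collect the planner rules into an immutable RuleSet.
  RuleSet ruleSet = RuleSets.ofList(
      EnumerableRules.ENUMERABLE_PROJECT_RULE,
      EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE);

  // 2. Wrap the RuleSet in a Program.
  Program program = Programs.of(ruleSet);

  // 3. Request the Enumerable convention in the output trait set.
  RelTraitSet toTraits = rel.getTraitSet().replace(EnumerableConvention.INSTANCE);

  // 4. Run the program; the planner is taken from the plan's cluster.
  return program.run(rel.getCluster().getPlanner(), rel, toTraits,
      Collections.emptyList(), Collections.emptyList());
}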
Example #1
Source File: SqlHintsConverterTest.java    From calcite with Apache License 2.0
@Test void testUseMergeJoin() {
  final String sql = "select /*+ use_merge_join(emp, dept) */\n"
      + "ename, job, sal, dept.name\n"
      + "from emp join dept on emp.deptno = dept.deptno";
  RelOptPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
  Tester tester1 = tester.withDecorrelation(true)
      .withClusterFactory(
          relOptCluster -> RelOptCluster.create(planner, relOptCluster.getRexBuilder()));
  final RelNode rel = tester1.convertSqlToRel(sql).rel;
  RuleSet ruleSet = RuleSets.ofList(
      EnumerableRules.ENUMERABLE_MERGE_JOIN_RULE,
      EnumerableRules.ENUMERABLE_JOIN_RULE,
      EnumerableRules.ENUMERABLE_PROJECT_RULE,
      EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE,
      EnumerableRules.ENUMERABLE_SORT_RULE,
      AbstractConverter.ExpandConversionRule.INSTANCE);
  Program program = Programs.of(ruleSet);
  RelTraitSet toTraits = rel
      .getCluster()
      .traitSet()
      .replace(EnumerableConvention.INSTANCE);

  RelNode relAfter = program.run(planner, rel, toTraits,
      Collections.emptyList(), Collections.emptyList());

  String planAfter = NL + RelOptUtil.toString(relAfter);
  getDiffRepos().assertEquals("planAfter", "${planAfter}", planAfter);
}
 
Example #2
Source File: SortRemoveRuleTest.java    From calcite with Apache License 2.0
/** Test case for
 * <a href="https://issues.apache.org/jira/browse/CALCITE-2554">[CALCITE-2554]
 * Enrich enumerable join operators with order preserving information</a>.
 *
 * <p>Since join inputs are sorted, and this join preserves the order of the
 * left input, there shouldn't be any sort operator above the join.
 */
@Test void removeSortOverEnumerableSemiJoin() throws Exception {
  RuleSet prepareRules =
      RuleSets.ofList(
          SortProjectTransposeRule.INSTANCE,
          SemiJoinRule.PROJECT,
          SemiJoinRule.JOIN,
          EnumerableRules.ENUMERABLE_PROJECT_RULE,
          EnumerableRules.ENUMERABLE_SORT_RULE,
          EnumerableRules.ENUMERABLE_JOIN_RULE,
          EnumerableRules.ENUMERABLE_FILTER_RULE,
          EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE);
  String sql =
      "select e.\"deptno\" from \"hr\".\"emps\" e\n"
          + " where e.\"deptno\" in (select d.\"deptno\" from \"hr\".\"depts\" d)\n"
          + " order by e.\"empid\"";
  RelNode actualPlan = transform(sql, prepareRules);
  assertThat(
      toString(actualPlan),
      allOf(
          containsString("EnumerableHashJoin"),
          not(containsString("EnumerableSort"))));
}
 
Example #3
Source File: SortRemoveRuleTest.java    From calcite with Apache License 2.0
/** Test case for
 * <a href="https://issues.apache.org/jira/browse/CALCITE-2554">[CALCITE-2554]
 * Enrich enumerable join operators with order preserving information</a>.
 *
 * <p>Since join inputs are sorted, and this join preserves the order of the
 * left input, there shouldn't be any sort operator above the join.
 *
 * <p>Until CALCITE-2018 is fixed we can add back EnumerableRules.ENUMERABLE_SORT_RULE
 */
@Test void removeSortOverEnumerableCorrelate() throws Exception {
  RuleSet prepareRules =
      RuleSets.ofList(
          SortProjectTransposeRule.INSTANCE,
          JoinToCorrelateRule.INSTANCE,
          EnumerableRules.ENUMERABLE_PROJECT_RULE,
          EnumerableRules.ENUMERABLE_CORRELATE_RULE,
          EnumerableRules.ENUMERABLE_FILTER_RULE,
          EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE);
  for (String joinType : Arrays.asList("left", "inner")) {
    String sql =
        "select e.\"deptno\" from \"hr\".\"emps\" e "
            + joinType + " join \"hr\".\"depts\" d "
            + " on e.\"deptno\" = d.\"deptno\" "
            + "order by e.\"empid\" ";
    RelNode actualPlan = transform(sql, prepareRules);
    assertThat(
        toString(actualPlan),
        allOf(
            containsString("EnumerableCorrelate"),
            not(containsString("EnumerableSort"))));
  }
}
 
Example #4
Source File: SortRemoveRuleTest.java    From calcite with Apache License 2.0
/** Test case for
 * <a href="https://issues.apache.org/jira/browse/CALCITE-2554">[CALCITE-2554]
 * Enrich enumerable join operators with order preserving information</a>.
 *
 * <p>Since join inputs are sorted, and this join preserves the order of the
 * left input, there shouldn't be any sort operator above the join.
 */
@Test void removeSortOverEnumerableNestedLoopJoin() throws Exception {
  RuleSet prepareRules =
      RuleSets.ofList(
          SortProjectTransposeRule.INSTANCE,
          EnumerableRules.ENUMERABLE_JOIN_RULE,
          EnumerableRules.ENUMERABLE_PROJECT_RULE,
          EnumerableRules.ENUMERABLE_SORT_RULE,
          EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE);
  // Inner join is not considered since the ENUMERABLE_JOIN_RULE does not generate a nestedLoop
  // join in the case of inner joins.
  for (String joinType : Arrays.asList("left", "right", "full")) {
    String sql =
        "select e.\"deptno\" from \"hr\".\"emps\" e "
            + joinType + " join \"hr\".\"depts\" d "
            + " on e.\"deptno\" > d.\"deptno\" "
            + "order by e.\"empid\" ";
    RelNode actualPlan = transform(sql, prepareRules);
    assertThat(
        toString(actualPlan),
        allOf(
            containsString("EnumerableNestedLoopJoin"),
            not(containsString("EnumerableSort"))));
  }
}
 
Example #5
Source File: SortRemoveRuleTest.java    From calcite with Apache License 2.0
/** Test case for
 * <a href="https://issues.apache.org/jira/browse/CALCITE-2554">[CALCITE-2554]
 * Enrich enumerable join operators with order preserving information</a>.
 *
 * <p>Since join inputs are sorted, and this join preserves the order of the
 * left input, there shouldn't be any sort operator above the join.
 */
@Test void removeSortOverEnumerableHashJoin() throws Exception {
  RuleSet prepareRules =
      RuleSets.ofList(
          SortProjectTransposeRule.INSTANCE,
          EnumerableRules.ENUMERABLE_JOIN_RULE,
          EnumerableRules.ENUMERABLE_PROJECT_RULE,
          EnumerableRules.ENUMERABLE_SORT_RULE,
          EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE);
  for (String joinType : Arrays.asList("left", "right", "full", "inner")) {
    String sql =
        "select e.\"deptno\" from \"hr\".\"emps\" e "
            + joinType + " join \"hr\".\"depts\" d "
            + " on e.\"deptno\" = d.\"deptno\" "
            + "order by e.\"empid\" ";
    RelNode actualPlan = transform(sql, prepareRules);
    assertThat(
        toString(actualPlan),
        allOf(
            containsString("EnumerableHashJoin"),
            not(containsString("EnumerableSort"))));
  }
}
 
Example #6
Source File: SortRemoveRuleTest.java    From calcite with Apache License 2.0
/**
 * The default schema that is used in these tests provides tables sorted on the primary key. Due
 * to this, scan operators always come with a {@link org.apache.calcite.rel.RelCollation} trait.
 */
private RelNode transform(String sql, RuleSet prepareRules) throws Exception {
  final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  final SchemaPlus defSchema = rootSchema.add("hr", new HrClusteredSchema());
  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .parserConfig(SqlParser.Config.DEFAULT)
      .defaultSchema(defSchema)
      .traitDefs(ConventionTraitDef.INSTANCE, RelCollationTraitDef.INSTANCE)
      .programs(
          Programs.of(prepareRules),
          Programs.ofRules(SortRemoveRule.INSTANCE))
      .build();
  Planner planner = Frameworks.getPlanner(config);
  SqlNode parse = planner.parse(sql);
  SqlNode validate = planner.validate(parse);
  RelRoot planRoot = planner.rel(validate);
  RelNode planBefore = planRoot.rel;
  RelTraitSet desiredTraits = planBefore.getTraitSet()
      .replace(EnumerableConvention.INSTANCE);
  RelNode planAfter = planner.transform(0, desiredTraits, planBefore);
  return planner.transform(1, desiredTraits, planAfter);
}
 
Example #7
Source File: SqlWorker.java    From quark with Apache License 2.0
private List<Program> getPrograms() {
  ImmutableList.Builder<Program> builder
      = ImmutableList.builder();
  for (RuleSet ruleSet: getRules()) {
    builder.add(Programs.sequence(
        new EnumerableProgram(ruleSet, this.context, this.plannerHolder),
        Programs.CALC_PROGRAM));
  }
  return builder.build();
}
 
Example #8
Source File: TraitPropagationTest.java    From calcite with Apache License 2.0
private static RelNode run(PropAction action, RuleSet rules)
    throws Exception {

  FrameworkConfig config = Frameworks.newConfigBuilder()
      .ruleSets(rules).build();

  final Properties info = new Properties();
  final Connection connection = DriverManager
      .getConnection("jdbc:calcite:", info);
  final CalciteServerStatement statement = connection
      .createStatement().unwrap(CalciteServerStatement.class);
  final CalcitePrepare.Context prepareContext =
        statement.createPrepareContext();
  final JavaTypeFactory typeFactory = prepareContext.getTypeFactory();
  CalciteCatalogReader catalogReader =
        new CalciteCatalogReader(prepareContext.getRootSchema(),
            prepareContext.getDefaultSchemaPath(),
            typeFactory,
            prepareContext.config());
  final RexBuilder rexBuilder = new RexBuilder(typeFactory);
  final RelOptPlanner planner = new VolcanoPlanner(config.getCostFactory(),
      config.getContext());

  // set up rules before we generate cluster
  planner.clearRelTraitDefs();
  planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

  planner.clear();
  for (RelOptRule r : rules) {
    planner.addRule(r);
  }

  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  return action.apply(cluster, catalogReader,
      prepareContext.getRootSchema().plus());
}
 
Example #9
Source File: SqlHintsConverterTest.java    From calcite with Apache License 2.0
@Test void testHintsPropagationInVolcanoPlannerRules() {
  final String sql = "select /*+ use_hash_join(r, s), use_hash_join(emp, dept) */\n"
      + "ename, job, sal, dept.name\n"
      + "from emp join dept on emp.deptno = dept.deptno";
  RelOptPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  Tester tester1 = tester.withDecorrelation(true)
      .withClusterFactory(
        relOptCluster -> RelOptCluster.create(planner, relOptCluster.getRexBuilder()));
  final RelNode rel = tester1.convertSqlToRel(sql).rel;
  final RelHint hint = RelHint.builder("USE_HASH_JOIN")
      .inheritPath(0)
      .hintOption("EMP")
      .hintOption("DEPT")
      .build();
  // Validate Volcano planner.
  RuleSet ruleSet = RuleSets.ofList(
      new MockEnumerableJoinRule(hint), // Rule to validate the hint.
      FilterProjectTransposeRule.INSTANCE,
      FilterMergeRule.INSTANCE,
      ProjectMergeRule.INSTANCE,
      EnumerableRules.ENUMERABLE_JOIN_RULE,
      EnumerableRules.ENUMERABLE_PROJECT_RULE,
      EnumerableRules.ENUMERABLE_FILTER_RULE,
      EnumerableRules.ENUMERABLE_SORT_RULE,
      EnumerableRules.ENUMERABLE_LIMIT_RULE,
      EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE);
  Program program = Programs.of(ruleSet);
  RelTraitSet toTraits = rel
      .getCluster()
      .traitSet()
      .replace(EnumerableConvention.INSTANCE);

  program.run(planner, rel, toTraits,
      Collections.emptyList(), Collections.emptyList());
}
 
Example #10
Source File: SqlHandlerConfig.java    From Bats with Apache License 2.0
public RuleSet getRules(PlannerPhase phase) {
  Collection<StoragePlugin> plugins = Lists.newArrayList();
  for (Entry<String, StoragePlugin> k : context.getStorage()) {
    plugins.add(k.getValue());
  }
  return phase.getRules(context, plugins);
}
 
Example #11
Source File: RuleSets.java    From quark with Apache License 2.0
/**
 * Gets the default set of rules for the planning process.
 */
public static RuleSet getDefaultRuleSet() {
  ImmutableList.Builder<RelOptRule> builder
      = ImmutableList.builder();
  builder.addAll(DEFAULT_RULES).addAll(ENUMERABLE_RULES);
  return new QuarkRuleSet(builder.build());
}
 
Example #12
Source File: PlannerPhase.java    From dremio-oss with Apache License 2.0
public static RuleSet mergedRuleSets(RuleSet... ruleSets) {
  final ImmutableSet.Builder<RelOptRule> relOptRuleSetBuilder = ImmutableSet.builder();
  for (final RuleSet ruleSet : ruleSets) {
    relOptRuleSetBuilder.addAll(ruleSet);
  }
  return RuleSets.ofList(relOptRuleSetBuilder.build());
}
 
Example #13
Source File: PlannerPhase.java    From dremio-oss with Apache License 2.0
/**
 * Get the list of enabled reduce expression (logical) rules. These rules are enabled using session/system options.
 *
 * @param optimizerRulesContext used to get the list of planner settings; other rules may
 *                              also need other query state from this in the future,
 *                              such as the available list of UDFs (as used by the
 *                              MergeProjectRule)
 * @return list of enabled reduce expression (logical) rules
 */
static RuleSet getEnabledReduceExpressionsRules(OptimizerRulesContext optimizerRulesContext) {
  final PlannerSettings ps = optimizerRulesContext.getPlannerSettings();

  // This list is used to store rules that can be turned on and off
  // by user-facing planning options
  final ImmutableList.Builder<RelOptRule> userConfigurableRules = ImmutableList.builder();

  userConfigurableRules.add(ConvertCountDistinctToHll.INSTANCE);
  if (ps.options.getOption(PlannerSettings.REDUCE_ALGEBRAIC_EXPRESSIONS)) {
    userConfigurableRules.add(ReduceTrigFunctionsRule.INSTANCE);
  }

  if (ps.isConstantFoldingEnabled()) {
    // TODO - DRILL-2218, DX-2319
    if (ps.isReduceProjectExpressionsEnabled()) {
      userConfigurableRules.add(PROJECT_REDUCE_EXPRESSIONS_CALCITE_RULE);
    }
    if (ps.isReduceFilterExpressionsEnabled()) {
      userConfigurableRules.add(FILTER_REDUCE_EXPRESSIONS_CALCITE_RULE);
    }
    if (ps.isReduceCalcExpressionsEnabled()) {
      userConfigurableRules.add(CALC_REDUCE_EXPRESSIONS_CALCITE_RULE);
    }
  }
  return RuleSets.ofList(userConfigurableRules.build());
}
 
Example #14
Source File: CatalogServiceImpl.java    From dremio-oss with Apache License 2.0
@Override
public RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase) {
  final ImmutableSet.Builder<RelOptRule> rules = ImmutableSet.builder();
  final Set<SourceType> types = new HashSet<>();

  try {
    for(ManagedStoragePlugin plugin : plugins.managed()){
      if(plugin.getState().getStatus() == SourceStatus.bad) {
        // we shouldn't consider rules for misbehaving plugins.
        continue;
      }
      final StoragePluginId pluginId = plugin.getId();

      StoragePluginRulesFactory factory = plugin.getRulesFactory();
      if(factory != null) {
        // add instance level rules.
        rules.addAll(factory.getRules(context, phase, pluginId));

        // add type level rules.
        if(types.add(pluginId.getType())) {
          rules.addAll(factory.getRules(context, phase, pluginId.getType()));
        }
      }
    }
  } catch (InstantiationException | IllegalAccessException e) {
    throw UserException.validationError(e).message("Failure getting plugin rules.").build(logger);
  }

  ImmutableSet<RelOptRule> rulesSet = rules.build();
  return RuleSets.ofList(rulesSet);
}
 
Example #15
Source File: SqlWorker.java    From quark with Apache License 2.0
private RuleSet[] getRules() {
  RuleSet defaultRule = RuleSets.getDefaultRuleSet();
  RuleSet[] allRules = new RuleSet[] {defaultRule};
  return allRules;
}
 
Example #16
Source File: SqlWorker.java    From quark with Apache License 2.0
private EnumerableProgram(RuleSet ruleSet, QueryContext context,
                          QuarkMaterializeCluster.RelOptPlannerHolder holder) {
  this.ruleSet = ruleSet;
  this.context = context;
  this.plannerHolder = holder;
}
 
Example #17
Source File: QueryContext.java    From dremio-oss with Apache License 2.0
public RuleSet getInjectedRules(PlannerPhase phase) {
  return RuleSets.ofList(sabotContext.getInjectedRulesFactories()
      .stream()
      .flatMap(rf -> rf.getRules(phase, optionManager).stream())
      .collect(Collectors.toList()));
}
 
Example #18
Source File: SqlHandlerConfig.java    From dremio-oss with Apache License 2.0
public RuleSet getRules(PlannerPhase phase) {
  return PlannerPhase.mergedRuleSets(
      context.getInjectedRules(phase),
      phase.getRules(context),
      context.getCatalogService().getStorageRules(context, phase));
}
 
Example #19
Source File: EnumerableLimitRuleTest.java    From calcite with Apache License 2.0
/** Test case for
 * <a href="https://issues.apache.org/jira/browse/CALCITE-2941">[CALCITE-2941]
 * EnumerableLimitRule on Sort with no collation creates EnumerableLimit with
 * wrong traitSet and cluster</a>.
 */
@Test void enumerableLimitOnEmptySort() throws Exception {
  RuleSet prepareRules =
      RuleSets.ofList(
          EnumerableRules.ENUMERABLE_FILTER_RULE,
          EnumerableRules.ENUMERABLE_SORT_RULE,
          EnumerableRules.ENUMERABLE_LIMIT_RULE,
          EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE);
  SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  SchemaPlus defSchema = rootSchema.add("hr", new HrClusteredSchema());
  FrameworkConfig config = Frameworks.newConfigBuilder()
      .parserConfig(SqlParser.Config.DEFAULT)
      .defaultSchema(defSchema)
      .traitDefs(ConventionTraitDef.INSTANCE, RelCollationTraitDef.INSTANCE)
      .programs(Programs.of(prepareRules))
      .build();

  RelBuilder builder = RelBuilder.create(config);
  RelNode planBefore = builder
      .scan("hr", "emps")
      .sort(builder.field(0)) // will produce collation [0] in the plan
      .filter(
          builder.notEquals(
              builder.field(0),
              builder.literal(100)))
      .limit(1, 5) // force a limit inside an "empty" Sort (with no collation)
      .build();

  RelTraitSet desiredTraits = planBefore.getTraitSet()
      .replace(EnumerableConvention.INSTANCE);
  Program program = Programs.of(prepareRules);
  RelNode planAfter = program.run(planBefore.getCluster().getPlanner(), planBefore,
      desiredTraits, ImmutableList.of(), ImmutableList.of());

  // verify that the collation [0] is not lost in the final plan
  final RelCollation collation =
      planAfter.getTraitSet().getTrait(RelCollationTraitDef.INSTANCE);
  assertThat(collation, notNullValue());
  final List<RelFieldCollation> fieldCollationList =
      collation.getFieldCollations();
  assertThat(fieldCollationList, notNullValue());
  assertThat(fieldCollationList.size(), is(1));
  assertThat(fieldCollationList.get(0).getFieldIndex(), is(0));
}
 
Example #20
Source File: PlannerPhase.java    From dremio-oss with Apache License 2.0
static final RuleSet getPhysicalRules(OptimizerRulesContext optimizerRulesContext) {
  final List<RelOptRule> ruleList = new ArrayList<>();
  final PlannerSettings ps = optimizerRulesContext.getPlannerSettings();

  ruleList.add(SortConvertPrule.INSTANCE);
  ruleList.add(SortPrule.INSTANCE);
  ruleList.add(ProjectPrule.INSTANCE);
  ruleList.add(FlattenPrule.INSTANCE);
  ruleList.add(ScreenPrule.INSTANCE);
  ruleList.add(ExpandConversionRule.INSTANCE);
  ruleList.add(FilterPrule.INSTANCE);
  ruleList.add(LimitPrule.INSTANCE);
  ruleList.add(SamplePrule.INSTANCE);
  ruleList.add(SampleToLimitPrule.INSTANCE);
  ruleList.add(WriterPrule.INSTANCE);
  ruleList.add(WindowPrule.INSTANCE);
  ruleList.add(PushLimitToTopN.INSTANCE);
  ruleList.add(LimitUnionExchangeTransposeRule.INSTANCE);
  ruleList.add(UnionAllPrule.INSTANCE);
  ruleList.add(ValuesPrule.INSTANCE);
  ruleList.add(EmptyPrule.INSTANCE);
  ruleList.add(PushLimitToPruneableScan.INSTANCE);

  if (ps.isHashAggEnabled()) {
    ruleList.add(HashAggPrule.INSTANCE);
  }

  if (ps.isStreamAggEnabled()) {
    ruleList.add(StreamAggPrule.INSTANCE);
  }

  if (ps.isHashJoinEnabled()) {
    ruleList.add(HashJoinPrule.DIST_INSTANCE);

    if(ps.isBroadcastJoinEnabled()){
      ruleList.add(HashJoinPrule.BROADCAST_INSTANCE);
    }
  }

  if (ps.isMergeJoinEnabled()) {
    ruleList.add(MergeJoinPrule.DIST_INSTANCE);

    if(ps.isBroadcastJoinEnabled()){
      ruleList.add(MergeJoinPrule.BROADCAST_INSTANCE);
    }

  }

  // NLJ plans consist of broadcasting the right child, hence we need
  // broadcast join enabled.
  if (ps.isNestedLoopJoinEnabled() && ps.isBroadcastJoinEnabled()) {
    ruleList.add(NestedLoopJoinPrule.INSTANCE);
  }

  return RuleSets.ofList(ImmutableSet.copyOf(ruleList));
}
 
Example #21
Source File: CatalogService.java    From dremio-oss with Apache License 2.0
/**
 * Collect all rules for StoragePlugins.
 * <p>
 * Collects the following:
 * - One set of rules for each storage plugin type.
 * - One set of rules for each storage plugin instance.
 *
 * @param context
 * @param phase
 * @return
 */
RuleSet getStorageRules(OptimizerRulesContext context, PlannerPhase phase);
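Example #14 above shows CatalogServiceImpl's implementation of this method, and Example #18 shows a typical call site, where the returned storage rules are combined with injected and phase rules via PlannerPhase.mergedRuleSets (Example #12).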