org.apache.calcite.rel.logical.LogicalTableScan Java Examples

The following examples show how to use org.apache.calcite.rel.logical.LogicalTableScan. They are taken from open source projects; the source file, project, and license are noted above each example.
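Before the project-specific examples, here is a minimal sketch of the basic pattern: resolve a RelOptTable from the RelOptSchema, then hand it to LogicalTableScan.create together with the cluster (recent Calcite versions also take a list of table hints). The hr/emps ReflectiveSchema is borrowed from the RelWriterTest example further down and is only an assumption for illustration; imports are omitted, as in the other snippets on this page.

String plan = Frameworks.withPlanner((cluster, relOptSchema, rootSchema) -> {
  // Register a schema so the planner can resolve the qualified name ["hr", "emps"].
  rootSchema.add("hr", new ReflectiveSchema(new JdbcTest.HrSchema()));
  LogicalTableScan scan =
      LogicalTableScan.create(cluster,
          relOptSchema.getTableForMember(Arrays.asList("hr", "emps")),
          ImmutableList.of());               // no table hints
  // Yields something like: LogicalTableScan(table=[[hr, emps]])
  return RelOptUtil.toString(scan);
});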
Example #1
Source File: StreamRules.java    From Bats with Apache License 2.0
@Override public void onMatch(RelOptRuleCall call) {
  final Delta delta = call.rel(0);
  final TableScan scan = call.rel(1);
  final RelOptCluster cluster = delta.getCluster();
  final RelOptTable relOptTable = scan.getTable();
  final StreamableTable streamableTable =
      relOptTable.unwrap(StreamableTable.class);
  if (streamableTable != null) {
    final Table table1 = streamableTable.stream();
    final RelOptTable relOptTable2 =
        RelOptTableImpl.create(relOptTable.getRelOptSchema(),
            relOptTable.getRowType(), table1,
            ImmutableList.<String>builder()
                .addAll(relOptTable.getQualifiedName())
                .add("(STREAM)").build());
    final LogicalTableScan newScan =
        LogicalTableScan.create(cluster, relOptTable2);
    call.transformTo(newScan);
  }
}
 
Example #2
Source File: MockCatalogReader.java    From calcite with Apache License 2.0
@Override public RelNode toRel(ToRelContext context) {
  RelNode rel = LogicalTableScan.create(context.getCluster(), fromTable,
      context.getTableHints());
  final RexBuilder rexBuilder = context.getCluster().getRexBuilder();
  rel = LogicalFilter.create(
      rel, getConstraint(rexBuilder, rel.getRowType()));
  final List<RelDataTypeField> fieldList =
      rel.getRowType().getFieldList();
  final List<Pair<RexNode, String>> projects =
      new AbstractList<Pair<RexNode, String>>() {
        @Override public Pair<RexNode, String> get(int index) {
          return RexInputRef.of2(mapping.get(index), fieldList);
        }

        @Override public int size() {
          return mapping.size();
        }
      };
  return LogicalProject.create(rel,
      ImmutableList.of(),
      Pair.left(projects),
      Pair.right(projects));
}
 
Example #3
Source File: PushProjectIntoTableSourceScanRule.java    From flink with Apache License 2.0
@Override
public boolean matches(RelOptRuleCall call) {
	LogicalTableScan scan = call.rel(1);
	TableSourceTable tableSourceTable = scan.getTable().unwrap(TableSourceTable.class);
	if (tableSourceTable == null || !(tableSourceTable.tableSource() instanceof SupportsProjectionPushDown)) {
		return false;
	}
	SupportsProjectionPushDown pushDownSource = (SupportsProjectionPushDown) tableSourceTable.tableSource();
	if (pushDownSource.supportsNestedProjection()) {
		throw new TableException("Nested projection push down is unsupported now. \n" +
				"Please disable nested projection (SupportsProjectionPushDown#supportsNestedProjection returns false), " +
				"planner will push down the top-level columns.");
	} else {
		return true;
	}
}
 
Example #4
Source File: InstanceAccessByClassIdRule.java    From mat-calcite-plugin with Apache License 2.0
@Override
public void onMatch(RelOptRuleCall call) {
    InstanceByClassTableScan scan = call.rel(0);
    RelOptTable table = scan.getTable();
    RelOptSchema schema = table.getRelOptSchema();
    List<String> indexName = new ArrayList<String>(table.getQualifiedName());
    indexName.set(indexName.size() - 1, "$ids$:" + indexName.get(indexName.size() - 1));
    LogicalTableScan ids = LogicalTableScan.create(scan.getCluster(), schema.getTableForMember(indexName));

    InstanceByClassTable instanceByClassTable = table.unwrap(InstanceByClassTable.class);
    int snapshotId = SnapshotHolder.put(instanceByClassTable.snapshot);

    RelOptCluster cluster = scan.getCluster();
    RexInputRef objectId = cluster.getRexBuilder().makeInputRef(ids, 0);
    RexBuilderContext rexContext = new ExecutionRexBuilderContext(cluster, snapshotId, objectId);

    List<Function<RexBuilderContext, RexNode>> resolvers = instanceByClassTable.getResolvers();
    List<RexNode> exprs = new ArrayList<RexNode>(resolvers.size());
    for (Function<RexBuilderContext, RexNode> resolver : resolvers) {
        exprs.add(resolver.apply(rexContext));
    }
    call.transformTo(RelOptUtil.createProject(ids, exprs, table.getRowType().getFieldNames(), true));
}
 
Example #5
Source File: QueryOperationConverter.java    From flink with Apache License 2.0
private RelNode convertToDataStreamScan(DataStreamQueryOperation<?> operation) {
	DataStreamTable<?> dataStreamTable = new DataStreamTable<>(
			operation.getDataStream(),
			operation.isProducesUpdates(),
			operation.isAccRetract(),
			operation.getFieldIndices(),
			operation.getTableSchema().getFieldNames(),
			operation.getStatistic(),
			scala.Option.apply(operation.getFieldNullables()));

	List<String> names;
	if (operation.getQualifiedName() != null) {
		names = operation.getQualifiedName();
	} else {
		String refId = String.format("Unregistered_DataStream_%s", operation.getDataStream().getId());
		names = Collections.singletonList(refId);
	}

	FlinkRelOptTable table = FlinkRelOptTable.create(
			relBuilder.getRelOptSchema(),
			dataStreamTable.getRowType(relBuilder.getTypeFactory()),
			names,
			dataStreamTable);
	return LogicalTableScan.create(relBuilder.getCluster(), table);
}
 
Example #6
Source File: QueryOperationConverter.java    From flink with Apache License 2.0
private RelNode convertToDataStreamScan(DataStream<?> dataStream, int[] fieldIndices, TableSchema tableSchema) {
	DataStreamTable<?> dataStreamTable = new DataStreamTable<>(
		dataStream,
		false,
		false,
		fieldIndices,
		tableSchema.getFieldNames(),
		FlinkStatistic.UNKNOWN(),
		scala.Option.empty());

	String refId = String.format("Unregistered_DataStream_%s", dataStream.getId());
	FlinkRelOptTable table = FlinkRelOptTable.create(
		relBuilder.getRelOptSchema(),
		dataStreamTable.getRowType(relBuilder.getTypeFactory()),
		Collections.singletonList(refId),
		dataStreamTable);
	return LogicalTableScan.create(relBuilder.getCluster(), table);
}
 
Example #7
Source File: PushDownLogicTableRule.java    From Mycat2 with GNU General Public License v3.0
@NotNull
private RelNode global(RelOptCluster cluster,
                       Bindables.BindableTableScan bindableTableScan,
                       RelOptSchema relOptSchema,
                       MycatLogicTable logicTable) {
    final HashSet<String> context = new HashSet<>();
    RelNode logicalTableScan;
    MycatPhysicalTable mycatPhysicalTable = logicTable.getMycatGlobalPhysicalTable(context);
    RelOptTable dataNode = RelOptTableImpl.create(
            relOptSchema,
            logicTable.getRowType(cluster.getTypeFactory()), // use logicTable here to avoid a row-type mismatch
            mycatPhysicalTable,
            ImmutableList.of(mycatPhysicalTable.getBackendTableInfo().getUniqueName()));
    logicalTableScan = LogicalTableScan.create(cluster, dataNode, ImmutableList.of());
    return RelOptUtil.createProject(RelOptUtil.createFilter(logicalTableScan, bindableTableScan.filters), bindableTableScan.projects);
}
 
Example #8
Source File: PlanCaptureAttemptObserver.java    From dremio-oss with Apache License 2.0
@Override
public void planConvertedToRel(RelNode converted, long millisTaken) {
  final String convertedRelTree = toStringOrEmpty(converted, true);

  try {
    RelNode toSerialize = converted.accept(new RelShuttleImpl() {
      @Override
      public RelNode visit(TableScan scan) {
        return LogicalTableScan.create(scan.getCluster(), scan.getTable());
      }
    });

    serializedPlan = relSerializerFactory.getSerializer(converted.getCluster()).serializeToBytes(toSerialize);
  } catch (Throwable e) {
    logger.debug("Error", e);
  }

  planPhases.add(PlanPhaseProfile.newBuilder()
    .setPhaseName("Convert To Rel")
    .setDurationMillis(millisTaken)
    .setPlan(convertedRelTree)
    .build());
}
 
Example #9
Source File: QueryableRelBuilder.java    From calcite with Apache License 2.0
RelNode toRel(Queryable<T> queryable) {
  if (queryable instanceof QueryableDefaults.Replayable) {
    //noinspection unchecked
    ((QueryableDefaults.Replayable) queryable).replay(this);
    return rel;
  }
  if (queryable instanceof AbstractTableQueryable) {
    final AbstractTableQueryable tableQueryable =
        (AbstractTableQueryable) queryable;
    final QueryableTable table = tableQueryable.table;
    final CalciteSchema.TableEntry tableEntry =
        CalciteSchema.from(tableQueryable.schema)
            .add(tableQueryable.tableName, tableQueryable.table);
    final RelOptTableImpl relOptTable =
        RelOptTableImpl.create(null, table.getRowType(translator.typeFactory),
            tableEntry, null);
    if (table instanceof TranslatableTable) {
      return ((TranslatableTable) table).toRel(translator.toRelContext(),
          relOptTable);
    } else {
      return LogicalTableScan.create(translator.cluster, relOptTable, ImmutableList.of());
    }
  }
  return translator.translate(queryable.getExpression());
}
 
Example #10
Source File: StreamRules.java    From calcite with Apache License 2.0
@Override public void onMatch(RelOptRuleCall call) {
  final Delta delta = call.rel(0);
  final TableScan scan = call.rel(1);
  final RelOptCluster cluster = delta.getCluster();
  final RelOptTable relOptTable = scan.getTable();
  final StreamableTable streamableTable =
      relOptTable.unwrap(StreamableTable.class);
  if (streamableTable != null) {
    final Table table1 = streamableTable.stream();
    final RelOptTable relOptTable2 =
        RelOptTableImpl.create(relOptTable.getRelOptSchema(),
            relOptTable.getRowType(), table1,
            ImmutableList.<String>builder()
                .addAll(relOptTable.getQualifiedName())
                .add("(STREAM)").build());
    final LogicalTableScan newScan =
        LogicalTableScan.create(cluster, relOptTable2, scan.getHints());
    call.transformTo(newScan);
  }
}
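Note the difference from Example #1: this newer version of the same rule forwards scan.getHints() as a third argument, because LogicalTableScan.create in more recent Calcite releases also accepts the list of hints attached to the scan. The examples on this page that pass ImmutableList.of() are simply creating a scan with no hints.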
 
Example #11
Source File: QueryOperationConverter.java    From flink with Apache License 2.0
private RelNode convertToDataStreamScan(
		DataStream<?> dataStream,
		int[] fieldIndices,
		TableSchema tableSchema,
		Optional<ObjectIdentifier> identifier) {
	List<String> names;
	if (identifier.isPresent()) {
		names = Arrays.asList(
			identifier.get().getCatalogName(),
			identifier.get().getDatabaseName(),
			identifier.get().getObjectName());
	} else {
		String refId = String.format("Unregistered_DataStream_%s", dataStream.getId());
		names = Collections.singletonList(refId);
	}
	final RelDataType rowType = DataStreamTable$.MODULE$
		.getRowType(relBuilder.getTypeFactory(),
			dataStream,
			tableSchema.getFieldNames(),
			fieldIndices,
			scala.Option.empty());
	DataStreamTable<?> dataStreamTable = new DataStreamTable<>(
		relBuilder.getRelOptSchema(),
		names,
		rowType,
		dataStream,
		fieldIndices,
		tableSchema.getFieldNames(),
		FlinkStatistic.UNKNOWN(),
		scala.Option.empty());
	return LogicalTableScan.create(relBuilder.getCluster(), dataStreamTable);
}
 
Example #12
Source File: RelWriterTest.java    From calcite with Apache License 2.0
@Test void testCalc() {
  final FrameworkConfig config = RelBuilderTest.config().build();
  final RelBuilder builder = RelBuilder.create(config);
  final RexBuilder rexBuilder = builder.getRexBuilder();
  final LogicalTableScan scan = (LogicalTableScan) builder.scan("EMP").build();
  final RexProgramBuilder programBuilder =
      new RexProgramBuilder(scan.getRowType(), rexBuilder);
  final RelDataTypeField field = scan.getRowType().getField("SAL", false, false);
  programBuilder.addIdentity();
  programBuilder.addCondition(
      rexBuilder.makeCall(SqlStdOperatorTable.GREATER_THAN,
          new RexInputRef(field.getIndex(), field.getType()),
          builder.literal(10)));
  final LogicalCalc calc = LogicalCalc.create(scan, programBuilder.getProgram());
  String relJson = RelOptUtil.dumpPlan("", calc,
      SqlExplainFormat.JSON, SqlExplainLevel.EXPPLAN_ATTRIBUTES);
  String s =
      Frameworks.withPlanner((cluster, relOptSchema, rootSchema) -> {
        final RelJsonReader reader = new RelJsonReader(
            cluster, getSchema(calc), rootSchema);
        RelNode node;
        try {
          node = reader.read(relJson);
        } catch (IOException e) {
          throw TestUtil.rethrow(e);
        }
        return RelOptUtil.dumpPlan("", node, SqlExplainFormat.TEXT,
            SqlExplainLevel.EXPPLAN_ATTRIBUTES);
      });
  final String expected =
      "LogicalCalc(expr#0..7=[{inputs}], expr#8=[10], expr#9=[>($t5, $t8)],"
          + " proj#0..7=[{exprs}], $condition=[$t9])\n"
          + "  LogicalTableScan(table=[[scott, EMP]])\n";
  assertThat(s, isLinux(expected));
}
 
Example #13
Source File: DruidTable.java    From calcite with Apache License 2.0
public RelNode toRel(RelOptTable.ToRelContext context,
    RelOptTable relOptTable) {
  final RelOptCluster cluster = context.getCluster();
  final TableScan scan = LogicalTableScan.create(cluster, relOptTable, ImmutableList.of());
  return DruidQuery.create(cluster,
      cluster.traitSetOf(BindableConvention.INSTANCE), relOptTable, this,
      ImmutableList.of(scan));
}
 
Example #14
Source File: TableScanRule.java    From calcite with Apache License 2.0
public void onMatch(RelOptRuleCall call) {
  final LogicalTableScan oldRel = call.rel(0);
  RelNode newRel =
      oldRel.getTable().toRel(
          ViewExpanders.simpleContext(oldRel.getCluster()));
  call.transformTo(newRel);
}
 
Example #15
Source File: EnumerableTableScanRule.java    From calcite with Apache License 2.0
/**
 * Creates an EnumerableTableScanRule.
 *
 * @param relBuilderFactory Builder for relational expressions
 */
public EnumerableTableScanRule(RelBuilderFactory relBuilderFactory) {
  super(LogicalTableScan.class,
      (Predicate<LogicalTableScan>) r -> EnumerableTableScan.canHandle(r.getTable()),
      Convention.NONE, EnumerableConvention.INSTANCE, relBuilderFactory,
      "EnumerableTableScanRule");
}
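A hedged usage sketch (not part of the original source): the rule is typically handed to a planner, with RelFactories.LOGICAL_BUILDER as the stock builder factory; planner below is an assumed in-scope RelOptPlanner.

// Register the converter rule so a LogicalTableScan (Convention.NONE) can be
// converted into an EnumerableTableScan during planning.
planner.addRule(new EnumerableTableScanRule(RelFactories.LOGICAL_BUILDER));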
 
Example #16
Source File: EnumerableTableScanRule.java    From calcite with Apache License 2.0
@Override public RelNode convert(RelNode rel) {
  LogicalTableScan scan = (LogicalTableScan) rel;
  final RelOptTable relOptTable = scan.getTable();
  final Table table = relOptTable.unwrap(Table.class);
  // A QueryableTable can only be implemented in the ENUMERABLE convention,
  // but some test QueryableTables do not really implement the expressions,
  // so the check below skips the QueryableTable#getExpression invocation and returns early.
  if (table instanceof QueryableTable || relOptTable.getExpression(Object.class) != null) {
    return EnumerableTableScan.create(scan.getCluster(), relOptTable);
  }

  return null;
}
 
Example #17
Source File: Bindables.java    From calcite with Apache License 2.0
@Override public void onMatch(RelOptRuleCall call) {
  final LogicalTableScan scan = call.rel(0);
  final RelOptTable table = scan.getTable();
  if (BindableTableScan.canHandle(table)) {
    call.transformTo(
        BindableTableScan.create(scan.getCluster(), table));
  }
}
 
Example #18
Source File: RelWriterTest.java    From calcite with Apache License 2.0
/**
 * Unit test for {@link org.apache.calcite.rel.externalize.RelJsonWriter} on
 * a simple tree of relational expressions, consisting of a table and a
 * project including window expressions.
 */
@Test void testWriter() {
  String s =
      Frameworks.withPlanner((cluster, relOptSchema, rootSchema) -> {
        rootSchema.add("hr",
            new ReflectiveSchema(new JdbcTest.HrSchema()));
        LogicalTableScan scan =
            LogicalTableScan.create(cluster,
                relOptSchema.getTableForMember(
                    Arrays.asList("hr", "emps")),
                ImmutableList.of());
        final RexBuilder rexBuilder = cluster.getRexBuilder();
        LogicalFilter filter =
            LogicalFilter.create(scan,
                rexBuilder.makeCall(
                    SqlStdOperatorTable.EQUALS,
                    rexBuilder.makeFieldAccess(
                        rexBuilder.makeRangeReference(scan),
                        "deptno", true),
                    rexBuilder.makeExactLiteral(BigDecimal.TEN)));
        final RelJsonWriter writer = new RelJsonWriter();
        final RelDataType bigIntType =
            cluster.getTypeFactory().createSqlType(SqlTypeName.BIGINT);
        LogicalAggregate aggregate =
            LogicalAggregate.create(filter,
                ImmutableList.of(),
                ImmutableBitSet.of(0),
                null,
                ImmutableList.of(
                    AggregateCall.create(SqlStdOperatorTable.COUNT,
                        true, false, false, ImmutableList.of(1), -1,
                        RelCollations.EMPTY, bigIntType, "c"),
                    AggregateCall.create(SqlStdOperatorTable.COUNT,
                        false, false, false, ImmutableList.of(), -1,
                        RelCollations.EMPTY, bigIntType, "d")));
        aggregate.explain(writer);
        return writer.asString();
      });
  assertThat(s, is(XX));
}
 
Example #19
Source File: QueryOperationConverter.java    From flink with Apache License 2.0
private RelNode convertToDataStreamScan(DataStreamQueryOperation<?> operation) {
	List<String> names;
	ObjectIdentifier identifier = operation.getIdentifier();
	if (identifier != null) {
		names = Arrays.asList(
			identifier.getCatalogName(),
			identifier.getDatabaseName(),
			identifier.getObjectName());
	} else {
		String refId = String.format("Unregistered_DataStream_%s", operation.getDataStream().getId());
		names = Collections.singletonList(refId);
	}

	final RelDataType rowType = DataStreamTable$.MODULE$
		.getRowType(relBuilder.getTypeFactory(),
			operation.getDataStream(),
			operation.getTableSchema().getFieldNames(),
			operation.getFieldIndices(),
			scala.Option.apply(operation.getFieldNullables()));
	DataStreamTable<?> dataStreamTable = new DataStreamTable<>(
		relBuilder.getRelOptSchema(),
		names,
		rowType,
		operation.getDataStream(),
		operation.getFieldIndices(),
		operation.getTableSchema().getFieldNames(),
		operation.getStatistic(),
		scala.Option.apply(operation.getFieldNullables()));
	return LogicalTableScan.create(relBuilder.getCluster(), dataStreamTable);
}
 
Example #20
Source File: QueryOperationConverter.java    From flink with Apache License 2.0
@Override
public <U> RelNode visit(TableSourceQueryOperation<U> tableSourceOperation) {
	TableSource<?> tableSource = tableSourceOperation.getTableSource();
	boolean isBatch;
	if (tableSource instanceof LookupableTableSource) {
		isBatch = tableSourceOperation.isBatch();
	} else if (tableSource instanceof StreamTableSource) {
		isBatch = ((StreamTableSource<?>) tableSource).isBounded();
	} else {
		throw new TableException(String.format("%s is not supported.", tableSource.getClass().getSimpleName()));
	}

	FlinkStatistic statistic;
	List<String> names;
	if (tableSourceOperation instanceof RichTableSourceQueryOperation &&
		((RichTableSourceQueryOperation<U>) tableSourceOperation).getQualifiedName() != null) {
		statistic = ((RichTableSourceQueryOperation<U>) tableSourceOperation).getStatistic();
		names = ((RichTableSourceQueryOperation<U>) tableSourceOperation).getQualifiedName();
	} else {
		statistic = FlinkStatistic.UNKNOWN();
		// TableSourceScan requires a unique name of a Table for computing a digest.
		// We are using the identity hash of the TableSource object.
		String refId = "Unregistered_TableSource_" + System.identityHashCode(tableSource);
		names = Collections.singletonList(refId);
	}

	TableSourceTable<?> tableSourceTable = new TableSourceTable<>(tableSource, !isBatch, statistic);
	FlinkRelOptTable table = FlinkRelOptTable.create(
		relBuilder.getRelOptSchema(),
		tableSourceTable.getRowType(relBuilder.getTypeFactory()),
		names,
		tableSourceTable);
	return LogicalTableScan.create(relBuilder.getCluster(), table);
}
 
Example #21
Source File: PushDownLogicTableRule.java    From Mycat2 with GNU General Public License v3.0
@NotNull
private static RelNode getBindableTableScan(Bindables.BindableTableScan bindableTableScan, RelOptCluster cluster, RelOptSchema relOptSchema, DataNode backendTableInfo) {
    String uniqueName = backendTableInfo.getUniqueName();
    MycatLogicTable unwrap = bindableTableScan.getTable().unwrap(MycatLogicTable.class);
    MycatPhysicalTable mycatPhysicalTable = new MycatPhysicalTable(unwrap, backendTableInfo);
    RelOptTable dataNode = RelOptTableImpl.create(
            relOptSchema,
            mycatPhysicalTable.getRowType(cluster.getTypeFactory()),
            mycatPhysicalTable,
            ImmutableList.of(uniqueName));
    RelNode logicalTableScan = LogicalTableScan.create(cluster, dataNode, ImmutableList.of());
    return RelOptUtil.createProject(RelOptUtil.createFilter(logicalTableScan, bindableTableScan.filters), bindableTableScan.projects);
}
 
Example #22
Source File: CopyWithCluster.java    From dremio-oss with Apache License 2.0
private RelNode copyOf(LogicalTableScan rel) {
  return new LogicalTableScan(
    cluster,
    copyOf(rel.getTraitSet()),
    rel.getTable()
  );
}
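For comparison, a hedged sketch of the same copy written against the static factory used elsewhere on this page; the empty hint list is an assumption, and the original trait set is intentionally dropped because create derives a fresh one for the target cluster.

private RelNode copyOf(LogicalTableScan rel) {
  // cluster is the target cluster held by the enclosing CopyWithCluster shuttle.
  return LogicalTableScan.create(cluster, rel.getTable(), ImmutableList.of());
}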
 
Example #23
Source File: CopyWithCluster.java    From dremio-oss with Apache License 2.0
@Override
public RelNode visit(TableScan scan) {
  if (scan instanceof CopyToCluster) {
    return ((CopyToCluster) scan).copyWith(this);
  } else if (scan instanceof LogicalTableScan) {
    return copyOf((LogicalTableScan) scan);
  }
  notSupported(scan);
  return super.visit(scan);
}
 
Example #24
Source File: CrelUniqifier.java    From dremio-oss with Apache License 2.0
@Override
public RelNode visit(RelNode other) {
  if(!data.add(other)) {
    if (other instanceof LogicalTableScan) {
      // LogicalTableScan does not have implementation of a deep copy. Create a new instance.
      other = LogicalTableScan.create(other.getCluster(), other.getTable());
    } else {
      other = other.copy(other.getTraitSet(), other.getInputs());
    }
  }

  return super.visit(other);
}
 
Example #25
Source File: TableScanRule.java    From Bats with Apache License 2.0
public void onMatch(RelOptRuleCall call) {
  final LogicalTableScan oldRel = call.rel(0);
  RelNode newRel =
      oldRel.getTable().toRel(
          ViewExpanders.simpleContext(oldRel.getCluster()));
  call.transformTo(newRel);
}
 
Example #26
Source File: TraitPropagationTest.java    From calcite with Apache License 2.0
private PhysTableRule() {
  super(anyChild(LogicalTableScan.class), "PhysScan");
}
 
Example #27
Source File: MockCatalogReader.java    From calcite with Apache License 2.0
@Override public RelNode toRel(RelOptTable.ToRelContext context, RelOptTable relOptTable) {
  return LogicalTableScan.create(context.getCluster(), relOptTable, context.getTableHints());
}
 
Example #28
Source File: RelFactories.java    From Bats with Apache License 2.0
public RelNode createScan(RelOptCluster cluster, RelOptTable table) {
  return LogicalTableScan.create(cluster, table);
}
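A hedged usage sketch: RelBuilder resolves a table name against its schema and delegates to the configured scan factory, which in the default configuration produces exactly this kind of LogicalTableScan. The config and the "EMP" table reuse the setup from the RelWriterTest example above and are assumptions for illustration.

final FrameworkConfig config = RelBuilderTest.config().build();
final RelBuilder builder = RelBuilder.create(config);
// With the default scan factory this is a LogicalTableScan(table=[[scott, EMP]]).
final RelNode scan = builder.scan("EMP").build();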
 
Example #29
Source File: TraitPropagationTest.java    From calcite with Apache License 2.0
public void onMatch(RelOptRuleCall call) {
  LogicalTableScan rel = call.rel(0);
  call.transformTo(new PhysTable(rel.getCluster()));
}
 
Example #30
Source File: MockCatalogReader.java    From calcite with Apache License 2.0
public RelNode toRel(ToRelContext context) {
  return LogicalTableScan.create(context.getCluster(), this, context.getTableHints());
}