Java Code Examples for org.apache.kylin.storage.StorageContext

The following examples show how to use org.apache.kylin.storage.StorageContext. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: kylin-on-parquet-v2   Source File: OLAPContext.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates an OLAP context identified by the given sequence number, with an
 * empty sort specification, then applies any thread-local query parameters
 * (partial-result acceptance and user authentication info) to it.
 *
 * @param seq sequence number used as both this context's id and the id of the
 *            backing {@link StorageContext}
 */
public OLAPContext(int seq) {
    this.id = seq;
    this.storageContext = new StorageContext(seq);
    this.sortColumns = Lists.newArrayList();
    this.sortOrders = Lists.newArrayList();

    Map<String, String> threadLocalParams = _localPrarameters.get();
    if (threadLocalParams == null) {
        return; // no per-thread overrides to apply
    }
    String partialResultFlag = threadLocalParams.get(PRM_ACCEPT_PARTIAL_RESULT);
    if (partialResultFlag != null) {
        this.storageContext.setAcceptPartialResult(Boolean.parseBoolean(partialResultFlag));
    }
    String userInfo = threadLocalParams.get(PRM_USER_AUTHEN_INFO);
    if (userInfo != null) {
        this.olapAuthen.parseUserInfo(userInfo);
    }
}
 
Example 2
Source Project: kylin-on-parquet-v2   Source File: ITStorageTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Runs a storage query for the given group-by columns, aggregations and
 * filter, then counts the tuples returned.
 *
 * @param context storage context carrying per-query settings for the engine
 * @return the number of tuples produced by the storage engine
 */
private int search(List<TblColRef> groups, List<FunctionDesc> aggregations, TupleFilter filter, StorageContext context) {
    int count = 0;
    ITupleIterator iterator = null;
    try {
        // Build a minimal SQLDigest: most collections are empty; the inline
        // comments label each positional argument of the long constructor.
        SQLDigest sqlDigest = new SQLDigest("default.test_kylin_fact", /*allCol*/ Collections.<TblColRef> emptySet(), /*join*/ null, //
                groups, /*subqueryJoinParticipants*/ Sets.<TblColRef> newHashSet(), //
                /*dynamicGroupByColumns*/ Collections.<TblColRef, TupleExpression> emptyMap(), //
                /*groupByExpression*/ false, //
                /*metricCol*/ Collections.<TblColRef> emptySet(), aggregations, /*aggrSqlCalls*/ Collections.<SQLCall> emptyList(), //
                /*dynamicAggregations*/ Collections.<DynamicFunctionDesc> emptyList(), //
                /*runtimeDimensionColumns*/ Collections.<TblColRef> emptySet(), //
                /*runtimeMetricColumns*/ Collections.<TblColRef> emptySet(), //
                /*filter col*/ Collections.<TblColRef> emptySet(), filter, null, //
                /*sortCol*/ new ArrayList<TblColRef>(), new ArrayList<SQLDigest.OrderEnum>(), false, false, false, new HashSet<MeasureDesc>());
        iterator = storageEngine.search(context, sqlDigest, mockup.newTupleInfo(groups, aggregations));
        // Drain the iterator, printing each tuple for test debugging.
        while (iterator.hasNext()) {
            ITuple tuple = iterator.next();
            System.out.println("Tuple = " + tuple);
            count++;
        }
    } finally {
        // Always release scanner resources, even if iteration throws.
        if (iterator != null)
            iterator.close();
    }
    return count;
}
 
Example 3
/**
 * Iterator over one segment's scan results, converting grid-table records into
 * tuples. Precomputes the grid-table column indexes for the selected
 * dimensions and metrics up front.
 */
public SegmentCubeTupleIterator(CubeSegmentScanner scanner, Cuboid cuboid, Set<TblColRef> selectedDimensions, //
        Set<FunctionDesc> selectedMetrics, TupleInfo returnTupleInfo, StorageContext context) {
    this.scanner = scanner;
    this.cuboid = cuboid;
    this.selectedDimensions = selectedDimensions;
    this.selectedMetrics = selectedMetrics;
    this.tupleInfo = returnTupleInfo;
    this.tuple = new Tuple(returnTupleInfo);
    this.context = context;

    CuboidToGridTableMapping mapping = context.getMapping();
    int[] dimIndexes = mapping.getDimIndexes(selectedDimensions);
    int[] metricIndexes = mapping.getMetricsIndexes(selectedMetrics);

    // Combined grid-table column index list: dimensions first, then metrics.
    int[] allColIndexes = new int[dimIndexes.length + metricIndexes.length];
    int pos = 0;
    for (int idx : dimIndexes) {
        allColIndexes[pos++] = idx;
    }
    for (int idx : metricIndexes) {
        allColIndexes[pos++] = idx;
    }

    this.gtValues = getGTValuesIterator(scanner.iterator(), scanner.getScanRequest(), dimIndexes, metricIndexes);
    // The tuple converter is supplied by the storage query that owns this context.
    this.cubeTupleConverter = ((GTCubeStorageQueryBase) context.getStorageQuery()).newCubeTupleConverter(
            scanner.cubeSeg, cuboid, selectedDimensions, selectedMetrics, allColIndexes, tupleInfo);
}
 
Example 4
/**
 * Chains per-segment tuple iterators into a single result stream. When
 * partition results can be merge-sorted and the query is not a raw query, the
 * segments are merged with a limit-aware sorted merger; otherwise they are
 * simply concatenated.
 */
public SequentialCubeTupleIterator(List<CubeSegmentScanner> scanners, Cuboid cuboid,
        Set<TblColRef> selectedDimensions, List<TblColRef> rtGroups, Set<TblColRef> groups, //
        Set<FunctionDesc> selectedMetrics, TupleInfo returnTupleInfo, StorageContext context, SQLDigest sqlDigest) {
    this.context = context;
    this.scanners = scanners;

    // Dimensions to read = selected dimensions plus runtime group-by columns.
    Set<TblColRef> selectedDims = Sets.newHashSet(selectedDimensions);
    selectedDims.addAll(rtGroups);

    segmentCubeTupleIterators = Lists.newArrayList();
    for (CubeSegmentScanner scanner : scanners) {
        segmentCubeTupleIterators.add(new SegmentCubeTupleIterator(scanner, cuboid, selectedDims, selectedMetrics, returnTupleInfo, context));
    }

    if (context.mergeSortPartitionResults() && !sqlDigest.isRawQuery) {
        //query with limit
        logger.info("Using SortedIteratorMergerWithLimit to merge segment results");
        // Double cast: view List<SegmentCubeTupleIterator>.iterator() as Iterator<Iterator<ITuple>>.
        Iterator<Iterator<ITuple>> transformed = (Iterator<Iterator<ITuple>>) (Iterator<?>) segmentCubeTupleIterators.iterator();
        tupleIterator = new SortedIteratorMergerWithLimit<ITuple>(transformed, context.getFinalPushDownLimit(), getTupleDimensionComparator(cuboid, groups, returnTupleInfo)).getIterator();
    } else {
        //normal case
        logger.info("Using Iterators.concat to merge segment results");
        tupleIterator = Iterators.concat(segmentCubeTupleIterators.iterator());
    }
}
 
Example 5
/**
 * Entry point of a cube storage query: prunes segments by the query filter,
 * builds one scanner per surviving segment, and chains the per-segment results.
 *
 * @return an empty iterator when every segment is pruned or skipped
 */
@Override
public ITupleIterator search(StorageContext context, SQLDigest sqlDigest, TupleInfo returnTupleInfo) {
    GTCubeStorageQueryRequest request = getStorageQueryRequest(context, sqlDigest, returnTupleInfo);

    SegmentPruner pruner = new SegmentPruner(sqlDigest.filter);
    List<CubeSegmentScanner> segmentScanners = Lists.newArrayList();
    for (CubeSegment segment : pruner.listSegmentsForQuery(cubeInstance)) {
        CubeSegmentScanner segScanner = new CubeSegmentScanner(segment, request.getCuboid(),
                request.getDimensions(), request.getGroups(), request.getDynGroups(),
                request.getDynGroupExprs(), request.getMetrics(), request.getDynFuncs(),
                request.getFilter(), request.getHavingFilter(), request.getContext());
        // A scanner may decide the whole segment can be skipped.
        if (segScanner.isSegmentSkipped())
            continue;
        segmentScanners.add(segScanner);
    }

    if (segmentScanners.isEmpty())
        return ITupleIterator.EMPTY_TUPLE_ITERATOR;

    return new SequentialCubeTupleIterator(segmentScanners, request.getCuboid(), request.getDimensions(),
            request.getDynGroups(), request.getGroups(), request.getMetrics(), returnTupleInfo,
            request.getContext(), sqlDigest);
}
 
Example 6
/**
 * Enables stream aggregation on the context only when it is expected to be
 * beneficial: the cube config must allow it, no shard-by column may appear in
 * the group-by set, and storage aggregation must actually be needed.
 */
private void enableStreamAggregateIfBeneficial(Cuboid cuboid, Set<TblColRef> groupsD, StorageContext context) {
    CubeDesc cubeDesc = cuboid.getCubeDesc();
    boolean beneficial = cubeDesc.getConfig().isStreamAggregateEnabled();

    // Collect shard-by columns that also appear in the group-by set.
    Set<TblColRef> shardColsInGroup = Sets.newHashSet();
    for (TblColRef shardCol : cubeDesc.getShardByColumns()) {
        if (groupsD.contains(shardCol)) {
            shardColsInGroup.add(shardCol);
        }
    }

    if (!shardColsInGroup.isEmpty()) {
        beneficial = false;
        logger.debug("Aggregate partition results is not beneficial because shard by columns in groupD: {}",
                shardColsInGroup);
    }

    if (!context.isNeedStorageAggregation()) {
        beneficial = false;
        logger.debug("Aggregate partition results is not beneficial because no storage aggregation");
    }

    if (beneficial) {
        context.enableStreamAggregate();
    }
}
 
Example 7
/**
 * Value object bundling everything the storage engine needs to run a cube
 * query: target cuboid, dimensions and groups (including dynamic ones),
 * metrics, filters and the per-query storage context. Pure field assignment.
 */
public GTCubeStorageQueryRequest(Cuboid cuboid, Set<TblColRef> dimensions, //
        Set<TblColRef> groups, List<TblColRef> dynGroups, List<TupleExpression> dynGroupExprs, //
        Set<TblColRef> filterCols, Set<FunctionDesc> metrics, List<DynamicFunctionDesc> dynFuncs, //
        TupleFilter filter, TupleFilter havingFilter, StorageContext context) {
    this.cuboid = cuboid;
    this.dimensions = dimensions;
    this.groups = groups;
    this.dynGroups = dynGroups;
    this.dynGroupExprs = dynGroupExprs;
    this.filterCols = filterCols;
    this.metrics = metrics;
    this.dynFuncs = dynFuncs;
    this.filter = filter;
    this.havingFilter = havingFilter;
    this.context = context;
}
 
Example 8
Source Project: kylin   Source File: OLAPContext.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates an OLAP context for the given sequence number and applies any
 * thread-local query parameters (partial-result acceptance, user auth info).
 *
 * @param seq used as both this context's id and the backing StorageContext id
 */
public OLAPContext(int seq) {
    this.id = seq;
    this.storageContext = new StorageContext(seq);
    this.sortColumns = Lists.newArrayList();
    this.sortOrders = Lists.newArrayList();
    // Per-thread parameters set by the query layer before planning.
    Map<String, String> parameters = _localPrarameters.get();
    if (parameters != null) {
        String acceptPartialResult = parameters.get(PRM_ACCEPT_PARTIAL_RESULT);
        if (acceptPartialResult != null) {
            this.storageContext.setAcceptPartialResult(Boolean.parseBoolean(acceptPartialResult));
        }
        String acceptUserInfo = parameters.get(PRM_USER_AUTHEN_INFO);
        if (null != acceptUserInfo)
            this.olapAuthen.parseUserInfo(acceptUserInfo);
    }
}
 
Example 9
Source Project: kylin   Source File: ITStorageTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Runs a storage query for the given group-by columns, aggregations and
 * filter, then counts the tuples returned.
 *
 * @param context storage context carrying per-query settings for the engine
 * @return the number of tuples produced by the storage engine
 */
private int search(List<TblColRef> groups, List<FunctionDesc> aggregations, TupleFilter filter, StorageContext context) {
    int count = 0;
    ITupleIterator iterator = null;
    try {
        // Build a minimal SQLDigest: most collections are empty; the inline
        // comments label each positional argument of the long constructor.
        SQLDigest sqlDigest = new SQLDigest("default.test_kylin_fact", /*allCol*/ Collections.<TblColRef> emptySet(), /*join*/ null, //
                groups, /*subqueryJoinParticipants*/ Sets.<TblColRef> newHashSet(), //
                /*dynamicGroupByColumns*/ Collections.<TblColRef, TupleExpression> emptyMap(), //
                /*groupByExpression*/ false, //
                /*metricCol*/ Collections.<TblColRef> emptySet(), aggregations, /*aggrSqlCalls*/ Collections.<SQLCall> emptyList(), //
                /*dynamicAggregations*/ Collections.<DynamicFunctionDesc> emptyList(), //
                /*runtimeDimensionColumns*/ Collections.<TblColRef> emptySet(), //
                /*runtimeMetricColumns*/ Collections.<TblColRef> emptySet(), //
                /*filter col*/ Collections.<TblColRef> emptySet(), filter, null, //
                /*sortCol*/ new ArrayList<TblColRef>(), new ArrayList<SQLDigest.OrderEnum>(), false, false, false, new HashSet<MeasureDesc>());
        iterator = storageEngine.search(context, sqlDigest, mockup.newTupleInfo(groups, aggregations));
        // Drain the iterator, printing each tuple for test debugging.
        while (iterator.hasNext()) {
            ITuple tuple = iterator.next();
            System.out.println("Tuple = " + tuple);
            count++;
        }
    } finally {
        // Always release scanner resources, even if iteration throws.
        if (iterator != null)
            iterator.close();
    }
    return count;
}
 
Example 10
Source Project: kylin   Source File: SegmentCubeTupleIterator.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Iterator over one segment's scan results, converting grid-table records to
 * tuples. Precomputes the grid-table column indexes for the selected
 * dimensions and metrics.
 */
public SegmentCubeTupleIterator(CubeSegmentScanner scanner, Cuboid cuboid, Set<TblColRef> selectedDimensions, //
        Set<FunctionDesc> selectedMetrics, TupleInfo returnTupleInfo, StorageContext context) {
    this.scanner = scanner;
    this.cuboid = cuboid;
    this.selectedDimensions = selectedDimensions;
    this.selectedMetrics = selectedMetrics;
    this.tupleInfo = returnTupleInfo;
    this.tuple = new Tuple(returnTupleInfo);
    this.context = context;

    CuboidToGridTableMapping mapping = context.getMapping();
    int[] gtDimsIdx = mapping.getDimIndexes(selectedDimensions);
    int[] gtMetricsIdx = mapping.getMetricsIndexes(selectedMetrics);
    // gtColIdx = gtDimsIdx + gtMetricsIdx
    int[] gtColIdx = new int[gtDimsIdx.length + gtMetricsIdx.length];
    System.arraycopy(gtDimsIdx, 0, gtColIdx, 0, gtDimsIdx.length);
    System.arraycopy(gtMetricsIdx, 0, gtColIdx, gtDimsIdx.length, gtMetricsIdx.length);

    this.gtValues = getGTValuesIterator(scanner.iterator(), scanner.getScanRequest(), gtDimsIdx, gtMetricsIdx);
    // The tuple converter is supplied by the storage query that owns this context.
    this.cubeTupleConverter = ((GTCubeStorageQueryBase) context.getStorageQuery()).newCubeTupleConverter(
            scanner.cubeSeg, cuboid, selectedDimensions, selectedMetrics, gtColIdx, tupleInfo);
}
 
Example 11
Source Project: kylin   Source File: SequentialCubeTupleIterator.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Chains per-segment tuple iterators into a single result stream. When
 * partition results can be merge-sorted and the query is not a raw query, the
 * segments are merged with a limit-aware sorted merger; otherwise they are
 * simply concatenated.
 */
public SequentialCubeTupleIterator(List<CubeSegmentScanner> scanners, Cuboid cuboid,
        Set<TblColRef> selectedDimensions, List<TblColRef> rtGroups, Set<TblColRef> groups, //
        Set<FunctionDesc> selectedMetrics, TupleInfo returnTupleInfo, StorageContext context, SQLDigest sqlDigest) {
    this.context = context;
    this.scanners = scanners;

    // Dimensions to read = selected dimensions plus runtime group-by columns.
    Set<TblColRef> selectedDims = Sets.newHashSet(selectedDimensions);
    selectedDims.addAll(rtGroups);

    segmentCubeTupleIterators = Lists.newArrayList();
    for (CubeSegmentScanner scanner : scanners) {
        segmentCubeTupleIterators.add(new SegmentCubeTupleIterator(scanner, cuboid, selectedDims, selectedMetrics, returnTupleInfo, context));
    }

    if (context.mergeSortPartitionResults() && !sqlDigest.isRawQuery) {
        //query with limit
        logger.info("Using SortedIteratorMergerWithLimit to merge segment results");
        // Double cast: view List<SegmentCubeTupleIterator>.iterator() as Iterator<Iterator<ITuple>>.
        Iterator<Iterator<ITuple>> transformed = (Iterator<Iterator<ITuple>>) (Iterator<?>) segmentCubeTupleIterators.iterator();
        tupleIterator = new SortedIteratorMergerWithLimit<ITuple>(transformed, context.getFinalPushDownLimit(), getTupleDimensionComparator(cuboid, groups, returnTupleInfo)).getIterator();
    } else {
        //normal case
        logger.info("Using Iterators.concat to merge segment results");
        tupleIterator = Iterators.concat(segmentCubeTupleIterators.iterator());
    }
}
 
Example 12
Source Project: kylin   Source File: GTCubeStorageQueryBase.java    License: Apache License 2.0 6 votes vote down vote up
@Override
public ITupleIterator search(StorageContext context, SQLDigest sqlDigest, TupleInfo returnTupleInfo) {
    GTCubeStorageQueryRequest request = getStorageQueryRequest(context, sqlDigest, returnTupleInfo);

    List<CubeSegmentScanner> scanners = Lists.newArrayList();
    SegmentPruner segPruner = new SegmentPruner(sqlDigest.filter);
    for (CubeSegment cubeSeg : segPruner.listSegmentsForQuery(cubeInstance)) {
        CubeSegmentScanner scanner = new CubeSegmentScanner(cubeSeg, request.getCuboid(), request.getDimensions(), //
                request.getGroups(), request.getDynGroups(), request.getDynGroupExprs(), //
                request.getMetrics(), request.getDynFuncs(), //
                request.getFilter(), request.getHavingFilter(), request.getContext());
        if (!scanner.isSegmentSkipped())
            scanners.add(scanner);
    }

    if (scanners.isEmpty())
        return ITupleIterator.EMPTY_TUPLE_ITERATOR;

    return new SequentialCubeTupleIterator(scanners, request.getCuboid(), request.getDimensions(),
            request.getDynGroups(), request.getGroups(), request.getMetrics(), returnTupleInfo,
            request.getContext(), sqlDigest);
}
 
Example 13
Source Project: kylin   Source File: GTCubeStorageQueryBase.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Enables stream aggregation on the context only when it is expected to be
 * beneficial: the cube config must allow it, no shard-by column may appear in
 * the group-by set, and storage aggregation must actually be needed.
 */
private void enableStreamAggregateIfBeneficial(Cuboid cuboid, Set<TblColRef> groupsD, StorageContext context) {
    CubeDesc cubeDesc = cuboid.getCubeDesc();
    boolean enabled = cubeDesc.getConfig().isStreamAggregateEnabled();

    // Collect shard-by columns that also appear in the group-by set.
    Set<TblColRef> shardByInGroups = Sets.newHashSet();
    for (TblColRef col : cubeDesc.getShardByColumns()) {
        if (groupsD.contains(col)) {
            shardByInGroups.add(col);
        }
    }
    if (!shardByInGroups.isEmpty()) {
        enabled = false;
        logger.debug("Aggregate partition results is not beneficial because shard by columns in groupD: {}",
                shardByInGroups);
    }

    if (!context.isNeedStorageAggregation()) {
        enabled = false;
        logger.debug("Aggregate partition results is not beneficial because no storage aggregation");
    }

    if (enabled) {
        context.enableStreamAggregate();
    }
}
 
Example 14
Source Project: kylin   Source File: GTCubeStorageQueryRequest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Value object bundling everything the storage engine needs to run a cube
 * query: target cuboid, dimensions and groups (including dynamic ones),
 * metrics, filters and the per-query storage context. Pure field assignment.
 */
public GTCubeStorageQueryRequest(Cuboid cuboid, Set<TblColRef> dimensions, //
        Set<TblColRef> groups, List<TblColRef> dynGroups, List<TupleExpression> dynGroupExprs, //
        Set<TblColRef> filterCols, Set<FunctionDesc> metrics, List<DynamicFunctionDesc> dynFuncs, //
        TupleFilter filter, TupleFilter havingFilter, StorageContext context) {
    this.cuboid = cuboid;
    this.dimensions = dimensions;
    this.groups = groups;
    this.dynGroups = dynGroups;
    this.dynGroupExprs = dynGroupExprs;
    this.filterCols = filterCols;
    this.metrics = metrics;
    this.dynFuncs = dynFuncs;
    this.filter = filter;
    this.havingFilter = havingFilter;
    this.context = context;
}
 
Example 15
Source Project: Kylin   Source File: OLAPSortRel.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Rewrite phase of the OLAP sort relation: records each ORDER BY column in the
 * storage context so the storage engine can push down sorting on measures,
 * then rebuilds this relation's row type.
 */
@Override
public void implementRewrite(RewriteImplementor implementor) {
    implementor.visitChild(this, getChild());

    for (RelFieldCollation fieldCollation : this.collation.getFieldCollations()) {
        int index = fieldCollation.getFieldIndex();
        StorageContext.OrderEnum order = getOrderEnum(fieldCollation.getDirection());
        OLAPRel olapChild = (OLAPRel) this.getChild();
        TblColRef orderCol = olapChild.getColumnRowType().getAllColumns().get(index);
        MeasureDesc measure = findMeasure(orderCol);
        // Only sorts on measures are registered with storage; dimension sorts
        // are presumably handled elsewhere -- TODO confirm.
        if (measure != null) {
            this.context.storageContext.addSort(measure, order);
        }
        // Mark that a sort exists regardless of whether it was a measure sort.
        this.context.storageContext.markSort();
    }

    // Rebuild row type after the rewrite pass.
    this.rowType = this.deriveRowType();
    this.columnRowType = buildColumnRowType();
}
 
Example 16
Source Project: Kylin   Source File: CubeSegmentTupleIterator.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Iterates tuples for one cube segment by scanning a set of HBase key ranges.
 * Opens the HTable connection up front and eagerly positions on the first range.
 */
public CubeSegmentTupleIterator(CubeSegment cubeSeg, Collection<HBaseKeyRange> keyRanges, HConnection conn, Collection<TblColRef> dimensions, TupleFilter filter, Collection<TblColRef> groupBy, Collection<RowValueDecoder> rowValueDecoders, StorageContext context) {
    this.cube = cubeSeg.getCubeInstance();
    this.cubeSeg = cubeSeg;
    this.dimensions = dimensions;
    this.filter = filter;
    this.groupBy = groupBy;
    this.rowValueDecoders = rowValueDecoders;
    this.context = context;
    // Storage location identifier doubles as the HBase table name.
    this.tableName = cubeSeg.getStorageLocationIdentifier();
    this.rowKeyDecoder = new RowKeyDecoder(this.cubeSeg);
    this.scanCount = 0;

    try {
        this.table = conn.getTable(tableName);
    } catch (Throwable t) {
        // Wrap any failure (including Errors) with the table name for context.
        throw new StorageException("Error when open connection to table " + tableName, t);
    }
    this.rangeIterator = keyRanges.iterator();
    // Start scanning the first key range immediately.
    scanNextRange();
}
 
Example 17
Source Project: Kylin   Source File: ObserverEnabler.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Scans the segment's HTable, pushing aggregation down to the HBase observer
 * coprocessor when coprocessor use is enabled in the storage context;
 * otherwise falls back to a plain client-side scan.
 *
 * @return a scanner over (possibly server-side pre-aggregated) results
 * @throws IOException if the HBase scanner cannot be opened
 */
public static ResultScanner scanWithCoprocessorIfBeneficial(CubeSegment segment, Cuboid cuboid, TupleFilter tupleFiler, //
        Collection<TblColRef> groupBy, Collection<RowValueDecoder> rowValueDecoders, StorageContext context, HTableInterface table, Scan scan) throws IOException {

    // Coprocessor disabled: plain scan, no server-side aggregation.
    // (idiom fix: was `== false`)
    if (!context.isCoprocessorEnabled()) {
        return table.getScanner(scan);
    }

    // Serialize the query shape for the region observer.
    CoprocessorRowType type = CoprocessorRowType.fromCuboid(segment, cuboid);
    CoprocessorFilter filter = CoprocessorFilter.fromFilter(segment, tupleFiler);
    CoprocessorProjector projector = CoprocessorProjector.makeForObserver(segment, cuboid, groupBy);
    ObserverAggregators aggrs = ObserverAggregators.fromValueDecoders(rowValueDecoders);

    if (DEBUG_LOCAL_COPROCESSOR) {
        // Debug path: run the aggregation locally by wrapping a plain scanner.
        RegionScanner innerScanner = new RegionScannerAdapter(table.getScanner(scan));
        AggregationScanner aggrScanner = new AggregationScanner(type, filter, projector, aggrs, innerScanner);
        return new ResultScannerAdapter(aggrScanner);
    } else {
        // Normal path: attach serialized query metadata so the observer aggregates server-side.
        scan.setAttribute(AggregateRegionObserver.COPROCESSOR_ENABLE, new byte[] { 0x01 });
        scan.setAttribute(AggregateRegionObserver.TYPE, CoprocessorRowType.serialize(type));
        scan.setAttribute(AggregateRegionObserver.PROJECTOR, CoprocessorProjector.serialize(projector));
        scan.setAttribute(AggregateRegionObserver.AGGREGATORS, ObserverAggregators.serialize(aggrs));
        scan.setAttribute(AggregateRegionObserver.FILTER, CoprocessorFilter.serialize(filter));
        return table.getScanner(scan);
    }
}
 
Example 18
Source Project: Kylin   Source File: SerializedHBaseTupleIterator.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Tuple iterator that serializes results from multiple cube segments, scanning
 * them one after another. The partial-result limit is the larger of the query
 * limit and a default floor.
 */
public SerializedHBaseTupleIterator(HConnection conn, List<HBaseKeyRange> segmentKeyRanges, CubeInstance cube, Collection<TblColRef> dimensions, TupleFilter filter, Collection<TblColRef> groupBy, Collection<RowValueDecoder> rowValueDecoders, StorageContext context) {

        this.context = context;
        int limit = context.getLimit();
        // Never let the partial-result cap drop below the default floor.
        this.partialResultLimit = Math.max(limit, PARTIAL_DEFAULT_LIMIT);

        this.segmentIteratorList = new ArrayList<CubeSegmentTupleIterator>(segmentKeyRanges.size());
        // Group key ranges by segment and build one iterator per segment.
        Map<CubeSegment, List<HBaseKeyRange>> rangesMap = makeRangesMap(segmentKeyRanges);
        for (Map.Entry<CubeSegment, List<HBaseKeyRange>> entry : rangesMap.entrySet()) {
            CubeSegmentTupleIterator segIter = new CubeSegmentTupleIterator(entry.getKey(), entry.getValue(), conn, dimensions, filter, groupBy, rowValueDecoders, context);
            this.segmentIteratorList.add(segIter);
        }

        this.segmentIteratorIterator = this.segmentIteratorList.iterator();
        // Position on the first segment, or an empty iterator when there are none.
        if (this.segmentIteratorIterator.hasNext()) {
            this.segmentIterator = this.segmentIteratorIterator.next();
        } else {
            this.segmentIterator = ITupleIterator.EMPTY_TUPLE_ITERATOR;
        }
    }
 
Example 19
Source Project: Kylin   Source File: CubeStorageEngine.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Caps the storage scan threshold when memory-hungry count-distinct measures
 * are present, based on a rough per-row size estimate against the per-query
 * memory budget. No-op for queries without such measures.
 */
private void setThreshold(Collection<TblColRef> dimensions, List<RowValueDecoder> valueDecoders, StorageContext context) {
    // Only throttle when a memory-hungry count distinct is involved.
    // (idiom fix: was `== false`)
    if (!RowValueDecoder.hasMemHungryCountDistinct(valueDecoders)) {
        return;
    }

    // Rough row size estimate: ~3 bytes per dimension plus each projected measure's space.
    int rowSizeEst = dimensions.size() * 3;
    for (RowValueDecoder decoder : valueDecoders) {
        MeasureDesc[] measures = decoder.getMeasures();
        BitSet projectionIndex = decoder.getProjectionIndex();
        for (int i = projectionIndex.nextSetBit(0); i >= 0; i = projectionIndex.nextSetBit(i + 1)) {
            FunctionDesc func = measures[i].getFunction();
            rowSizeEst += func.getReturnDataType().getSpaceEstimate();
        }
    }

    // Threshold = how many estimated rows fit in the per-query memory budget.
    long rowEst = MEM_BUDGET_PER_QUERY / rowSizeEst;
    context.setThreshold((int) rowEst);
}
 
Example 20
Source Project: Kylin   Source File: StorageTest.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Runs a storage query and counts the returned tuples (older SQLDigest API).
 * Exceptions are printed and swallowed so the count of tuples seen so far is
 * still returned -- deliberate best-effort behavior for this test helper.
 */
private int search(List<TblColRef> groups, List<FunctionDesc> aggregations, TupleFilter filter, StorageContext context) {
    int count = 0;
    ITupleIterator iterator = null;
    try {
        SQLDigest sqlDigest = new SQLDigest("default.test_kylin_fact", filter, null, Collections.<TblColRef> emptySet(), groups, Collections.<TblColRef> emptySet(), Collections.<TblColRef> emptySet(), aggregations);
        iterator = storageEngine.search(context, sqlDigest);
        // Drain the iterator, printing each tuple for test debugging.
        while (iterator.hasNext()) {
            ITuple tuple = iterator.next();
            System.out.println("Tuple = " + tuple);
            count++;
        }
    } catch (Exception e) {
        // Intentionally swallowed: the test still returns the partial count.
        e.printStackTrace();
    } finally {
        // Always release scanner resources.
        if (iterator != null) {
            iterator.close();
        }
    }
    return count;
}
 
Example 21
/**
 * Streaming (real-time) variant of search: sets up a per-query streaming
 * profile, then queries the local streaming segment store directly instead of
 * scanning cube segments.
 */
@Override
public ITupleIterator search(StorageContext context, SQLDigest sqlDigest, TupleInfo returnTupleInfo) {
    StreamingSegmentManager cubeDataStore = StreamingServer.getInstance().getStreamingSegmentManager(
            cubeInstance.getName());
    // Detail profiling is toggled per query via backdoor toggles.
    boolean enableStreamProfile = BackdoorToggles.isStreamingProfileEnable();
    StreamingQueryProfile queryProfile = new StreamingQueryProfile(QueryContextFacade.current().getQueryId(),
            System.currentTimeMillis());
    if (enableStreamProfile) {
        queryProfile.enableDetailProfile();
    }
    // Publish the profile so downstream streaming code can record timings.
    StreamingQueryProfile.set(queryProfile);
    GTCubeStorageQueryRequest request = getStorageQueryRequest(context, sqlDigest, returnTupleInfo);
    return cubeDataStore.getSearcher().search(returnTupleInfo, request.getFilter(), request.getHavingFilter(),
            request.getDimensions(), request.getGroups(), request.getMetrics(), context.isNeedStorageAggregation());
}
 
Example 22
Source Project: kylin-on-parquet-v2   Source File: ITStorageTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Test fixture: loads test metadata, resolves the target cube, creates the
 * storage query engine and a fresh StorageContext pointed at the configured
 * storage URL.
 */
@Before
public void setUp() throws Exception {
    this.createTestMetadata();

    // Resolve the cube under test from the test metadata store.
    CubeManager cubeManager = CubeManager.getInstance(getTestConfig());
    cube = cubeManager.getCube("test_kylin_cube_without_slr_left_join_empty");
    Assert.assertNotNull(cube);

    storageEngine = StorageFactory.createQuery(cube);

    // Fresh per-test storage context, pointed at the configured storage URL.
    context = new StorageContext();
    context.setConnUrl(KylinConfig.getInstanceFromEnv().getStorageUrl());

    mockup = new StorageMockUtils(cube.getModel());
}
 
Example 23
Source Project: kylin-on-parquet-v2   Source File: HybridStorageQuery.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Fans the query out to every ready-and-capable underlying realization and
 * combines their results into one compound iterator.
 */
@Override
public ITupleIterator search(final StorageContext context, final SQLDigest sqlDigest, final TupleInfo returnTupleInfo) {
    List<ITupleIterator> results = Lists.newArrayList();
    for (int idx = 0; idx < realizations.length; idx++) {
        // Skip realizations that are not ready or cannot answer this digest.
        if (!realizations[idx].isReady())
            continue;
        if (!realizations[idx].isCapable(sqlDigest).capable)
            continue;
        results.add(storageEngines[idx].search(context, sqlDigest, returnTupleInfo));
    }
    // Combine the per-realization iterators into one stream.
    return new CompoundTupleIterator(results);
}
 
Example 24
public StorageResponseGTScatter(GTScanRequest scanRequest, IPartitionStreamer partitionStreamer,
        StorageContext context) {
    this.info = scanRequest.getInfo();
    this.partitionStreamer = partitionStreamer;
    this.blocks = partitionStreamer.asByteArrayIterator();
    this.columns = scanRequest.getColumns();
    this.groupByDims = scanRequest.getAggrGroupBy();
    this.needSorted = (context.getFinalPushDownLimit() != Integer.MAX_VALUE) || context.isStreamAggregateEnabled();
}
 
Example 25
Source Project: kylin-on-parquet-v2   Source File: CubeHBaseRPC.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Base RPC for reading a cube segment's grid table over HBase. Captures the
 * current query context and prepares fuzzy row-key encoders for the segment
 * and cuboid.
 */
public CubeHBaseRPC(ISegment segment, Cuboid cuboid, GTInfo fullGTInfo, StorageContext context) {
    // Only cube segments are supported by the HBase storage RPC.
    Preconditions.checkArgument(segment instanceof CubeSegment, "segment must be CubeSegment");

    this.cubeSeg = (CubeSegment) segment;
    this.cuboid = cuboid;
    this.fullGTInfo = fullGTInfo;
    this.queryContext = QueryContextFacade.current();
    this.storageContext = context;

    // Encoders for fuzzy row-key matching on this segment/cuboid.
    this.fuzzyKeyEncoder = new FuzzyKeyEncoder(cubeSeg, cuboid);
    this.fuzzyMaskEncoder = new FuzzyMaskEncoder(cubeSeg, cuboid);
}
 
Example 26
/**
 * Translates a SQLDigest into a storage query request: builds dimensions and
 * metrics, expands derived columns to their host columns, and identifies the
 * cuboid to scan. Also registers this query and the chosen cuboid on the
 * storage context.
 */
public GTCubeStorageQueryRequest getStorageQueryRequest(StorageContext context, SQLDigest sqlDigest,
                                                        TupleInfo returnTupleInfo) {
    context.setStorageQuery(this);

    //cope with queries with no aggregations
    RawQueryLastHacker.hackNoAggregations(sqlDigest, cubeDesc, returnTupleInfo);

    // Customized measure taking effect: e.g. allow custom measures to help raw queries
    notifyBeforeStorageQuery(sqlDigest);

    Collection<TblColRef> groups = sqlDigest.groupbyColumns;
    TupleFilter filter = sqlDigest.filter;

    // build dimension & metrics
    Set<TblColRef> dimensions = new LinkedHashSet<>();
    Set<FunctionDesc> metrics = new LinkedHashSet<>();
    buildDimensionsAndMetrics(sqlDigest, dimensions, metrics);

    // all dimensions = groups + other(like filter) dimensions
    Set<TblColRef> otherDims = Sets.newHashSet(dimensions);
    otherDims.removeAll(groups);

    // expand derived (xxxD means contains host columns only, derived columns were translated)
    Set<TblColRef> derivedPostAggregation = Sets.newHashSet();
    Set<TblColRef> groupsD = expandDerived(groups, derivedPostAggregation);
    Set<TblColRef> otherDimsD = expandDerived(otherDims, derivedPostAggregation);
    otherDimsD.removeAll(groupsD);

    // identify cuboid
    Set<TblColRef> dimensionsD = new LinkedHashSet<>();
    dimensionsD.addAll(groupsD);
    dimensionsD.addAll(otherDimsD);
    Cuboid cuboid = findCuboid(cubeInstance, dimensionsD, metrics);
    context.setCuboid(cuboid);
    // NOTE(review): several request fields (dynGroups, filterCols, filter, etc.)
    // are passed as null here -- callers presumably tolerate that; verify before reuse.
    return new GTCubeStorageQueryRequest(cuboid, dimensionsD, groupsD, null, null, null,
            metrics, null, null, null, context);
}
 
Example 27
Source Project: kylin   Source File: LocalStreamStorageQuery.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Streaming (real-time) variant of search: sets up a per-query streaming
 * profile, then queries the local streaming segment store directly instead of
 * scanning cube segments.
 */
@Override
public ITupleIterator search(StorageContext context, SQLDigest sqlDigest, TupleInfo returnTupleInfo) {
    StreamingSegmentManager cubeDataStore = StreamingServer.getInstance().getStreamingSegmentManager(
            cubeInstance.getName());
    // Detail profiling is toggled per query via backdoor toggles.
    boolean enableStreamProfile = BackdoorToggles.isStreamingProfileEnable();
    StreamingQueryProfile queryProfile = new StreamingQueryProfile(QueryContextFacade.current().getQueryId(),
            System.currentTimeMillis());
    if (enableStreamProfile) {
        queryProfile.enableDetailProfile();
    }
    // Publish the profile so downstream streaming code can record timings.
    StreamingQueryProfile.set(queryProfile);
    GTCubeStorageQueryRequest request = getStorageQueryRequest(context, sqlDigest, returnTupleInfo);
    return cubeDataStore.getSearcher().search(returnTupleInfo, request.getFilter(), request.getHavingFilter(),
            request.getDimensions(), request.getGroups(), request.getMetrics(), context.isNeedStorageAggregation());
}
 
Example 28
Source Project: kylin   Source File: ITStorageTest.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Test fixture: loads test metadata, resolves the target cube, creates the
 * storage query engine and a fresh StorageContext pointed at the configured
 * storage URL.
 */
@Before
public void setUp() throws Exception {
    this.createTestMetadata();

    // Resolve the cube under test from the test metadata store.
    CubeManager cubeMgr = CubeManager.getInstance(getTestConfig());
    cube = cubeMgr.getCube("test_kylin_cube_without_slr_left_join_empty");
    Assert.assertNotNull(cube);
    storageEngine = StorageFactory.createQuery(cube);
    // Fresh per-test storage context, pointed at the configured storage URL.
    context = new StorageContext();
    context.setConnUrl(KylinConfig.getInstanceFromEnv().getStorageUrl());
    mockup = new StorageMockUtils(cube.getModel());
}
 
Example 29
Source Project: kylin   Source File: HybridStorageQuery.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Fans the query out to every ready-and-capable underlying realization and
 * combines their results into one compound iterator.
 */
@Override
public ITupleIterator search(final StorageContext context, final SQLDigest sqlDigest, final TupleInfo returnTupleInfo) {
    List<ITupleIterator> tupleIterators = Lists.newArrayList();
    for (int i = 0; i < realizations.length; i++) {
        // Query only realizations that are ready and able to answer this digest.
        if (realizations[i].isReady() && realizations[i].isCapable(sqlDigest).capable) {
            ITupleIterator dataIterator = storageEngines[i].search(context, sqlDigest, returnTupleInfo);
            tupleIterators.add(dataIterator);
        }
    }
    // combine tuple iterator
    return new CompoundTupleIterator(tupleIterators);
}
 
Example 30
Source Project: kylin   Source File: StorageResponseGTScatter.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Scatters storage responses back into grid-table records. Results need
 * sorting when a final push-down limit is in effect or stream aggregation is
 * enabled.
 */
public StorageResponseGTScatter(GTScanRequest scanRequest, IPartitionStreamer partitionStreamer,
        StorageContext context) {
    this.info = scanRequest.getInfo();
    this.partitionStreamer = partitionStreamer;
    this.blocks = partitionStreamer.asByteArrayIterator();
    this.columns = scanRequest.getColumns();
    this.groupByDims = scanRequest.getAggrGroupBy();
    // Integer.MAX_VALUE means "no limit pushed down".
    this.needSorted = (context.getFinalPushDownLimit() != Integer.MAX_VALUE) || context.isStreamAggregateEnabled();
}