Java Code Examples for org.apache.lucene.search.IndexSearcher#createNormalizedWeight()

The following examples show how to use org.apache.lucene.search.IndexSearcher#createNormalizedWeight().
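As a quick orientation before the project examples: createNormalizedWeight(Query, boolean) rewrites the query, creates its Weight and normalizes it in a single call; the boolean flag says whether scores are needed. The sketch below illustrates the typical call pattern with a hypothetical matchingDocIds helper (not taken from any project on this page): compile the query once into a Weight with needsScores=false, then pull a Scorer per segment and walk its iterator. Note that later Lucene releases removed createNormalizedWeight in favour of IndexSearcher#createWeight.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

// Illustrative sketch only: collect the top-level ids of all documents matching a query,
// without scoring. The class and method names are made up for this example.
public final class QueryMatchCollector {

    public static List<Integer> matchingDocIds(IndexReader reader, Query query) throws IOException {
        IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null); // avoid the query cache, as several examples below do
        // needsScores=false: we only need to know which documents match
        Weight weight = searcher.createNormalizedWeight(query, false);
        List<Integer> docIds = new ArrayList<>();
        for (LeafReaderContext leaf : reader.leaves()) {
            Scorer scorer = weight.scorer(leaf); // null means no matches in this segment
            if (scorer == null) {
                continue;
            }
            DocIdSetIterator it = scorer.iterator();
            for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
                docIds.add(leaf.docBase + doc); // translate segment-local id to a top-level doc id
            }
        }
        return docIds;
    }
}
 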
Example 1
Source File: FiltersAggregator.java    From Elasticsearch with Apache License 2.0
@Override
public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    IndexSearcher contextSearcher = context.searchContext().searcher();
    // Rebuild and cache the per-filter weights only when the searcher has changed
    if (searcher != contextSearcher) {
        searcher = contextSearcher;
        weights = new Weight[filters.size()];
        for (int i = 0; i < filters.size(); ++i) {
            KeyedFilter keyedFilter = filters.get(i);
            // needsScores=false: the filters only decide bucket membership and are never scored
            this.weights[i] = contextSearcher.createNormalizedWeight(keyedFilter.filter, false);
        }
    }
    return new FiltersAggregator(name, factories, keys, weights, keyed, otherBucketKey, context, parent, pipelineAggregators, metaData);
}
 
Example 2
Source File: FilterAggregator.java    From Elasticsearch with Apache License 2.0
@Override
public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
        List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) throws IOException {
    IndexSearcher contextSearcher = context.searchContext().searcher();
    // Recreate the filter weight only when the searcher has changed
    if (searcher != contextSearcher) {
        searcher = contextSearcher;
        // needsScores=false: the filter only restricts which documents get aggregated
        weight = contextSearcher.createNormalizedWeight(filter, false);
    }
    return new FilterAggregator(name, weight, factories, context, parent, pipelineAggregators, metaData);
}
 
Example 3
Source File: FiltersFunctionScoreQuery.java    From Elasticsearch with Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    if (needsScores == false && minScore == null) {
        return subQuery.createWeight(searcher, needsScores);
    }

    // The sub query's scores are needed unless the functions replace them entirely
    // and none of the score functions itself requires scores.
    boolean subQueryNeedsScores = combineFunction != CombineFunction.REPLACE;
    Weight[] filterWeights = new Weight[filterFunctions.length];
    for (int i = 0; i < filterFunctions.length; ++i) {
        subQueryNeedsScores |= filterFunctions[i].function.needsScores();
        // Filters only select which function applies, so their weights never need scores
        filterWeights[i] = searcher.createNormalizedWeight(filterFunctions[i].filter, false);
    }
    Weight subQueryWeight = subQuery.createWeight(searcher, subQueryNeedsScores);
    return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights, subQueryNeedsScores);
}
 
Example 4
Source File: NestedAggregator.java    From Elasticsearch with Apache License 2.0
@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    // Reset parentFilter, so we resolve the parentDocs for each new segment being searched
    this.parentFilter = null;
    final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
    final IndexSearcher searcher = new IndexSearcher(topLevelContext);
    searcher.setQueryCache(null);
    final Weight weight = searcher.createNormalizedWeight(childFilter, false);
    Scorer childDocsScorer = weight.scorer(ctx);
    if (childDocsScorer == null) {
        childDocs = null;
    } else {
        childDocs = childDocsScorer.iterator();
    }

    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int parentDoc, long bucket) throws IOException {
            // Here we translate the parent doc to the list of its nested docs and call super.collect for every one of them so they get collected

            // If parentDoc is 0 this parent has no child docs (child docs always appear before their parent doc), so we can skip it:
            if (parentDoc == 0 || childDocs == null) {
                return;
            }
            if (parentFilter == null) {
                // The aggs are instantiated in reverse order: the innermost nested aggs first and the top-level aggs last.
                // So at the time a nested 'nested' agg is parsed, its closest parent nested agg has not been constructed yet.
                // The trick is therefore to resolve the parent filter at the last moment, just before it is needed, and to
                // use that agg's child filter as the parent filter.

                // Additional NOTE: this logic used to be performed in the setNextReader(...) method, but the assumption
                // that agg instances are constructed in reverse does not hold when buckets are constructed lazily during
                // aggs execution
                Query parentFilterNotCached = findClosestNestedPath(parent());
                if (parentFilterNotCached == null) {
                    parentFilterNotCached = Queries.newNonNestedFilter();
                }
                parentFilter = context.searchContext().bitsetFilterCache().getBitSetProducer(parentFilterNotCached);
                parentDocs = parentFilter.getBitSet(ctx);
                if (parentDocs == null) {
                    // There are no parentDocs in the segment, so return and set childDocs to null, so we exit early for future invocations.
                    childDocs = null;
                    return;
                }
            }

            final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
            int childDocId = childDocs.docID();
            if (childDocId <= prevParentDoc) {
                childDocId = childDocs.advance(prevParentDoc + 1);
            }

            for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
                collectBucket(sub, childDocId, bucket);
            }
        }
    };
}
 
Example 5
Source File: GeoDistanceRangeQuery.java    From Elasticsearch with Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException {
    final Weight boundingBoxWeight;
    if (boundingBoxFilter != null) {
        boundingBoxWeight = searcher.createNormalizedWeight(boundingBoxFilter, false);
    } else {
        boundingBoxWeight = null;
    }
    return new ConstantScoreWeight(this) {
        @Override
        public Scorer scorer(LeafReaderContext context) throws IOException {
            final DocIdSetIterator approximation;
            if (boundingBoxWeight != null) {
                Scorer s = boundingBoxWeight.scorer(context);
                if (s == null) {
                    // if the approximation does not match anything, we're done
                    return null;
                }
                approximation = s.iterator();
            } else {
                approximation = DocIdSetIterator.all(context.reader().maxDoc());
            }
            final MultiGeoPointValues values = indexFieldData.load(context).getGeoPointValues();
            final TwoPhaseIterator twoPhaseIterator = new TwoPhaseIterator(approximation) {
                @Override
                public boolean matches() throws IOException {
                    final int doc = approximation.docID();
                    values.setDocument(doc);
                    final int length = values.count();
                    for (int i = 0; i < length; i++) {
                        GeoPoint point = values.valueAt(i);
                        if (distanceBoundingCheck.isWithin(point.lat(), point.lon())) {
                            double d = fixedSourceDistance.calculate(point.lat(), point.lon());
                            if (d >= inclusiveLowerPoint && d <= inclusiveUpperPoint) {
                                return true;
                            }
                        }
                    }
                    return false;
                }

                @Override
                public float matchCost() {
                    if (distanceBoundingCheck == GeoDistance.ALWAYS_INSTANCE) {
                        return 0.0f;
                    } else {
                        // TODO: is this right (up to 4 comparisons from GeoDistance.SimpleDistanceBoundingCheck)?
                        return 4.0f;
                    }
                }
            };
            return new ConstantScoreScorer(this, score(), twoPhaseIterator);
        }
    };
}
 
Example 6
Source File: FilterableTermsEnum.java    From Elasticsearch with Apache License 2.0
public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable Query filter) throws IOException {
    if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) {
        throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag);
    }
    this.docsEnumFlag = docsEnumFlag;
    if (filter == null) {
        // Important - need to use the doc count that includes deleted docs
        // or we have this issue: https://github.com/elasticsearch/elasticsearch/issues/7951
        numDocs = reader.maxDoc();
    }
    List<LeafReaderContext> leaves = reader.leaves();
    List<Holder> enums = new ArrayList<>(leaves.size());
    final Weight weight;
    if (filter == null) {
        weight = null;
    } else {
        final IndexSearcher searcher = new IndexSearcher(reader);
        searcher.setQueryCache(null);
        weight = searcher.createNormalizedWeight(filter, false);
    }
    for (LeafReaderContext context : leaves) {
        Terms terms = context.reader().terms(field);
        if (terms == null) {
            continue;
        }
        TermsEnum termsEnum = terms.iterator();
        if (termsEnum == null) {
            continue;
        }
        BitSet bits = null;
        if (weight != null) {
            Scorer scorer = weight.scorer(context);
            if (scorer == null) {
                // fully filtered, none matching, no need to iterate on this
                continue;
            }
            DocIdSetIterator docs = scorer.iterator();

            // force applying the live docs so deleted documents are filtered out of the set
            final Bits liveDocs = context.reader().getLiveDocs();
            if (liveDocs != null) {
                docs = new FilteredDocIdSetIterator(docs) {
                    @Override
                    protected boolean match(int doc) {
                        return liveDocs.get(doc);
                    }
                };
            }

            BitDocIdSet.Builder builder = new BitDocIdSet.Builder(context.reader().maxDoc());
            builder.or(docs);
            bits = builder.build().bits();

            // Count how many docs are in our filtered set
            // TODO make this lazy-loaded only for those that need it?
            numDocs += bits.cardinality();
        }
        enums.add(new Holder(termsEnum, bits));
    }
    this.enums = enums.toArray(new Holder[enums.size()]);
}