Java Code Examples for org.apache.lucene.index.ReaderUtil#getTopLevelContext()

The following examples show how to use org.apache.lucene.index.ReaderUtil#getTopLevelContext(). Each example is drawn from an open source project; the source file, project, and license are noted above each listing.
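
As a quick orientation before the examples: given any context in a reader's tree (typically a LeafReaderContext), ReaderUtil#getTopLevelContext() walks the parent pointers and returns the root IndexReaderContext, whose reader spans the whole index. The following minimal sketch is illustrative and not taken from any of the projects below; the in-memory index setup is a hypothetical fixture (ByteBuffersDirectory requires Lucene 8.x):

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.ReaderUtil;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class GetTopLevelContextDemo {
  public static void main(String[] args) throws Exception {
    try (Directory dir = new ByteBuffersDirectory()) {
      // Index a single document so the reader has at least one leaf.
      try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
        Document doc = new Document();
        doc.add(new TextField("body", "hello world", Field.Store.NO));
        writer.addDocument(doc);
      }
      try (DirectoryReader reader = DirectoryReader.open(dir)) {
        LeafReaderContext leaf = reader.leaves().get(0);
        // Walks leaf.parent until it reaches the root of the context tree.
        IndexReaderContext top = ReaderUtil.getTopLevelContext(leaf);
        // The root is the same context the reader itself hands out:
        assert top == reader.getContext();
        System.out.println("top-level maxDoc = " + top.reader().maxDoc());
      }
    }
  }
}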
Example 1
Source File: LRUQueryCache.java    From lucene-solr with Apache License 2.0
@Override
public boolean test(LeafReaderContext context) {
  final int maxDoc = context.reader().maxDoc();
  if (maxDoc < minSize) {
    return false;
  }
  // Compare this segment's size to the whole index; the top-level context's reader spans all segments.
  final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
  final float sizeRatio = (float) maxDoc / topLevelContext.reader().maxDoc();
  return sizeRatio >= minSizeRatio;
}
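
In this predicate from LRUQueryCache, the top-level context supplies the denominator: a segment is only considered worth caching for if it holds at least minSize documents and accounts for at least minSizeRatio of the whole index's maxDoc.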
 
Example 2
Source File: GraphTermsQParserPlugin.java    From lucene-solr with Apache License 2.0
public DocSet getDocSet(IndexSearcher searcher) throws IOException {
  // Resolve the top-level context: its leaves enumerate every segment, and its reader's maxDoc sizes the builder.
  IndexReaderContext top = ReaderUtil.getTopLevelContext(searcher.getTopReaderContext());
  List<LeafReaderContext> segs = top.leaves();
  DocSetBuilder builder = new DocSetBuilder(top.reader().maxDoc(), Math.min(64, (top.reader().maxDoc() >>> 10) + 4));
  PointValues[] segPoints = new PointValues[segs.size()];
  for (int i=0; i<segPoints.length; i++) {
    segPoints[i] = segs.get(i).reader().getPointValues(field);
  }

  int maxCollect = Math.min(maxDocFreq, top.reader().maxDoc());

  PointSetQuery.CutoffPointVisitor visitor = new PointSetQuery.CutoffPointVisitor(maxCollect);
  PrefixCodedTerms.TermIterator iterator = sortedPackedPoints.iterator();
  outer: for (BytesRef point = iterator.next(); point != null; point = iterator.next()) {
    visitor.setPoint(point);
    for (int i=0; i<segs.size(); i++) {
      if (segPoints[i] == null) continue;
      visitor.setBase(segs.get(i).docBase);
      segPoints[i].intersect(visitor);
      if (visitor.getCount() > maxDocFreq) {
        continue outer;
      }
    }
    int collected = visitor.getCount();
    int[] ids = visitor.getGlobalIds();
    for (int i=0; i<collected; i++) {
      builder.add( ids[i] );
    }
  }

  FixedBitSet liveDocs = getLiveDocs(searcher);
  DocSet set = builder.build(liveDocs);
  return set;
}
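
Here the top-level context does double duty: top.leaves() enumerates every segment whose point values are intersected, and top.reader().maxDoc() bounds both the DocSetBuilder and maxCollect. Each segment's docBase, fed to the visitor, maps per-segment hits into the global doc-id space the builder expects.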
 
Example 3
Source File: NestedAggregator.java    From Elasticsearch with Apache License 2.0
@Override
public LeafBucketCollector getLeafCollector(final LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
    // Reset parentFilter, so we resolve the parentDocs for each new segment being searched
    this.parentFilter = null;
    final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(ctx);
    final IndexSearcher searcher = new IndexSearcher(topLevelContext);
    searcher.setQueryCache(null);
    final Weight weight = searcher.createNormalizedWeight(childFilter, false);
    Scorer childDocsScorer = weight.scorer(ctx);
    if (childDocsScorer == null) {
        childDocs = null;
    } else {
        childDocs = childDocsScorer.iterator();
    }

    return new LeafBucketCollectorBase(sub, null) {
        @Override
        public void collect(int parentDoc, long bucket) throws IOException {
            // here we translate the parent doc to a list of its nested docs, and then call super.collect for every one of them so they'll be collected

            // if parentDoc is 0 then this parent has no child docs (child docs always appear before their parent doc), so we can skip:
            if (parentDoc == 0 || childDocs == null) {
                return;
            }
            if (parentFilter == null) {
                // The aggs are instantiated in reverse: first the innermost nested aggs, lastly the top-level aggs.
                // So at the time a nested 'nested' agg is parsed, its closest parent nested agg hasn't been constructed yet.
                // The trick is to set the parent filter at the last moment, just before it is needed, so that the
                // closest parent's child filter can serve as the parent filter.

                // Additional NOTE: this logic used to run in the setNextReader(...) method, but the assumption
                // that agg instances are constructed in reverse doesn't hold when buckets are constructed lazily during
                // aggs execution.
                Query parentFilterNotCached = findClosestNestedPath(parent());
                if (parentFilterNotCached == null) {
                    parentFilterNotCached = Queries.newNonNestedFilter();
                }
                parentFilter = context.searchContext().bitsetFilterCache().getBitSetProducer(parentFilterNotCached);
                parentDocs = parentFilter.getBitSet(ctx);
                if (parentDocs == null) {
                    // No parent docs in this segment; set childDocs to null so future invocations exit early.
                    childDocs = null;
                    return;
                }
            }

            final int prevParentDoc = parentDocs.prevSetBit(parentDoc - 1);
            int childDocId = childDocs.docID();
            if (childDocId <= prevParentDoc) {
                childDocId = childDocs.advance(prevParentDoc + 1);
            }

            for (; childDocId < parentDoc; childDocId = childDocs.nextDoc()) {
                collectBucket(sub, childDocId, bucket);
            }
        }
    };
}
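
The fresh IndexSearcher built over ReaderUtil.getTopLevelContext(ctx) matters here: a Weight is only valid for leaves of the context tree it was created against, so the aggregator cannot reuse a searcher tied to some other reader when it later calls weight.scorer(ctx). Disabling the query cache keeps this throwaway searcher from populating shared cache state.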
 
Example 4
Source File: DoubleRangeFacetCounts.java    From lucene-solr with Apache License 2.0
private void count(DoubleValuesSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {

    DoubleRange[] ranges = (DoubleRange[]) this.ranges;

    LongRange[] longRanges = new LongRange[ranges.length];
    for(int i=0;i<ranges.length;i++) {
      DoubleRange range = ranges[i];
      longRanges[i] = new LongRange(range.label,
                                    NumericUtils.doubleToSortableLong(range.min), true,
                                    NumericUtils.doubleToSortableLong(range.max), true);
    }

    LongRangeCounter counter = new LongRangeCounter(longRanges);

    int missingCount = 0;
    for (MatchingDocs hits : matchingDocs) {
      DoubleValues fv = valueSource.getValues(hits.context, null);
      
      totCount += hits.totalHits;
      final DocIdSetIterator fastMatchDocs;
      if (fastMatchQuery != null) {
        final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(hits.context);
        final IndexSearcher searcher = new IndexSearcher(topLevelContext);
        searcher.setQueryCache(null);
        final Weight fastMatchWeight = searcher.createWeight(searcher.rewrite(fastMatchQuery), ScoreMode.COMPLETE_NO_SCORES, 1);
        Scorer s = fastMatchWeight.scorer(hits.context);
        if (s == null) {
          continue;
        }
        fastMatchDocs = s.iterator();
      } else {
        fastMatchDocs = null;
      }

      DocIdSetIterator docs = hits.bits.iterator();

      for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; ) {
        if (fastMatchDocs != null) {
          int fastMatchDoc = fastMatchDocs.docID();
          if (fastMatchDoc < doc) {
            fastMatchDoc = fastMatchDocs.advance(doc);
          }

          if (doc != fastMatchDoc) {
            doc = docs.advance(fastMatchDoc);
            continue;
          }
        }
        // Skip missing docs:
        if (fv.advanceExact(doc)) {
          counter.add(NumericUtils.doubleToSortableLong(fv.doubleValue()));
        } else {
          missingCount++;
        }

        doc = docs.nextDoc();
      }
    }

    missingCount += counter.fillCounts(counts);
    totCount -= missingCount;
  }
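
Example 4 reduces the double case to the long case: each DoubleRange is rewritten as a LongRange over NumericUtils.doubleToSortableLong values, so one LongRangeCounter serves both this class and the LongRangeFacetCounts example below. The top-level context is needed only when fastMatchQuery is set, because createWeight must run against the full reader tree even though a scorer is then pulled per matching-docs leaf.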
 
Example 5
Source File: LongRangeFacetCounts.java    From lucene-solr with Apache License 2.0
private void count(LongValuesSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {

    LongRange[] ranges = (LongRange[]) this.ranges;

    LongRangeCounter counter = new LongRangeCounter(ranges);

    int missingCount = 0;
    for (MatchingDocs hits : matchingDocs) {
      LongValues fv = valueSource.getValues(hits.context, null);
      
      totCount += hits.totalHits;
      final DocIdSetIterator fastMatchDocs;
      if (fastMatchQuery != null) {
        final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(hits.context);
        final IndexSearcher searcher = new IndexSearcher(topLevelContext);
        searcher.setQueryCache(null);
        final Weight fastMatchWeight = searcher.createWeight(searcher.rewrite(fastMatchQuery), ScoreMode.COMPLETE_NO_SCORES, 1);
        Scorer s = fastMatchWeight.scorer(hits.context);
        if (s == null) {
          continue;
        }
        fastMatchDocs = s.iterator();
      } else {
        fastMatchDocs = null;
      }

      DocIdSetIterator docs = hits.bits.iterator();      
      for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; ) {
        if (fastMatchDocs != null) {
          int fastMatchDoc = fastMatchDocs.docID();
          if (fastMatchDoc < doc) {
            fastMatchDoc = fastMatchDocs.advance(doc);
          }

          if (doc != fastMatchDoc) {
            doc = docs.advance(fastMatchDoc);
            continue;
          }
        }
        // Skip missing docs:
        if (fv.advanceExact(doc)) {
          counter.add(fv.longValue());
        } else {
          missingCount++;
        }

        doc = docs.nextDoc();
      }
    }
    
    missingCount += counter.fillCounts(counts);
    totCount -= missingCount;
  }
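
Both range-counting examples interleave the same leapfrog intersection: the main docs iterator and the fastMatchDocs iterator repeatedly advance to each other's docID, so a range is only counted for documents present in both. Below is a minimal sketch of that pattern as a standalone helper; the Leapfrog class and its intersect method are illustrative names, not part of Lucene:

import java.io.IOException;
import java.util.function.IntConsumer;

import org.apache.lucene.search.DocIdSetIterator;

final class Leapfrog {
  /** Invokes onMatch for every doc id present in both iterators. */
  static void intersect(DocIdSetIterator a, DocIdSetIterator b, IntConsumer onMatch) throws IOException {
    int doc = a.nextDoc();
    while (doc != DocIdSetIterator.NO_MORE_DOCS) {
      int other = b.docID();
      if (other < doc) {
        other = b.advance(doc); // catch b up to a's position
      }
      if (doc == other) {
        onMatch.accept(doc);    // both sides agree: a match
        doc = a.nextDoc();
      } else {
        doc = a.advance(other); // a is behind: jump forward
      }
    }
  }
}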