Java Code Examples for org.apache.lucene.index.IndexReaderContext

The following examples show how to use org.apache.lucene.index.IndexReaderContext. They are extracted from open source projects; the source project and file are noted above each example.
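All of the examples share one pattern: obtain the top-level IndexReaderContext from an IndexReader (or via IndexSearcher.getTopReaderContext()) and walk its leaves() to work one segment at a time. As a minimal, self-contained sketch of that pattern (the index path is a placeholder, not taken from any example below):

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexReaderContext;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.FSDirectory;

public class IndexReaderContextBasics {
  public static void main(String[] args) throws IOException {
    // Placeholder path; point this at a real Lucene index.
    try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
      // Top-level context over the reader's current view of the index.
      IndexReaderContext context = reader.getContext();
      // Each leaf is one segment; docBase converts segment-local doc ids
      // to top-level doc ids.
      for (LeafReaderContext leaf : context.leaves()) {
        System.out.println("segment ord=" + leaf.ord
            + " docBase=" + leaf.docBase
            + " maxDoc=" + leaf.reader().maxDoc());
      }
    }
  }
}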
Example 1
Source Project: Elasticsearch   Source File: BlendedTermQuery.java   License: Apache License 2.0
@Override
public Query rewrite(IndexReader reader) throws IOException {
    IndexReaderContext context = reader.getContext();
    TermContext[] ctx = new TermContext[terms.length];
    int[] docFreqs = new int[ctx.length];
    for (int i = 0; i < terms.length; i++) {
        ctx[i] = TermContext.build(context, terms[i]);
        docFreqs[i] = ctx[i].docFreq();
    }

    final int maxDoc = reader.maxDoc();
    blend(ctx, maxDoc, reader);
    Query query = topLevelQuery(terms, ctx, docFreqs, maxDoc);
    query.setBoost(getBoost());
    return query;
}
 
Example 2
Source Project: lucene-solr   Source File: BlendedTermQuery.java   License: Apache License 2.0
private static TermStates adjustFrequencies(IndexReaderContext readerContext,
                                            TermStates ctx, int artificialDf, long artificialTtf) throws IOException {
  List<LeafReaderContext> leaves = readerContext.leaves();
  final int len;
  if (leaves == null) {
    len = 1;
  } else {
    len = leaves.size();
  }
  TermStates newCtx = new TermStates(readerContext);
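  // Copy each leaf's TermState into the fresh context, then replace the
  // accumulated statistics with the artificial (blended) df/ttf values.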
  for (int i = 0; i < len; ++i) {
    TermState termState = ctx.get(leaves.get(i));
    if (termState == null) {
      continue;
    }
    newCtx.register(termState, i);
  }
  newCtx.accumulateStatistics(artificialDf, artificialTtf);
  return newCtx;
}
 
Example 3
Source Project: lucene-solr   Source File: HashQParserPlugin.java   License: Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {

  SolrIndexSearcher solrIndexSearcher = (SolrIndexSearcher)searcher;
  IndexReaderContext context = solrIndexSearcher.getTopReaderContext();

  List<LeafReaderContext> leaves = context.leaves();
  FixedBitSet[] fixedBitSets = new FixedBitSet[leaves.size()];

  for(LeafReaderContext leaf : leaves) {
    try {
      SegmentPartitioner segmentPartitioner = new SegmentPartitioner(leaf,worker,workers, keys, solrIndexSearcher);
      segmentPartitioner.run();
      fixedBitSets[segmentPartitioner.context.ord] = segmentPartitioner.docs;
    } catch(Exception e) {
      throw new IOException(e);
    }
  }

  ConstantScoreQuery constantScoreQuery = new ConstantScoreQuery(new BitsFilter(fixedBitSets));
  return searcher.rewrite(constantScoreQuery).createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
}
 
Example 4
public SecureIndexSearcher(IndexReaderContext context, ExecutorService executor,
    AccessControlFactory accessControlFactory, Collection<String> readAuthorizations,
    Collection<String> discoverAuthorizations, Set<String> discoverableFields, String defaultReadMaskMessage)
    throws IOException {
  super(context, executor);
  _accessControlFactory = accessControlFactory;
  _readAuthorizations = readAuthorizations;
  _discoverAuthorizations = discoverAuthorizations;
  _discoverableFields = discoverableFields;
  _defaultReadMaskMessage = defaultReadMaskMessage;
  _accessControlReader = _accessControlFactory.getReader(readAuthorizations, discoverAuthorizations,
      discoverableFields, _defaultReadMaskMessage);
  _secureIndexReader = getSecureIndexReader(context);
  List<AtomicReaderContext> leaves = _secureIndexReader.leaves();
  _leaveMap = new HashMap<Object, AtomicReaderContext>();
  for (AtomicReaderContext atomicReaderContext : leaves) {
    AtomicReader atomicReader = atomicReaderContext.reader();
    SecureAtomicReader secureAtomicReader = (SecureAtomicReader) atomicReader;
    AtomicReader originalReader = secureAtomicReader.getOriginalReader();
    Object coreCacheKey = originalReader.getCoreCacheKey();
    _leaveMap.put(coreCacheKey, atomicReaderContext);
  }
}
 
Example 5
Source Project: crate   Source File: BlendedTermQuery.java   License: Apache License 2.0
@Override
public Query rewrite(IndexReader reader) throws IOException {
    Query rewritten = super.rewrite(reader);
    if (rewritten != this) {
        return rewritten;
    }
    IndexReaderContext context = reader.getContext();
    TermStates[] ctx = new TermStates[terms.length];
    int[] docFreqs = new int[ctx.length];
    for (int i = 0; i < terms.length; i++) {
        ctx[i] = TermStates.build(context, terms[i], true);
        docFreqs[i] = ctx[i].docFreq();
    }

    final int maxDoc = reader.maxDoc();
    blend(ctx, maxDoc, reader);
    return topLevelQuery(terms, ctx, docFreqs, maxDoc);
}
 
Example 6
Source Project: ltr4l   Source File: DefaultLTRQParserPlugin.java   License: Apache License 2.0
@Override
public Query parse() throws SyntaxError {
  IndexReaderContext context = req.getSearcher().getTopReaderContext();
  for(FieldFeatureExtractorFactory factory: featuresSpec){
    String fieldName = factory.getFieldName();
    FieldType fieldType = req.getSchema().getFieldType(fieldName);
    Analyzer analyzer = fieldType.getQueryAnalyzer();
    factory.init(context, FieldFeatureExtractorFactory.terms(fieldName, qstr, analyzer));
  }

  return new DefaultLTRQuery(featuresSpec, ranker);
}
 
Example 7
Source Project: ltr4l   Source File: AbstractLTRQueryTestCase.java   License: Apache License 2.0
protected FieldFeatureExtractorFactory getTF(String featureName, String fieldName, IndexReaderContext context, Term... terms){
  FieldFeatureExtractorFactory factory = new FieldFeatureTFExtractorFactory(featureName, fieldName);
  if(context != null){
    factory.init(context, terms);
  }
  return factory;
}
 
Example 8
Source Project: ltr4l   Source File: AbstractLTRQueryTestCase.java   License: Apache License 2.0
protected FieldFeatureExtractorFactory getIDF(String featureName, String fieldName, IndexReaderContext context, Term... terms){
  FieldFeatureExtractorFactory factory = new FieldFeatureIDFExtractorFactory(featureName, fieldName);
  if(context != null){
    factory.init(context, terms);
  }
  return factory;
}
 
Example 9
Source Project: ltr4l   Source File: AbstractLTRQueryTestCase.java   License: Apache License 2.0
protected FieldFeatureExtractorFactory getTFIDF(String featureName, String fieldName, IndexReaderContext context, Term... terms){
  FieldFeatureExtractorFactory factory = new FieldFeatureTFIDFExtractorFactory(featureName, fieldName);
  if(context != null){
    factory.init(context, terms);
  }
  return factory;
}
 
Example 10
Source Project: ltr4l   Source File: AbstractLTRQueryTestCase.java   License: Apache License 2.0
protected FieldFeatureExtractorFactory getSV(String featureName, String fieldName, IndexReaderContext context, Term... terms){
  FieldFeatureExtractorFactory factory = new FieldFeatureStoredValueExtractorFactory(featureName, fieldName);
  if(context != null){
    factory.init(context, terms);
  }
  return factory;
}
 
Example 11
Source Project: Elasticsearch   Source File: BitsetFilterCache.java   License: Apache License 2.0
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
    final Object coreCacheReader = context.reader().getCoreCacheKey();
    final ShardId shardId = ShardUtils.extractShardId(context.reader());
    if (shardId != null // can't require it because of the percolator
            && index.getName().equals(shardId.getIndex()) == false) {
        // insanity
        throw new IllegalStateException("Trying to load bit set for index [" + shardId.getIndex()
                + "] with cache of index [" + index.getName() + "]");
    }
    Cache<Query, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Query, Value>>() {
        @Override
        public Cache<Query, Value> call() throws Exception {
            context.reader().addCoreClosedListener(BitsetFilterCache.this);
            return CacheBuilder.newBuilder().build();
        }
    });
    return filterToFbs.get(query,new Callable<Value>() {
        @Override
        public Value call() throws Exception {
            final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
            final IndexSearcher searcher = new IndexSearcher(topLevelContext);
            searcher.setQueryCache(null);
            final Weight weight = searcher.createNormalizedWeight(query, false);
            final Scorer s = weight.scorer(context);
            final BitSet bitSet;
            if (s == null) {
                bitSet = null;
            } else {
                bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
            }

            Value value = new Value(bitSet, shardId);
            listener.onCache(shardId, value.bitset);
            return value;
        }
    }).bitset;
}
 
Example 12
public AbstractAuthorityQueryWeight(SolrIndexSearcher searcher, boolean needsScores, Query query, String authTermName, String authTermText) throws IOException
{
    super(query);
    this.searcher = searcher;
    searcher.collectionStatistics(authTermName);
    final IndexReaderContext context = searcher.getTopReaderContext();
    final Term term = new Term(authTermName, authTermText);
    final TermContext termContext = TermContext.build(context, term);
    searcher.termStatistics(term, termContext);
    this.needsScores = needsScores;
}
 
Example 13
Source Project: linden   Source File: LindenFieldCacheImpl.java   License: Apache License 2.0
public UIDMaps getUIDMaps(IndexReaderContext topReaderContext, String uidField) throws IOException {
  PerReaderUIDMaps[] uidMapsArray = new PerReaderUIDMaps[topReaderContext.leaves().size()];
  for (int i = 0; i < topReaderContext.leaves().size(); ++i) {
    uidMapsArray[i] = (PerReaderUIDMaps) caches.get(UIDCache.class)
        .get(topReaderContext.leaves().get(i).reader(), new CacheKey(uidField, null), false);
  }
  return new UIDMaps(uidMapsArray);
}
 
Example 14
Source Project: lucene-solr   Source File: NumberRangePrefixTreeStrategy.java   License: Apache License 2.0
/** Calculates facets between {@code start} and {@code end} to a detail level one greater than that provided by the
 * arguments. For example providing March to October of 2014 would return facets to the day level of those months.
 * This is just a convenience method.
 * @see #calcFacets(IndexReaderContext, Bits, Shape, int)
 */
public Facets calcFacets(IndexReaderContext context, Bits topAcceptDocs, UnitNRShape start, UnitNRShape end)
    throws IOException {
  Shape facetRange = getGrid().toRangeShape(start, end);
  int detailLevel = Math.max(start.getLevel(), end.getLevel()) + 1;
  return calcFacets(context, topAcceptDocs, facetRange, detailLevel);
}
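A hypothetical call site, assuming an existing NumberRangePrefixTreeStrategy instance, an open IndexSearcher, and two UnitNRShape bounds (for example, March and October 2014 built from a DateRangePrefixTree); the variable names are illustrative, not from the example above:

// Hedged usage sketch: strategy, searcher, and the two shapes are assumed
// to exist already; null accept-docs means "all documents".
Facets facets = strategy.calcFacets(searcher.getTopReaderContext(),
    null, march2014, october2014);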
 
Example 15
Source Project: lucene-solr   Source File: TestGrouping.java   License: Apache License 2.0
public ShardState(IndexSearcher s) {
  final IndexReaderContext ctx = s.getTopReaderContext();
  final List<LeafReaderContext> leaves = ctx.leaves();
  subSearchers = new ShardSearcher[leaves.size()];
  for(int searcherIDX=0;searcherIDX<subSearchers.length;searcherIDX++) {
    subSearchers[searcherIDX] = new ShardSearcher(leaves.get(searcherIDX), ctx);
  }

  docStarts = new int[subSearchers.length];
  for(int subIDX=0;subIDX<docStarts.length;subIDX++) {
    docStarts[subIDX] = leaves.get(subIDX).docBase;
    //System.out.println("docStarts[" + subIDX + "]=" + docStarts[subIDX]);
  }
}
 
Example 16
Source Project: lucene-solr   Source File: TermAutomatonQuery.java   License: Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  IndexReaderContext context = searcher.getTopReaderContext();
  Map<Integer,TermStates> termStates = new HashMap<>();

  for (Map.Entry<BytesRef,Integer> ent : termToID.entrySet()) {
    if (ent.getKey() != null) {
      termStates.put(ent.getValue(), TermStates.build(context, new Term(field, ent.getKey()), scoreMode.needsScores()));
    }
  }

  return new TermAutomatonWeight(det, searcher, termStates, boost);
}
 
Example 17
Source Project: lucene-solr   Source File: TermQuery.java   License: Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final IndexReaderContext context = searcher.getTopReaderContext();
  final TermStates termState;
  if (perReaderTermState == null
      || perReaderTermState.wasBuiltFor(context) == false) {
    termState = TermStates.build(context, term, scoreMode.needsScores());
  } else {
    // PRTS was pre-build for this IS
    termState = this.perReaderTermState;
  }

  return new TermWeight(searcher, scoreMode, boost, termState);
}
 
Example 18
Source Project: lucene-solr   Source File: IndexSearcher.java   License: Apache License 2.0
IndexSearcher(IndexReaderContext context, Executor executor, SliceExecutor sliceExecutor) {
  assert context.isTopLevel: "IndexSearcher's ReaderContext must be topLevel for reader" + context.reader();
  assert (sliceExecutor == null) == (executor==null);

  reader = context.reader();
  this.executor = executor;
  this.sliceExecutor = sliceExecutor;
  this.readerContext = context;
  leafContexts = context.leaves();
  this.leafSlices = executor == null ? null : slices(leafContexts);
}
 
Example 19
Source Project: lucene-solr   Source File: SpanTermQuery.java   License: Apache License 2.0
@Override
public SpanWeight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final TermStates context;
  final IndexReaderContext topContext = searcher.getTopReaderContext();
  if (termStates == null || termStates.wasBuiltFor(topContext) == false) {
    context = TermStates.build(topContext, term, scoreMode.needsScores());
  }
  else {
    context = termStates;
  }
  return new SpanTermWeight(context, searcher, scoreMode.needsScores() ? Collections.singletonMap(term, context) : null, boost);
}
 
Example 20
Source Project: lucene-solr   Source File: LRUQueryCache.java   License: Apache License 2.0
@Override
public boolean test(LeafReaderContext context) {
  final int maxDoc = context.reader().maxDoc();
  if (maxDoc < minSize) {
    return false;
  }
  final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
  final float sizeRatio = (float) context.reader().maxDoc() / topLevelContext.reader().maxDoc();
  return sizeRatio >= minSizeRatio;
}
 
Example 21
Source Project: lucene-solr   Source File: TestNearSpansOrdered.java   License: Apache License 2.0
/**
 * not a direct test of NearSpans, but a demonstration of how/when
 * this causes problems
 */
public void testSpanNearScorerSkipTo1() throws Exception {
  SpanNearQuery q = makeQuery();
  Weight w = searcher.createWeight(searcher.rewrite(q), ScoreMode.COMPLETE, 1);
  IndexReaderContext topReaderContext = searcher.getTopReaderContext();
  LeafReaderContext leave = topReaderContext.leaves().get(0);
  Scorer s = w.scorer(leave);
  assertEquals(1, s.iterator().advance(1));
}
 
Example 22
Source Project: lucene-solr   Source File: GraphTermsQParserPlugin.java   License: Apache License 2.0
public DocSet getDocSet(IndexSearcher searcher) throws IOException {
  IndexReaderContext top = ReaderUtil.getTopLevelContext(searcher.getTopReaderContext());
  List<LeafReaderContext> segs = top.leaves();
  DocSetBuilder builder = new DocSetBuilder(top.reader().maxDoc(), Math.min(64,(top.reader().maxDoc()>>>10)+4));
  PointValues[] segPoints = new PointValues[segs.size()];
  for (int i=0; i<segPoints.length; i++) {
    segPoints[i] = segs.get(i).reader().getPointValues(field);
  }

  int maxCollect = Math.min(maxDocFreq, top.reader().maxDoc());

  PointSetQuery.CutoffPointVisitor visitor = new PointSetQuery.CutoffPointVisitor(maxCollect);
  PrefixCodedTerms.TermIterator iterator = sortedPackedPoints.iterator();
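  // For each encoded point, count matches across every segment; abandon a
  // point as soon as its cumulative count exceeds maxDocFreq.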
  outer: for (BytesRef point = iterator.next(); point != null; point = iterator.next()) {
    visitor.setPoint(point);
    for (int i=0; i<segs.size(); i++) {
      if (segPoints[i] == null) continue;
      visitor.setBase(segs.get(i).docBase);
      segPoints[i].intersect(visitor);
      if (visitor.getCount() > maxDocFreq) {
        continue outer;
      }
    }
    int collected = visitor.getCount();
    int[] ids = visitor.getGlobalIds();
    for (int i=0; i<collected; i++) {
      builder.add( ids[i] );
    }
  }

  FixedBitSet liveDocs = getLiveDocs(searcher);
  DocSet set = builder.build(liveDocs);
  return set;
}
 
Example 23
Source Project: lucene-solr   Source File: TermsComponent.java   License: Apache License 2.0
private static void collectTermStates(IndexReaderContext topReaderContext, TermStates[] contextArray,
                                      Term[] queryTerms) throws IOException {
  TermsEnum termsEnum = null;
  for (LeafReaderContext context : topReaderContext.leaves()) {
    for (int i = 0; i < queryTerms.length; i++) {
      Term term = queryTerms[i];
      final Terms terms = context.reader().terms(term.field());
      if (terms == null) {
        // field does not exist
        continue;
      }
      termsEnum = terms.iterator();
      assert termsEnum != null;

      if (termsEnum == TermsEnum.EMPTY) continue;

      TermStates termStates = contextArray[i];
      if (termsEnum.seekExact(term.bytes())) {
        if (termStates == null) {
          termStates = new TermStates(topReaderContext);
          contextArray[i] = termStates;
        }
        termStates.accumulateStatistics(termsEnum.docFreq(), termsEnum.totalTermFreq());
      }
    }
  }
}
 
Example 24
Source Project: lucene-solr   Source File: TestIndexSearcher.java   License: Apache License 2.0
@SuppressWarnings({"unchecked"})
private String getStringVal(SolrQueryRequest sqr, String field, int doc) throws IOException {
  SchemaField sf = sqr.getSchema().getField(field);
  ValueSource vs = sf.getType().getValueSource(sf, null);
  @SuppressWarnings({"rawtypes"})
  Map context = ValueSource.newContext(sqr.getSearcher());
  vs.createWeight(context, sqr.getSearcher());
  IndexReaderContext topReaderContext = sqr.getSearcher().getTopReaderContext();
  List<LeafReaderContext> leaves = topReaderContext.leaves();
  int idx = ReaderUtil.subIndex(doc, leaves);
  LeafReaderContext leaf = leaves.get(idx);
  FunctionValues vals = vs.getValues(context, leaf);
  return vals.strVal(doc-leaf.docBase);
}
 
Example 25
Source Project: lucene4ir   Source File: TermsSet.java   License: Apache License 2.0
private Set<String> getTerms(IndexReader ir) {
    Set<String> t = new HashSet<>();
    for (int i = 0; i < ir.leaves().size(); i++) {
        Terms termsList;
        try {
            // Get all the terms at this level of the tree.
            termsList = ir.leaves().get(i).reader().terms(Lucene4IRConstants.FIELD_ALL);
            if (termsList != null && termsList.size() > 0) {
                TermsEnum te = termsList.iterator();
                BytesRef termBytes;
                while ((termBytes = te.next()) != null) {
                    t.add(termBytes.utf8ToString());
                }
            }

            // Get all the terms at the next level of the tree.
            if (ir.leaves().get(i).children() != null && ir.leaves().get(i).children().size() > 0) {
                for (IndexReaderContext c : ir.leaves().get(i).children()) {
                    t.addAll(getTerms(c.reader()));
                }
            }

        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    return t;
}
 
Example 26
Source Project: mtas   Source File: MtasSpanMatchAllQuery.java   License: Apache License 2.0
@Override
public void extractTermContexts(Map<Term, TermContext> contexts) {
  Term term = new Term(field);
  if (!contexts.containsKey(term)) {
    IndexReaderContext topContext = searcher.getTopReaderContext();
    try {
      contexts.put(term, TermContext.build(topContext, term));
    } catch (IOException e) {
      log.debug(e);
      // fail
    }
  }
}
 
Example 27
Source Project: mtas   Source File: MtasExtendedSpanTermQuery.java   License: Apache License 2.0
@Override
public SpanWeight createWeight(IndexSearcher searcher, boolean needsScores, float boost)
    throws IOException {
  final TermContext context;
  final IndexReaderContext topContext = searcher.getTopReaderContext();
  if (termContext == null) {
    context = TermContext.build(topContext, localTerm);
  } else {
    context = termContext;
  }
  return new SpanTermWeight(context, searcher,
      needsScores ? Collections.singletonMap(localTerm, context) : null, boost);
}
 
Example 28
Source Project: querqy   Source File: DocumentFrequencyCorrection.java   License: Apache License 2.0
public DocumentFrequencyAndTermContext getDocumentFrequencyAndTermContext(final int tqIndex,
                                                                          final IndexReaderContext indexReaderContext)
        throws IOException {

    TermStats ts = termStats;
    if (ts == null || ts.topReaderContext != indexReaderContext) {
        ts = calculateTermContexts(indexReaderContext);
    }

    return new DocumentFrequencyAndTermContext(ts.documentFrequencies[tqIndex], ts.termStates[tqIndex]);
}
 
Example 29
Source Project: querqy   Source File: FieldBoostTermQueryBuilder.java   License: Apache License 2.0
@Override
public Weight createWeight(final IndexSearcher searcher, final ScoreMode scoreMode, final float boost)
        throws IOException {
    final IndexReaderContext context = searcher.getTopReaderContext();
    final TermStates termState = TermStates.build(context, term, scoreMode.needsScores());
    // TODO: set boosts to 1f if needsScores is false?
    return new FieldBoostWeight(termState, boost, fieldBoost.getBoost(term.field(), searcher.getIndexReader()));
}
 
Example 30
@Override
public long getSegmentCount() throws IOException {
  IndexSearcherCloseable indexSearcherClosable = getIndexSearcher(false);
  try {
    IndexReader indexReader = indexSearcherClosable.getIndexReader();
    IndexReaderContext context = indexReader.getContext();
    return context.leaves().size();
  } finally {
    indexSearcherClosable.close();
  }
}