Java Code Examples for org.apache.lucene.index.LeafReaderContext

The following examples show how to use org.apache.lucene.index.LeafReaderContext. They are extracted from open source projects; the originating project, source file, and license are listed above each example.
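A LeafReaderContext describes one leaf (segment) reader inside a composite IndexReader: it exposes the segment's LeafReader together with its ordinal (ord) and document base (docBase) within the top-level reader. Before the project examples, here is a minimal, self-contained sketch of how such contexts are typically obtained and inspected; the index path and the "price" doc-values field are placeholders chosen for illustration, not taken from any project below.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.store.FSDirectory;

public class LeafReaderContextDemo {
  public static void main(String[] args) throws IOException {
    // Placeholder path; point this at an existing Lucene index directory.
    try (DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
      // A composite reader exposes one LeafReaderContext per segment.
      for (LeafReaderContext context : reader.leaves()) {
        // ord is the leaf's position in the leaves list; docBase converts
        // segment-local doc IDs to top-level doc IDs (topLevelDoc = docBase + leafDoc).
        System.out.println("leaf " + context.ord
            + " docBase=" + context.docBase
            + " maxDoc=" + context.reader().maxDoc());
        // Per-segment structures such as doc values are read from the leaf reader.
        // "price" is a hypothetical numeric doc-values field used only for illustration.
        NumericDocValues values = DocValues.getNumeric(context.reader(), "price");
        if (values.advanceExact(0)) {
          System.out.println("doc 0 has price " + values.longValue());
        }
      }
    }
  }
}

Most of the examples that follow do not iterate leaves() themselves; they receive a LeafReaderContext from the search framework, typically in Weight.scorer, Collector.getLeafCollector, or doSetNextReader.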
Example 1
Source Project: lucene-solr   Source File: ToParentBlockJoinSortField.java    License: Apache License 2.0
private FieldComparator<?> getFloatComparator(int numHits) {
  return new FieldComparator.FloatComparator(numHits, getField(), (Float) missingValue) {
    @Override
    protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) throws IOException {
      SortedNumericDocValues sortedNumeric = DocValues.getSortedNumeric(context.reader(), field);
      final BlockJoinSelector.Type type = order
          ? BlockJoinSelector.Type.MAX
          : BlockJoinSelector.Type.MIN;
      final BitSet parents = parentFilter.getBitSet(context);
      final BitSet children = childFilter.getBitSet(context);
      if (children == null) {
        return DocValues.emptyNumeric();
      }
      return new FilterNumericDocValues(BlockJoinSelector.wrap(sortedNumeric, type, parents, toIter(children))) {
        @Override
        public long longValue() throws IOException {
          // undo the numericutils sortability
          return NumericUtils.sortableFloatBits((int) super.longValue());
        }
      };
    }
  };
}
 
Example 2
Source Project: onedev   Source File: DefaultIndexManager.java    License: MIT License
private String getCommitIndexVersion(final IndexSearcher searcher, AnyObjectId commitId) throws IOException {
	final AtomicReference<String> indexVersion = new AtomicReference<>(null);
	
	searcher.search(COMMIT_HASH.query(commitId.getName()), new SimpleCollector() {

		private int docBase;
		
		@Override
		public void collect(int doc) throws IOException {
			indexVersion.set(searcher.doc(docBase+doc).get(COMMIT_INDEX_VERSION.name()));
		}

		@Override
		protected void doSetNextReader(LeafReaderContext context) throws IOException {
			docBase = context.docBase;
		}

		@Override
		public boolean needsScores() {
			return false;
		}

	});
	return indexVersion.get();
}
 
Example 3
Source Project: lucene-solr   Source File: LatLonPointSpatialField.java    License: Apache License 2.0
@Override
public DoubleValues getValues(LeafReaderContext ctx, DoubleValues scores) throws IOException {
  return new DoubleValues() {

    @SuppressWarnings("unchecked")
    final FieldComparator<Double> comparator =
        (FieldComparator<Double>) getSortField(false).getComparator(1, 1);
    final LeafFieldComparator leafComparator = comparator.getLeafComparator(ctx);
    final double mult = multiplier; // so it's a local field

    double value = Double.POSITIVE_INFINITY;

    @Override
    public double doubleValue() throws IOException {
      return value;
    }

    @Override
    public boolean advanceExact(int doc) throws IOException {
      leafComparator.copy(0, doc);
      value = comparator.value(0) * mult;
      return true;
    }
  };
}
 
Example 4
Source Project: Elasticsearch   Source File: CrateDocCollector.java    License: Apache License 2.0
private Result collectLeaves(SimpleCollector collector,
                             Weight weight,
                             Iterator<LeafReaderContext> leaves,
                             @Nullable BulkScorer bulkScorer,
                             @Nullable LeafReaderContext leaf) throws IOException {
    if (bulkScorer != null) {
        assert leaf != null : "leaf must not be null if bulkScorer isn't null";
        if (processScorer(collector, leaf, bulkScorer)) return Result.PAUSED;
    }
    try {
        while (leaves.hasNext()) {
            leaf = leaves.next();
            LeafCollector leafCollector = collector.getLeafCollector(leaf);
            Scorer scorer = weight.scorer(leaf);
            if (scorer == null) {
                continue;
            }
            bulkScorer = new DefaultBulkScorer(scorer);
            if (processScorer(leafCollector, leaf, bulkScorer)) return Result.PAUSED;
        }
    } finally {
        searchContext.clearReleasables(SearchContext.Lifetime.COLLECTION);
    }
    return Result.FINISHED;
}
 
Example 5
@Override
public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int doc) throws IOException {
  Terms terms = ctx.reader().terms(field);
  if (terms == null)
    return null;
  if (terms.hasPositions() == false) {
    throw new IllegalArgumentException("Cannot create an IntervalIterator over field " + field + " because it has no indexed positions");
  }
  if (terms.hasPayloads() == false) {
    throw new IllegalArgumentException("Cannot create a payload-filtered iterator over field " + field + " because it has no indexed payloads");
  }
  TermsEnum te = terms.iterator();
  if (te.seekExact(term) == false) {
    return null;
  }
  return matches(te, doc);
}
 
Example 6
Source Project: lucene-solr   Source File: TestLRUQueryCache.java    License: Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  return new ConstantScoreWeight(this, 1) {

    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      scorerCreatedCount.incrementAndGet();
      return new ConstantScoreScorer(this, 1, scoreMode, DocIdSetIterator.all(context.reader().maxDoc()));
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return DocValues.isCacheable(ctx, field);
    }

  };
}
 
Example 7
Source Project: Elasticsearch   Source File: IncludeNestedDocsQuery.java    License: Apache License 2.0
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
    final Scorer parentScorer = parentWeight.scorer(context);

    // no matches
    if (parentScorer == null) {
        return null;
    }

    BitSet parents = parentsFilter.getBitSet(context);
    if (parents == null) {
        // No matches
        return null;
    }

    int firstParentDoc = parentScorer.iterator().nextDoc();
    if (firstParentDoc == DocIdSetIterator.NO_MORE_DOCS) {
        // No matches
        return null;
    }
    return new IncludeNestedDocsScorer(this, parentScorer, parents, firstParentDoc);
}
 
Example 8
@Override
public IntervalMatchesIterator matches(String field, LeafReaderContext ctx, int doc) throws IOException {
  Map<IntervalIterator, CachingMatchesIterator> lookup = new IdentityHashMap<>();
  for (IntervalsSource source : sources) {
    IntervalMatchesIterator mi = source.matches(field, ctx, doc);
    if (mi != null) {
      CachingMatchesIterator cmi = new CachingMatchesIterator(mi);
      lookup.put(IntervalMatches.wrapMatches(cmi, doc), cmi);
    }
  }
  if (lookup.size() < minShouldMatch) {
    return null;
  }
  MinimumShouldMatchIntervalIterator it = new MinimumShouldMatchIntervalIterator(lookup.keySet(), minShouldMatch);
  if (it.advance(doc) != doc) {
    return null;
  }
  if (it.nextInterval() == IntervalIterator.NO_MORE_INTERVALS) {
    return null;
  }
  return new MinimumMatchesIterator(it, lookup);
}
 
Example 9
Source Project: Elasticsearch   Source File: NativeScriptEngineService.java    License: Apache License 2.0
@Override
public SearchScript search(CompiledScript compiledScript, final SearchLookup lookup, @Nullable final Map<String, Object> vars) {
    final NativeScriptFactory scriptFactory = (NativeScriptFactory) compiledScript.compiled();
    return new SearchScript() {
        @Override
        public LeafSearchScript getLeafSearchScript(LeafReaderContext context) throws IOException {
            AbstractSearchScript script = (AbstractSearchScript) scriptFactory.newScript(vars);
            script.setLookup(lookup.getLeafSearchLookup(context));
            return script;
        }
        @Override
        public boolean needsScores() {
            return scriptFactory.needsScores();
        }
    };
}
 
Example 10
Source Project: lucene-solr   Source File: MultiCollector.java    License: Apache License 2.0
@Override
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
  final List<LeafCollector> leafCollectors = new ArrayList<>(collectors.length);
  for (Collector collector : collectors) {
    final LeafCollector leafCollector;
    try {
      leafCollector = collector.getLeafCollector(context);
    } catch (CollectionTerminatedException e) {
      // this leaf collector does not need this segment
      continue;
    }
    leafCollectors.add(leafCollector);
  }
  switch (leafCollectors.size()) {
    case 0:
      throw new CollectionTerminatedException();
    case 1:
      return leafCollectors.get(0);
    default:
      return new MultiLeafCollector(leafCollectors, cacheScores, scoreMode() == ScoreMode.TOP_SCORES);
  }
}
 
Example 11
Source Project: lucene-solr   Source File: TestBlockJoinValidation.java    License: Apache License 2.0
public void testAdvanceValidationForToChildBjq() throws Exception {
  Query parentQuery = new MatchAllDocsQuery();
  ToChildBlockJoinQuery blockJoinQuery = new ToChildBlockJoinQuery(parentQuery, parentsFilter);

  final LeafReaderContext context = indexSearcher.getIndexReader().leaves().get(0);
  Weight weight = indexSearcher.createWeight(indexSearcher.rewrite(blockJoinQuery), org.apache.lucene.search.ScoreMode.COMPLETE, 1);
  Scorer scorer = weight.scorer(context);
  final Bits parentDocs = parentsFilter.getBitSet(context);

  int target;
  do {
    // make the parent scorer advance to a doc ID which is not a parent
    target = TestUtil.nextInt(random(), 0, context.reader().maxDoc() - 2);
  } while (parentDocs.get(target + 1));

  final int illegalTarget = target;
  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    scorer.iterator().advance(illegalTarget);
  });
  assertTrue(expected.getMessage() != null && expected.getMessage().contains(ToChildBlockJoinQuery.INVALID_QUERY_MESSAGE));
}
 
Example 12
Source Project: Elasticsearch   Source File: FunctionScoreQuery.java    License: Apache License 2.0
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
    Explanation subQueryExpl = subQueryWeight.explain(context, doc);
    if (!subQueryExpl.isMatch()) {
        return subQueryExpl;
    }
    Explanation expl;
    if (function != null) {
        Explanation functionExplanation = function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl);
        expl = combineFunction.explain(subQueryExpl, functionExplanation, maxBoost);
    } else {
        expl = subQueryExpl;
    }
    if (minScore != null && minScore > expl.getValue()) {
        expl = Explanation.noMatch("Score value is too low, expected at least " + minScore + " but got " + expl.getValue(), expl);
    }
    return expl;
}
 
Example 13
Source Project: Elasticsearch   Source File: ValueCountAggregator.java    License: Apache License 2.0
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
        final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedBinaryDocValues values = valuesSource.bytesValues(ctx);
    return new LeafBucketCollectorBase(sub, values) {

        @Override
        public void collect(int doc, long bucket) throws IOException {
            counts = bigArrays.grow(counts, bucket + 1);
            values.setDocument(doc);
            counts.increment(bucket, values.count());
        }

    };
}
 
Example 14
Source Project: Elasticsearch   Source File: MinAggregator.java    License: Apache License 2.0
@Override
public LeafBucketCollector getLeafCollector(LeafReaderContext ctx,
        final LeafBucketCollector sub) throws IOException {
    if (valuesSource == null) {
        return LeafBucketCollector.NO_OP_COLLECTOR;
    }
    final BigArrays bigArrays = context.bigArrays();
    final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx);
    final NumericDoubleValues values = MultiValueMode.MIN.select(allValues, Double.POSITIVE_INFINITY);
    return new LeafBucketCollectorBase(sub, allValues) {

        @Override
        public void collect(int doc, long bucket) throws IOException {
            if (bucket >= mins.size()) {
                long from = mins.size();
                mins = bigArrays.grow(mins, bucket + 1);
                mins.fill(from, mins.size(), Double.POSITIVE_INFINITY);
            }
            final double value = values.get(doc);
            double min = mins.get(bucket);
            min = Math.min(min, value);
            mins.set(bucket, min);
        }

    };
}
 
Example 15
Source Project: lucene-solr   Source File: TermAutomatonQuery.java    License: Apache License 2.0
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {

  // Initialize the enums; null for a given slot means that term didn't appear in this reader
  EnumAndScorer[] enums = new EnumAndScorer[idToTerm.size()];

  boolean any = false;
  for(Map.Entry<Integer,TermStates> ent : termStates.entrySet()) {
    TermStates termStates = ent.getValue();
    assert termStates.wasBuiltFor(ReaderUtil.getTopLevelContext(context)) : "The top-reader used to create Weight is not the same as the current reader's top-reader (" + ReaderUtil.getTopLevelContext(context);
    BytesRef term = idToTerm.get(ent.getKey());
    TermState state = termStates.get(context);
    if (state != null) {
      TermsEnum termsEnum = context.reader().terms(field).iterator();
      termsEnum.seekExact(term, state);
      enums[ent.getKey()] = new EnumAndScorer(ent.getKey(), termsEnum.postings(null, PostingsEnum.POSITIONS));
      any = true;
    }
  }

  if (any) {
    return new TermAutomatonScorer(this, enums, anyTermID, idToTerm, new LeafSimScorer(stats, context.reader(), field, true));
  } else {
    return null;
  }
}
 
Example 16
Source Project: lucene-solr   Source File: BlockGroupingCollector.java    License: Apache License 2.0
@Override
protected void doSetNextReader(LeafReaderContext readerContext) throws IOException {
  if (subDocUpto != 0) {
    processGroup();
  }
  subDocUpto = 0;
  docBase = readerContext.docBase;
  //System.out.println("setNextReader base=" + docBase + " r=" + readerContext.reader);
  Scorer s = lastDocPerGroup.scorer(readerContext);
  if (s == null) {
    lastDocPerGroupBits = null;
  } else {
    lastDocPerGroupBits = s.iterator();
  }
  groupEndDocID = -1;

  currentReaderContext = readerContext;
  for (int i=0; i<comparators.length; i++) {
    leafComparators[i] = comparators[i].getLeafComparator(readerContext);
  }
}
 
Example 17
Source Project: lucene-solr   Source File: SpanPayloadCheckQuery.java    License: Apache License 2.0
@Override
public SpanScorer scorer(LeafReaderContext context) throws IOException {
  if (field == null)
    return null;

  Terms terms = context.reader().terms(field);
  if (terms != null && terms.hasPositions() == false) {
    throw new IllegalStateException("field \"" + field + "\" was indexed without position data; cannot run SpanQuery (query=" + parentQuery + ")");
  }

  final Spans spans = getSpans(context, Postings.PAYLOADS);
  if (spans == null) {
    return null;
  }
  final LeafSimScorer docScorer = getSimScorer(context);
  return new SpanScorer(this, spans, docScorer);
}
 
Example 18
Source Project: lucene-solr   Source File: ExportQParserPlugin.java    License: Apache License 2.0
@Override
public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
  final FixedBitSet set = new FixedBitSet(context.reader().maxDoc());
  this.sets[context.ord] = set;
  return new LeafCollector() {
    
    @Override
    public void setScorer(Scorable scorer) throws IOException {}
    
    @Override
    public void collect(int docId) throws IOException{
      ++totalHits;
      set.set(docId);
    }
  };
}
 
Example 19
Source Project: lucene-solr   Source File: ExportWriter.java    License: Apache License 2.0
protected void addDocsToItemWriter(List<LeafReaderContext> leaves, IteratorWriter.ItemWriter writer, SortDoc[] docsToExport, int outDocsIndex) throws IOException {
  try {
    for (int i = outDocsIndex; i >= 0; --i) {
      SortDoc s = docsToExport[i];
      writer.add((MapWriter) ew -> {
        writeDoc(s, leaves, ew);
        s.reset();
      });
    }
  } catch (Throwable e) {
    Throwable ex = e;
    while (ex != null) {
      String m = ex.getMessage();
      if (m != null && m.contains("Broken pipe")) {
        throw new IgnoreException();
      }
      ex = ex.getCause();
    }

    if (e instanceof IOException) {
      throw ((IOException) e);
    } else {
      throw new IOException(e);
    }
  }
}
 
Example 20
Source Project: lucene-solr   Source File: GlobalOrdinalsWithScoreQuery.java    License: Apache License 2.0
@Override
public boolean isCacheable(LeafReaderContext ctx) {
  // disable caching because this query relies on a top reader context
  // and holds a bitset of matching ordinals that cannot be accounted in
  // the memory used by the cache
  return false;
}
 
Example 21
Source Project: lucene-solr   Source File: AllGroupHeadsCollector.java    License: Apache License 2.0
protected SortingGroupHead(Sort sort, T groupValue, int doc, LeafReaderContext context, Scorable scorer) throws IOException {
  super(groupValue, doc, context.docBase);
  final SortField[] sortFields = sort.getSort();
  comparators = new FieldComparator[sortFields.length];
  leafComparators = new LeafFieldComparator[sortFields.length];
  for (int i = 0; i < sortFields.length; i++) {
    comparators[i] = sortFields[i].getComparator(1, i);
    leafComparators[i] = comparators[i].getLeafComparator(context);
    leafComparators[i].setScorer(scorer);
    leafComparators[i].copy(0, doc);
    leafComparators[i].setBottom(0);
  }
}
 
Example 22
Source Project: mtas   Source File: CodecCollector.java    License: Apache License 2.0
/**
 * Compute termvector number basic.
 *
 * @param docSet
 *          the doc set
 * @param termDocId
 *          the term doc id
 * @param termsEnum
 *          the terms enum
 * @param r
 *          the leaf reader
 * @param lrc
 *          the leaf reader context
 * @param postingsEnum
 *          the postings enum
 * @return the termvector number basic
 * @throws IOException
 *           Signals that an I/O exception has occurred.
 */
private static TermvectorNumberBasic computeTermvectorNumberBasic(
    List<Integer> docSet, int termDocId, TermsEnum termsEnum, LeafReader r,
    LeafReaderContext lrc, PostingsEnum postingsEnum) throws IOException {
  TermvectorNumberBasic result = new TermvectorNumberBasic();
  boolean hasDeletedDocuments = (r.getLiveDocs() != null);
  if ((docSet.size() == r.numDocs()) && !hasDeletedDocuments) {
    try {
      return computeTermvectorNumberBasic(termsEnum, r);
    } catch (IOException e) {
      log.debug("problem", e);
      // problem
    }
  }
  result.docNumber = 0;
  result.valueSum[0] = 0;
  int localTermDocId = termDocId;
  Iterator<Integer> docIterator = docSet.iterator();
  postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.FREQS);
  int docId;
  while (docIterator.hasNext()) {
    docId = docIterator.next() - lrc.docBase;
    if (docId >= localTermDocId && ((docId == localTermDocId)
        || ((localTermDocId = postingsEnum.advance(docId)) == docId))) {
      result.docNumber++;
      result.valueSum[0] += postingsEnum.freq();
    }
    if (localTermDocId == DocIdSetIterator.NO_MORE_DOCS) {
      break;
    }
  }
  return result;
}
 
Example 23
Source Project: lucene-solr   Source File: TestDisjunctionMaxQuery.java    License: Apache License 2.0
public void testSkipToFirsttimeMiss() throws IOException {
  final DisjunctionMaxQuery dq = new DisjunctionMaxQuery(
      Arrays.asList(tq("id", "d1"), tq("dek", "DOES_NOT_EXIST")), 0.0f);

  QueryUtils.check(random(), dq, s);
  assertTrue(s.getTopReaderContext() instanceof LeafReaderContext);
  final Weight dw = s.createWeight(s.rewrite(dq), ScoreMode.COMPLETE, 1);
  LeafReaderContext context = (LeafReaderContext)s.getTopReaderContext();
  final Scorer ds = dw.scorer(context);
  final boolean skipOk = ds.iterator().advance(3) != DocIdSetIterator.NO_MORE_DOCS;
  if (skipOk) {
    fail("firsttime skipTo found a match? ... "
        + r.document(ds.docID()).get("id"));
  }
}
 
Example 24
Source Project: lucene-solr   Source File: LRUQueryCache.java    License: Apache License 2.0
private DocIdSet cache(LeafReaderContext context) throws IOException {
  final BulkScorer scorer = in.bulkScorer(context);
  if (scorer == null) {
    return DocIdSet.EMPTY;
  } else {
    return cacheImpl(scorer, context.reader().maxDoc());
  }
}
 
Example 25
Source Project: lucene-solr   Source File: IntervalQuery.java    License: Apache License 2.0
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
  IntervalIterator intervals = intervalsSource.intervals(field, context);
  if (intervals == null)
    return null;
  return new IntervalScorer(this, intervals, intervalsSource.minExtent(), boost, scoreFunction);
}
 
Example 26
Source Project: mtas   Source File: MtasSpanFollowedByQuery.java    License: Apache License 2.0
@Override
public MtasSpans getSpans(LeafReaderContext context,
    Postings requiredPostings) throws IOException {
  Terms terms = context.reader().terms(field);
  if (terms == null) {
    return null; // field does not exist
  }
  MtasSpanFollowedByQuerySpans s1 = new MtasSpanFollowedByQuerySpans(
      MtasSpanFollowedByQuery.this,
      w1.spanWeight.getSpans(context, requiredPostings));
  MtasSpanFollowedByQuerySpans s2 = new MtasSpanFollowedByQuerySpans(
      MtasSpanFollowedByQuery.this,
      w2.spanWeight.getSpans(context, requiredPostings));
  return new MtasSpanFollowedBySpans(MtasSpanFollowedByQuery.this, s1, s2);
}
 
Example 27
Source Project: lucene-solr   Source File: TestFieldCacheSortRandom.java    License: Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  return new ConstantScoreWeight(this, boost) {
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      Random random = new Random(seed ^ context.docBase);
      final int maxDoc = context.reader().maxDoc();
      final NumericDocValues idSource = DocValues.getNumeric(context.reader(), "id");
      assertNotNull(idSource);
      final FixedBitSet bits = new FixedBitSet(maxDoc);
      for(int docID=0;docID<maxDoc;docID++) {
        if (random.nextFloat() <= density) {
          bits.set(docID);
          //System.out.println("  acc id=" + idSource.getInt(docID) + " docID=" + docID);
          assertEquals(docID, idSource.advance(docID));
          matchValues.add(docValues.get((int) idSource.longValue()));
        }
      }

      return new ConstantScoreScorer(this, score(), scoreMode, new BitSetIterator(bits, bits.approximateCardinality()));
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return true;
    }
  };
}
 
Example 28
Source Project: lucene-solr   Source File: BooleanWeight.java    License: Apache License 2.0
@Override
public boolean isCacheable(LeafReaderContext ctx) {
  if (query.clauses().size() > TermInSetQuery.BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD) {
    // Disallow caching large boolean queries to not encourage users
    // to build large boolean queries as a workaround to the fact that
    // we disallow caching large TermInSetQueries.
    return false;
  }
  for (WeightedBooleanClause wc : weightedClauses) {
    Weight w = wc.weight;
    if (w.isCacheable(ctx) == false)
      return false;
  }
  return true;
}
 
Example 29
Source Project: Elasticsearch   Source File: BitsetFilterCache.java    License: Apache License 2.0
private BitSet getAndLoadIfNotPresent(final Query query, final LeafReaderContext context) throws IOException, ExecutionException {
    final Object coreCacheReader = context.reader().getCoreCacheKey();
    final ShardId shardId = ShardUtils.extractShardId(context.reader());
    if (shardId != null // can't require it because of the percolator
            && index.getName().equals(shardId.getIndex()) == false) {
        // insanity
        throw new IllegalStateException("Trying to load bit set for index [" + shardId.getIndex()
                + "] with cache of index [" + index.getName() + "]");
    }
    Cache<Query, Value> filterToFbs = loadedFilters.get(coreCacheReader, new Callable<Cache<Query, Value>>() {
        @Override
        public Cache<Query, Value> call() throws Exception {
            context.reader().addCoreClosedListener(BitsetFilterCache.this);
            return CacheBuilder.newBuilder().build();
        }
    });
    return filterToFbs.get(query,new Callable<Value>() {
        @Override
        public Value call() throws Exception {
            final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(context);
            final IndexSearcher searcher = new IndexSearcher(topLevelContext);
            searcher.setQueryCache(null);
            final Weight weight = searcher.createNormalizedWeight(query, false);
            final Scorer s = weight.scorer(context);
            final BitSet bitSet;
            if (s == null) {
                bitSet = null;
            } else {
                bitSet = BitSet.of(s.iterator(), context.reader().maxDoc());
            }

            Value value = new Value(bitSet, shardId);
            listener.onCache(shardId, value.bitset);
            return value;
        }
    }).bitset;
}
 
Example 30
Source Project: lucene-solr   Source File: DisjunctionMaxQuery.java    License: Apache License 2.0
@Override
public boolean isCacheable(LeafReaderContext ctx) {
  if (weights.size() > TermInSetQuery.BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD) {
    // Disallow caching large dismax queries to not encourage users
    // to build large dismax queries as a workaround to the fact that
    // we disallow caching large TermInSetQueries.
    return false;
  }
  for (Weight w : weights) {
    if (w.isCacheable(ctx) == false)
      return false;
  }
  return true;
}