Java Code Examples for org.apache.lucene.search.Scorer#docID()
The following examples show how to use org.apache.lucene.search.Scorer#docID(). Each example notes the project and source file it was taken from, along with its license.
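Before the project examples, here is a minimal, self-contained sketch of the usual Scorer#docID() pattern: create a Weight, obtain a per-segment Scorer, advance its iterator, and read docID() to learn which segment-relative document the scorer is positioned on. The class and method names (ScorerDocIdExample, scoreAll) are illustrative only, and the sketch assumes a Lucene 8-style API (createWeight taking a ScoreMode), matching the examples below.

import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

public class ScorerDocIdExample {
  // Iterates every match of a query, one segment (leaf) at a time.
  static void scoreAll(IndexSearcher searcher, Query query) throws IOException {
    Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1f);
    for (LeafReaderContext leaf : searcher.getIndexReader().leaves()) {
      Scorer scorer = weight.scorer(leaf);
      if (scorer == null) {
        continue; // no matching documents in this segment
      }
      DocIdSetIterator it = scorer.iterator();
      while (it.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        // scorer.docID() reports the segment-relative doc the scorer is positioned on;
        // adding the leaf's docBase yields the index-wide doc id.
        int globalDoc = leaf.docBase + scorer.docID();
        System.out.println("doc=" + globalDoc + " score=" + scorer.score());
      }
    }
  }
}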
Example 1
Source File: LTRScoringQuery.java, from lucene-solr (Apache License 2.0)
@Override
public float score() throws IOException {
  reset();
  freq = 0;
  if (targetDoc == activeDoc) {
    for (final Scorer scorer : featureScorers) {
      // Only feature scorers positioned on the current document contribute a value.
      if (scorer.docID() == activeDoc) {
        freq++;
        Feature.FeatureWeight scFW = (Feature.FeatureWeight) scorer.getWeight();
        final int featureId = scFW.getIndex();
        featuresInfo[featureId].setValue(scorer.score());
        featuresInfo[featureId].setUsed(true);
      }
    }
  }
  return makeNormalizedFeaturesAndScore();
}
Example 2
Source File: RankerQuery.java, from elasticsearch-learning-to-rank (Apache License 2.0)
@Override
public float score() throws IOException {
  fv = ranker.newFeatureVector(fv);
  int ordinal = -1;
  // a DisiPriorityQueue could help to avoid
  // looping on all scorers
  for (Scorer scorer : scorers) {
    ordinal++;
    // FIXME: Probably inefficient, again we loop over all scorers..
    if (scorer.docID() == docID()) {
      // XXX: bold assumption that all models are dense
      // do we need some indirection to infer the featureId?
      fv.setFeatureScore(ordinal, scorer.score());
    }
  }
  return ranker.score(fv);
}
Example 3
Source File: LTRScoringQuery.java, from lucene-solr (Apache License 2.0)
@Override
public int nextDoc() throws IOException {
  if (activeDoc <= targetDoc) {
    activeDoc = NO_MORE_DOCS;
    // Advance every non-exhausted feature scorer and keep the minimum doc as activeDoc.
    for (final Scorer scorer : featureScorers) {
      if (scorer.docID() != NO_MORE_DOCS) {
        activeDoc = Math.min(activeDoc, scorer.iterator().nextDoc());
      }
    }
  }
  return ++targetDoc;
}
Example 4
Source File: LTRScoringQuery.java, from lucene-solr (Apache License 2.0)
@Override
public int advance(int target) throws IOException {
  if (activeDoc < target) {
    activeDoc = NO_MORE_DOCS;
    // Advance every non-exhausted feature scorer to target and keep the minimum doc as activeDoc.
    for (final Scorer scorer : featureScorers) {
      if (scorer.docID() != NO_MORE_DOCS) {
        activeDoc = Math.min(activeDoc, scorer.iterator().advance(target));
      }
    }
  }
  targetDoc = target;
  return target;
}
Example 5
Source File: LoggingFetchSubPhase.java, from elasticsearch-learning-to-rank (Apache License 2.0)
void doLog(Query query, List<HitLogConsumer> loggers, IndexSearcher searcher, SearchHit[] hits) throws IOException {
  // Reorder hits by id so we can scan all the docs belonging to the same
  // segment by reusing the same scorer.
  SearchHit[] reordered = new SearchHit[hits.length];
  System.arraycopy(hits, 0, reordered, 0, hits.length);
  Arrays.sort(reordered, Comparator.comparingInt(SearchHit::docId));
  int hitUpto = 0;
  int readerUpto = -1;
  int endDoc = 0;
  int docBase = 0;
  Scorer scorer = null;
  Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE, 1F);
  // Loop logic borrowed from lucene QueryRescorer
  while (hitUpto < reordered.length) {
    SearchHit hit = reordered[hitUpto];
    int docID = hit.docId();
    loggers.forEach((l) -> l.nextDoc(hit));
    LeafReaderContext readerContext = null;
    while (docID >= endDoc) {
      readerUpto++;
      readerContext = searcher.getTopReaderContext().leaves().get(readerUpto);
      endDoc = readerContext.docBase + readerContext.reader().maxDoc();
    }
    if (readerContext != null) {
      // We advanced to another segment:
      docBase = readerContext.docBase;
      scorer = weight.scorer(readerContext);
    }
    if (scorer != null) {
      int targetDoc = docID - docBase;
      int actualDoc = scorer.docID();
      if (actualDoc < targetDoc) {
        actualDoc = scorer.iterator().advance(targetDoc);
      }
      if (actualDoc == targetDoc) {
        // Scoring will trigger log collection
        scorer.score();
      }
    }
    hitUpto++;
  }
}
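Note how Example 5 reuses a single Scorer per segment: because the hits are sorted by doc id, it can check scorer.docID() first and only call advance() when the scorer is behind the target, and it subtracts the segment's docBase from each index-wide hit id to obtain the segment-relative document the scorer operates on.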