org.apache.lucene.search.ScoreDoc Java Examples
The following examples show how to use
org.apache.lucene.search.ScoreDoc.
You can vote up the ones you like or vote down the ones you don't like,
You can also navigate to the original project or source file by following the links above each example, and check out related API usage in the sidebar.
Example #1
Source File: ProductIndex.java From arcusplatform with Apache License 2.0 | 6 votes |
/**
 * Searches the product index for the given query string and resolves each
 * hit's stored "id" back to a ProductCatalogEntry via {@code prodcat}.
 *
 * @param queryString raw user query, parsed against {@code searchField}
 * @return matching catalog entries, at most 100
 * @throws IOException    on index access failure
 * @throws ParseException if the query string cannot be parsed
 */
public List<ProductCatalogEntry> search(String queryString) throws IOException, ParseException {
   List<ProductCatalogEntry> results = new ArrayList<ProductCatalogEntry>();
   // try-with-resources: the original leaked the reader when parse() or
   // search() threw before reaching reader.close().
   try (IndexReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      Analyzer analyzer = new SimpleAnalyzer();
      QueryParser parser = new QueryParser(searchField, analyzer);
      Query query = parser.parse(queryString);
      TopDocs docs = searcher.search(query, 100);
      for (ScoreDoc sd : docs.scoreDocs) {
         Document doc = searcher.doc(sd.doc);
         results.add(prodcat.getProductById(doc.get("id")));
      }
   }
   return results;
}
Example #2
Source File: TestLucene.java From bookshop with MIT License | 6 votes |
/**
 * Prints each hit to stdout: a 1-based rank, the hit's score, then every
 * stored field value of the matching document, tab-separated.
 */
private static void showSearchResults(IndexSearcher searcher, ScoreDoc[] hits, Query query, IKAnalyzer ikAnalyzer) throws IOException {
    System.out.println("找到 " + hits.length + " 个命中.");
    System.out.println("序号\t匹配度得分\t结果");
    int rank = 0;
    for (ScoreDoc hit : hits) {
        rank++;
        Document document = searcher.doc(hit.doc);
        System.out.print(rank);
        System.out.print("\t" + hit.score);
        List<IndexableField> fields = document.getFields();
        for (IndexableField field : fields) {
            System.out.print("\t" + document.get(field.name()));
        }
        System.out.println();
    }
}
Example #3
Source File: DocsReader.java From localization_nifi with Apache License 2.0 | 6 votes |
/**
 * Materializes the Lucene Documents behind the given TopDocs and delegates
 * to {@code read(docs, ...)} to convert them into ProvenanceEventRecords.
 * Short-circuits to an empty set once {@code retrievalCount} has already
 * reached {@code maxResults}.
 */
public Set<ProvenanceEventRecord> read(final TopDocs topDocs, final EventAuthorizer authorizer, final IndexReader indexReader, final Collection<Path> allProvenanceLogFiles,
        final AtomicInteger retrievalCount, final int maxResults, final int maxAttributeChars) throws IOException {
    if (retrievalCount.get() >= maxResults) {
        return Collections.emptySet();
    }

    final long start = System.nanoTime();
    final ScoreDoc[] scoreDocs = topDocs.scoreDocs;
    final int numDocs = Math.min(scoreDocs.length, maxResults);
    final List<Document> docs = new ArrayList<>(numDocs);

    // NOTE(review): documents are collected in reverse ScoreDoc order —
    // presumably deliberate (lowest-ranked first); confirm against the
    // downstream read() overload before changing.
    for (int i = numDocs - 1; i >= 0; i--) {
        final int docId = scoreDocs[i].doc;
        final Document d = indexReader.document(docId);
        docs.add(d);
    }

    final long readDocuments = System.nanoTime() - start;
    logger.debug("Reading {} Lucene Documents took {} millis", docs.size(), TimeUnit.NANOSECONDS.toMillis(readDocuments));

    return read(docs, authorizer, allProvenanceLogFiles, retrievalCount, maxResults, maxAttributeChars);
}
Example #4
Source File: TestNumericTerms32.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Runs random int-range queries sorted by {@code field<precisionStep>} in
 * reverse and asserts the returned documents are in strictly descending
 * numeric order.
 */
private void testSorting(int precisionStep) throws Exception {
  String field="field"+precisionStep;
  // 10 random tests, the index order is ascending,
  // so using a reverse sort field should return descending documents
  int num = TestUtil.nextInt(random(), 10, 20);
  for (int i = 0; i < num; i++) {
    int lower=(int)(random().nextDouble()*noDocs*distance)+startOffset;
    int upper=(int)(random().nextDouble()*noDocs*distance)+startOffset;
    if (lower>upper) {
      // swap so that lower <= upper
      int a=lower; lower=upper; upper=a;
    }
    Query tq= LegacyNumericRangeQuery.newIntRange(field, precisionStep, lower, upper, true, true);
    TopDocs topDocs = searcher.search(tq, noDocs, new Sort(new SortField(field, SortField.Type.INT, true)));
    // some random ranges may legitimately match nothing
    if (topDocs.totalHits.value==0) continue;
    ScoreDoc[] sd = topDocs.scoreDocs;
    assertNotNull(sd);
    // walk the hits; each value must be strictly smaller than the previous
    int last = searcher.doc(sd[0].doc).getField(field).numericValue().intValue();
    for (int j=1; j<sd.length; j++) {
      int act = searcher.doc(sd[j].doc).getField(field).numericValue().intValue();
      assertTrue("Docs should be sorted backwards", last>act );
      last=act;
    }
  }
}
Example #5
Source File: TestMultiFieldQPHelper.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Verifies that, with AND as the default operator, a query containing a
 * stop word ("the") still matches the single indexed document via the
 * remaining term.
 */
public void testStopWordSearching() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  Directory ramDir = newDirectory();
  IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(analyzer));
  Document indexed = new Document();
  indexed.add(newTextField("body", "blah the footest blah", Field.Store.NO));
  writer.addDocument(indexed);
  writer.close();

  StandardQueryParser queryParser = new StandardQueryParser();
  queryParser.setMultiFields(new String[] { "body" });
  queryParser.setAnalyzer(analyzer);
  queryParser.setDefaultOperator(StandardQueryConfigHandler.Operator.AND);
  Query q = queryParser.parse("the footest", null);

  IndexReader reader = DirectoryReader.open(ramDir);
  IndexSearcher searcher = newSearcher(reader);
  ScoreDoc[] hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(1, hits.length);
  reader.close();
  ramDir.close();
}
Example #6
Source File: ReRankCollector.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Orders ScoreDocs by descending score, substituting the boosted score
 * from {@code boostedMap} for any doc id present in it.
 */
public int compare(Object o1, Object o2) {
  ScoreDoc left = (ScoreDoc) o1;
  ScoreDoc right = (ScoreDoc) o2;
  float leftScore = left.score;
  float rightScore = right.score;
  int slot = boostedMap.indexOf(left.doc);
  if (slot >= 0) {
    leftScore = boostedMap.indexGet(slot);
  }
  slot = boostedMap.indexOf(right.doc);
  if (slot >= 0) {
    rightScore = boostedMap.indexGet(slot);
  }
  // negate for highest-score-first ordering
  return -Float.compare(leftScore, rightScore);
}
Example #7
Source File: ScoreDocRowFunction.java From Elasticsearch with Apache License 2.0 | 6 votes |
/**
 * Converts a Lucene hit into a Row: feeds the score to the scorer, updates
 * the ORDER BY collector expressions, then positions every collector
 * expression on the segment-local doc id before returning the shared row.
 * Returns {@code null} for a {@code null} input.
 */
@Nullable @Override public Row apply(@Nullable ScoreDoc input) {
    if (input == null) {
        return null;
    }
    // NOTE(review): unchecked cast — assumes every hit is a FieldDoc, i.e.
    // the producing search was sorted; confirm against the caller.
    FieldDoc fieldDoc = (FieldDoc) input;
    scorer.score(fieldDoc.score);
    for (OrderByCollectorExpression orderByCollectorExpression : orderByCollectorExpressions) {
        orderByCollectorExpression.setNextFieldDoc(fieldDoc);
    }
    // map the global doc id onto its leaf reader and a segment-local id
    List<LeafReaderContext> leaves = indexReader.leaves();
    int readerIndex = ReaderUtil.subIndex(fieldDoc.doc, leaves);
    LeafReaderContext subReaderContext = leaves.get(readerIndex);
    int subDoc = fieldDoc.doc - subReaderContext.docBase;
    for (LuceneCollectorExpression<?> expression : expressions) {
        expression.setNextReader(subReaderContext);
        expression.setNextDocId(subDoc);
    }
    // the same mutable row instance is reused for every hit
    return inputRow;
}
Example #8
Source File: TestNumericRangeQuery32.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Tests a left-open (null lower bound) int range: the inclusiveness flag
 * of an open end must not change the result, so both variants are checked
 * against the same expected first/last documents.
 */
private void testLeftOpenRange(int precisionStep) throws Exception {
  String field = "field" + precisionStep;
  int count = 3000;
  int upper = (count - 1) * distance + (distance / 3) + startOffset;
  for (boolean minInclusive : new boolean[] { true, false }) {
    LegacyNumericRangeQuery<Integer> q =
        LegacyNumericRangeQuery.newIntRange(field, precisionStep, null, upper, minInclusive, true);
    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
    ScoreDoc[] sd = topDocs.scoreDocs;
    assertNotNull(sd);
    assertEquals("Score doc count", count, sd.length);
    Document doc = searcher.doc(sd[0].doc);
    assertEquals("First doc", startOffset, doc.getField(field).numericValue().intValue());
    doc = searcher.doc(sd[sd.length - 1].doc);
    assertEquals("Last doc", (count - 1) * distance + startOffset, doc.getField(field).numericValue().intValue());
  }
}
Example #9
Source File: TestFieldScoreQuery.java From lucene-solr with Apache License 2.0 | 6 votes |
private void doTestExactScore (ValueSource valueSource) throws Exception { Query functionQuery = getFunctionQuery(valueSource); IndexReader r = DirectoryReader.open(dir); IndexSearcher s = newSearcher(r); TopDocs td = s.search(functionQuery,1000); assertEquals("All docs should be matched!",N_DOCS,td.totalHits.value); ScoreDoc sd[] = td.scoreDocs; for (ScoreDoc aSd : sd) { float score = aSd.score; log(s.explain(functionQuery, aSd.doc)); String id = s.getIndexReader().document(aSd.doc).get(ID_FIELD); float expectedScore = expectedFieldScore(id); // "ID7" --> 7.0 assertEquals("score of " + id + " shuould be " + expectedScore + " != " + score, expectedScore, score, TEST_SCORE_TOLERANCE_DELTA); } r.close(); }
Example #10
Source File: SearchDfsQueryThenFetchAsyncAction.java From Elasticsearch with Apache License 2.0 | 6 votes |
/**
 * Second phase of DFS-query-then-fetch: sorts the per-shard query results,
 * determines which doc ids must be fetched from each shard, and issues the
 * per-shard fetch requests. Finishes immediately when nothing needs to be
 * loaded.
 */
void innerExecuteFetchPhase() throws Exception {
    boolean useScroll = request.scroll() != null;
    sortedShardList = searchPhaseController.sortDocs(useScroll, queryResults);
    searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

    // nothing to fetch: complete the request right away
    if (docIdsToLoad.asList().isEmpty()) {
        finishHim();
        return;
    }

    final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
        request, sortedShardList, firstResults.length()
    );
    // counts down as each per-shard fetch completes
    final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
    for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
        QuerySearchResult queryResult = queryResults.get(entry.index);
        DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
        ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard);
        executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
    }
}
Example #11
Source File: LuceneTranslator.java From Indra with MIT License | 6 votes |
private Map<String, List<String>> doTranslate(Set<String> terms) { Map<String, List<String>> res = new HashMap<>(); try { TopDocs topDocs = LuceneUtils.getTopDocs(searcher, terms, TERM_FIELD); if (topDocs != null) { for (ScoreDoc sd : topDocs.scoreDocs) { Document doc = searcher.doc(sd.doc); Map<String, Double> content = convert(doc.getBinaryValue(TRANSLATION_FIELD).bytes); res.put(doc.get(TERM_FIELD), getRelevantTranslations(content)); } } } catch (IOException e) { logger.error(e.getMessage()); //TODO throw new expection here. e.printStackTrace(); } return res; }
Example #12
Source File: TestMultiFieldQueryParser.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Verifies that a MultiFieldQueryParser with AND semantics still matches
 * the indexed document when the query contains a stop word ("the").
 */
public void testStopWordSearching() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  Directory ramDir = newDirectory();
  IndexWriter writer = new IndexWriter(ramDir, newIndexWriterConfig(analyzer));
  Document indexed = new Document();
  indexed.add(newTextField("body", "blah the footest blah", Field.Store.NO));
  writer.addDocument(indexed);
  writer.close();

  MultiFieldQueryParser queryParser = new MultiFieldQueryParser(new String[] {"body"}, analyzer);
  queryParser.setDefaultOperator(QueryParser.Operator.AND);
  Query q = queryParser.parse("the footest");

  IndexReader reader = DirectoryReader.open(ramDir);
  IndexSearcher searcher = newSearcher(reader);
  ScoreDoc[] hits = searcher.search(q, 1000).scoreDocs;
  assertEquals(1, hits.length);
  reader.close();
  ramDir.close();
}
Example #13
Source File: AlfrescoReRankQParserPlugin.java From SearchServices with GNU Lesser General Public License v3.0 | 6 votes |
/**
 * Orders ScoreDocs by descending score, preferring the boosted score from
 * {@code boostedMap} when the doc id has one.
 */
public int compare(Object o1, Object o2) {
    ScoreDoc left = (ScoreDoc) o1;
    ScoreDoc right = (ScoreDoc) o2;
    float leftScore = boostedMap.containsKey(left.doc) ? boostedMap.get(left.doc) : left.score;
    float rightScore = boostedMap.containsKey(right.doc) ? boostedMap.get(right.doc) : right.score;
    // Explicit >/< checks are kept (rather than Float.compare) so that any
    // NaN comparison falls through to "equal", exactly like the original.
    if (leftScore > rightScore) {
        return -1;
    }
    if (leftScore < rightScore) {
        return 1;
    }
    return 0;
}
Example #14
Source File: LuceneSearchIndex.java From dremio-oss with Apache License 2.0 | 6 votes |
private List<Doc> toDocs(ScoreDoc[] hits, Searcher searcher) throws IOException{ List<Doc> documentList = new ArrayList<>(); for (int i = 0; i < hits.length; ++i) { ScoreDoc scoreDoc = hits[i]; Document doc = searcher.doc(scoreDoc.doc); IndexableField idField = doc.getField("_id"); if(idField == null){ // deleted between index hit and retrieval. continue; } final BytesRef ref = idField.binaryValue(); final byte[] bytes = new byte[ref.length]; System.arraycopy(ref.bytes, ref.offset, bytes, 0, ref.length); Doc outputDoc = new Doc(scoreDoc, bytes, 0 /*version*/); documentList.add(outputDoc); } return documentList; }
Example #15
Source File: RetrievalApp.java From lucene4ir with Apache License 2.0 | 6 votes |
/**
 * Parses and runs {@code queryTerms} against the searcher, returning the
 * top {@code p.maxResults} hits. Exits the process on parse or I/O failure
 * (batch-retrieval tool behavior).
 */
public ScoreDoc[] runQuery(String qno, String queryTerms){
    ScoreDoc[] hits = null;
    System.out.println("Query No.: " + qno + " " + queryTerms);
    try {
        Query query = parser.parse(QueryParser.escape(queryTerms));
        try {
            hits = searcher.search(query, p.maxResults).scoreDocs;
        } catch (IOException ioe) {
            ioe.printStackTrace();
            System.exit(1);
        }
    } catch (ParseException pe) {
        pe.printStackTrace();
        System.exit(1);
    }
    return hits;
}
Example #16
Source File: LindenResultParser.java From linden with Apache License 2.0 | 6 votes |
/**
 * Converts raw Lucene hits into LindenHits: resolves each hit's score
 * (falling back to a sort-field value, or the constant 1, when the score
 * is NaN), its id and spatial/sort/source/explain data, then attaches
 * snippets to the whole list.
 */
private List<LindenHit> parseLindenHits(ScoreDoc[] hits) throws IOException {
  List<LindenHit> lindenHits = new ArrayList<>();
  String idFieldName = config.getSchema().getId();
  for (ScoreDoc hit : hits) {
    LindenHit lindenHit = new LindenHit();
    if (Double.isNaN(hit.score)) {
      // get score for cluster result merge
      if (sortScoreFieldPos != -1) {
        lindenHit.setScore(Double.valueOf(((FieldDoc) hit).fields[sortScoreFieldPos].toString()));
      } else {
        // no usable score: neutral constant
        lindenHit.setScore(1);
      }
    } else {
      lindenHit.setScore(hit.score);
    }
    String id = LindenUtil.getFieldStringValue(leaves, hit.doc, idFieldName);
    lindenHit.setId(id);
    lindenHit = this.parseSpatial(hit.doc, lindenHit);
    lindenHit = this.parseSort(hit, lindenHit);
    lindenHit = this.parseSource(hit.doc, lindenHit);
    lindenHit = this.parseExplain(hit.doc, lindenHit);
    lindenHits.add(lindenHit);
  }
  lindenHits = this.parseSnippets(lindenHits, hits);
  return lindenHits;
}
Example #17
Source File: TestNumericRangeQuery64.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Tests a left-open (null lower bound) long range: the inclusiveness flag
 * of an open end must not change the result, so both variants are checked
 * against the same expected first/last documents.
 */
private void testLeftOpenRange(int precisionStep) throws Exception {
  String field = "field" + precisionStep;
  int count = 3000;
  long upper = (count - 1) * distance + (distance / 3) + startOffset;
  for (boolean minInclusive : new boolean[] { true, false }) {
    LegacyNumericRangeQuery<Long> q =
        LegacyNumericRangeQuery.newLongRange(field, precisionStep, null, upper, minInclusive, true);
    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
    ScoreDoc[] sd = topDocs.scoreDocs;
    assertNotNull(sd);
    assertEquals("Score doc count", count, sd.length);
    Document doc = searcher.doc(sd[0].doc);
    assertEquals("First doc", startOffset, doc.getField(field).numericValue().longValue());
    doc = searcher.doc(sd[sd.length - 1].doc);
    assertEquals("Last doc", (count - 1) * distance + startOffset, doc.getField(field).numericValue().longValue());
  }
}
Example #18
Source File: TestNumericRangeQuery64.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Tests a right-open (null upper bound) long range: the inclusiveness flag
 * of an open end must not change the result, so both variants are checked
 * against the same expected first/last documents.
 */
private void testRightOpenRange(int precisionStep) throws Exception {
  String field = "field" + precisionStep;
  int count = 3000;
  long lower = (count - 1) * distance + (distance / 3) + startOffset;
  for (boolean maxInclusive : new boolean[] { true, false }) {
    LegacyNumericRangeQuery<Long> q =
        LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, null, true, maxInclusive);
    TopDocs topDocs = searcher.search(q, noDocs, Sort.INDEXORDER);
    ScoreDoc[] sd = topDocs.scoreDocs;
    assertNotNull(sd);
    assertEquals("Score doc count", noDocs - count, sd.length);
    Document doc = searcher.doc(sd[0].doc);
    assertEquals("First doc", count * distance + startOffset, doc.getField(field).numericValue().longValue());
    doc = searcher.doc(sd[sd.length - 1].doc);
    assertEquals("Last doc", (noDocs - 1) * distance + startOffset, doc.getField(field).numericValue().longValue());
  }
}
Example #19
Source File: TestTermsEnum2.java From lucene-solr with Apache License 2.0 | 6 votes |
/** tests a pre-intersected automaton against the original */
public void testFiniteVersusInfinite() throws Exception {
  for (int i = 0; i < numIterations; i++) {
    String reg = AutomatonTestUtil.randomRegexp(random());
    Automaton automaton = Operations.determinize(new RegExp(reg, RegExp.NONE).toAutomaton(),
        DEFAULT_MAX_DETERMINIZED_STATES);
    // collect the index terms the regexp automaton accepts
    final List<BytesRef> matchedTerms = new ArrayList<>();
    for(BytesRef t : terms) {
      if (Operations.run(automaton, t.utf8ToString())) {
        matchedTerms.add(t);
      }
    }

    // build a finite automaton that accepts exactly the matched terms
    Automaton alternate = Automata.makeStringUnion(matchedTerms);
    //System.out.println("match " + matchedTerms.size() + " " + alternate.getNumberOfStates() + " states, sigma=" + alternate.getStartPoints().length);
    //AutomatonTestUtil.minimizeSimple(alternate);
    //System.out.println("minimize done");
    AutomatonQuery a1 = new AutomatonQuery(new Term("field", ""), automaton);
    AutomatonQuery a2 = new AutomatonQuery(new Term("field", ""), alternate, Integer.MAX_VALUE);

    // both queries must return identical hits
    ScoreDoc[] origHits = searcher.search(a1, 25).scoreDocs;
    ScoreDoc[] newHits = searcher.search(a2, 25).scoreDocs;
    CheckHits.checkEqual(a1, origHits, newHits);
  }
}
Example #20
Source File: LtrQueryTests.java From elasticsearch-learning-to-rank with Apache License 2.0 | 6 votes |
/**
 * Asserts that the hit's Lucene score equals the ranker model's expected
 * score for that document (within a few ulps) and, for non-TFIDF
 * similarities, that explain() agrees and reports the feature names.
 */
private void assertScoresMatch(List<PrebuiltFeature> features, float[] scores,
                               RankerQuery ltrQuery, ScoreDoc scoreDoc) throws IOException {
    Document d = searcherUnderTest.doc(scoreDoc.doc);
    // the stored "id" field doubles as the index into the expected-scores array
    String idVal = d.get("id");
    int docId = Integer.decode(idVal);
    float modelScore = scores[docId];
    float queryScore = scoreDoc.score;
    assertEquals("Scores match with similarity " + similarity.getClass(), modelScore, queryScore,
            SCORE_NB_ULP_PREC *Math.ulp(modelScore));
    if (!(similarity instanceof TFIDFSimilarity)) {
        // There are precision issues with these similarities when using explain
        // It produces 0.56103003 for feat:0 in doc1 using score() but 0.5610301 using explain
        Explanation expl = searcherUnderTest.explain(ltrQuery, docId);
        assertEquals("Explain scores match with similarity " + similarity.getClass(),
                expl.getValue().floatValue(), queryScore, 5 * Math.ulp(modelScore));
        checkFeatureNames(expl, features);
    }
}
Example #21
Source File: QualityBenchmark.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Builds QualityStats for one query: walks the ranked ScoreDocs, resolves
 * each hit's doc name, judges its relevance and records it with a 1-based
 * rank; optionally logs the per-query stats.
 */
private QualityStats analyzeQueryResults(QualityQuery qq, Query q, TopDocs td, Judge judge, PrintWriter logger, long searchTime) throws IOException {
  QualityStats stts = new QualityStats(judge.maxRecall(qq),searchTime);
  ScoreDoc sd[] = td.scoreDocs;
  long t1 = System.currentTimeMillis();
  // extraction of first doc name we measure also construction of doc name extractor, just in case.
  DocNameExtractor xt = new DocNameExtractor(docNameField);
  for (int i=0; i<sd.length; i++) {
    String docName = xt.docName(searcher,sd[i].doc);
    // NOTE(review): for i>0 this interval also includes the previous
    // iteration's judging/recording time, because t1 is reset before them.
    long docNameExtractTime = System.currentTimeMillis() - t1;
    t1 = System.currentTimeMillis();
    boolean isRelevant = judge.isRelevant(docName,qq);
    stts.addResult(i+1,isRelevant, docNameExtractTime);
  }
  if (logger!=null) {
    logger.println(qq.getQueryID()+" - "+q);
    stts.log(qq.getQueryID()+" Stats:",1,logger," ");
  }
  return stts;
}
Example #22
Source File: SearchImpl.java From lucene-solr with Apache License 2.0 | 6 votes |
@Override public SearchResults search( Query query, SimilarityConfig simConfig, Sort sort, Set<String> fieldsToLoad, int pageSize, boolean exactHitsCount) { if (pageSize < 0) { throw new LukeException(new IllegalArgumentException("Negative integer is not acceptable for page size.")); } // reset internal status to prepare for a new search session this.docs = new ScoreDoc[0]; this.currentPage = 0; this.pageSize = pageSize; this.exactHitsCount = exactHitsCount; this.query = Objects.requireNonNull(query); this.sort = sort; this.fieldsToLoad = fieldsToLoad == null ? null : Set.copyOf(fieldsToLoad); searcher.setSimilarity(createSimilarity(Objects.requireNonNull(simConfig))); try { return search(); } catch (IOException e) { throw new LukeException("Search Failed.", e); } }
Example #23
Source File: SearchController.java From bookshop with MIT License | 5 votes |
@RequestMapping("searchBook.do") public ModelAndView searchBook(Book book) throws IOException, ParseException { ModelAndView mav = new ModelAndView("searchBook"); // 关键字 String keyword = book.getName(); System.out.println(keyword); // 准备中文分词器 IKAnalyzer analyzer = new IKAnalyzer(); // 索引 Directory index = createIndex(analyzer); // 查询器 Query query = new QueryParser("name",analyzer).parse(keyword); // 搜索 IndexReader reader = DirectoryReader.open(index); IndexSearcher searcher = new IndexSearcher(reader); int numberPerPage = 10; ScoreDoc[] hits = searcher.search(query,numberPerPage).scoreDocs; List<Book> books = new ArrayList<>(); for (int i = 0; i < hits.length; i++) { ScoreDoc scoreDoc = hits[i]; int docId = scoreDoc.doc; Document document = searcher.doc(docId); Book tmpBook = bookService.get(Integer.parseInt(document.get("id"))); books.add(tmpBook); } mav.addObject("books",books); return mav; }
Example #24
Source File: TestLucene.java From bookshop with MIT License | 5 votes |
/**
 * Demo entry point: indexes a set of product names with the IK Chinese
 * analyzer, runs one keyword query and prints the hits.
 */
public static void main(String[] args) throws IOException, ParseException {
    // 1. set up the Chinese analyzer
    IKAnalyzer analyzer = new IKAnalyzer();
    // 2. build the index from sample product names
    List<String> productNames = new ArrayList<>();
    productNames.add("飞利浦led灯泡e27螺口暖白球泡灯家用照明超亮节能灯泡转色温灯泡");
    productNames.add("飞利浦led灯泡e14螺口蜡烛灯泡3W尖泡拉尾节能灯泡暖黄光源Lamp");
    productNames.add("雷士照明 LED灯泡 e27大螺口节能灯3W球泡灯 Lamp led节能灯泡");
    productNames.add("飞利浦 led灯泡 e27螺口家用3w暖白球泡灯节能灯5W灯泡LED单灯7w");
    productNames.add("飞利浦led小球泡e14螺口4.5w透明款led节能灯泡照明光源lamp单灯");
    productNames.add("飞利浦蒲公英护眼台灯工作学习阅读节能灯具30508带光源");
    productNames.add("欧普照明led灯泡蜡烛节能灯泡e14螺口球泡灯超亮照明单灯光源");
    productNames.add("欧普照明led灯泡节能灯泡超亮光源e14e27螺旋螺口小球泡暖黄家用");
    productNames.add("聚欧普照明led灯泡节能灯泡e27螺口球泡家用led照明单灯超亮光源");
    Directory index = createIndex(analyzer, productNames);
    // 3. parse the keyword into a query against the "name" field
    String keyword = "护眼带光源";
    Query query = new QueryParser("name",analyzer).parse(keyword);
    // 4. search
    IndexReader reader = DirectoryReader.open(index);
    IndexSearcher searcher = new IndexSearcher(reader);
    int numberPerPage = 1000;
    System.out.printf("当前一共有%d条数据%n",productNames.size());
    System.out.printf("查询关键字是:\"%s\"%n",keyword);
    ScoreDoc[] hits = searcher.search(query,numberPerPage).scoreDocs;
    // 5. display the results
    showSearchResults(searcher, hits, query, analyzer);
}
Example #25
Source File: DocToDoubleVectorUtilsTest.java From lucene-solr with Apache License 2.0 | 5 votes |
/**
 * For every indexed document, converts its "text" term vector into a dense
 * local-frequency double array and asserts the result is non-empty.
 */
@Test
public void testDenseFreqDoubleArrayConversion() throws Exception {
  IndexSearcher indexSearcher = new IndexSearcher(index);
  ScoreDoc[] allDocs = indexSearcher.search(new MatchAllDocsQuery(), Integer.MAX_VALUE).scoreDocs;
  for (ScoreDoc scoreDoc : allDocs) {
    Terms docTerms = index.getTermVector(scoreDoc.doc, "text");
    Double[] vector = DocToDoubleVectorUtils.toDenseLocalFreqDoubleArray(docTerms);
    assertNotNull(vector);
    assertTrue(vector.length > 0);
  }
}
Example #26
Source File: GroupDocs.java From lucene-solr with Apache License 2.0 | 5 votes |
/**
 * Holds the hits of a single group: the group's score statistics, its
 * total hit count, the hits themselves, the group value and the values
 * used to sort the groups.
 */
public GroupDocs(float score, float maxScore, TotalHits totalHits, ScoreDoc[] scoreDocs, T groupValue, Object[] groupSortValues) {
  this.groupValue = groupValue;
  this.groupSortValues = groupSortValues;
  this.scoreDocs = scoreDocs;
  this.totalHits = totalHits;
  this.score = score;
  this.maxScore = maxScore;
}
Example #27
Source File: SearchBuilder.java From taoshop with Apache License 2.0 | 5 votes |
/**
 * Searches the index at {@code indexDir} for {@code queryStr} over the
 * "tcontent" field and prints each hit's title plus an HTML-highlighted
 * snippet of its content.
 */
public static void doSearch(String indexDir , String queryStr) throws IOException, ParseException, InvalidTokenOffsetsException {
    Directory directory = FSDirectory.open(Paths.get(indexDir));
    DirectoryReader reader = DirectoryReader.open(directory);
    IndexSearcher searcher = new IndexSearcher(reader);
    Analyzer analyzer = new SmartChineseAnalyzer();
    QueryParser parser = new QueryParser("tcontent",analyzer);
    Query query = parser.parse(queryStr);

    long startTime = System.currentTimeMillis();
    TopDocs docs = searcher.search(query,10);
    System.out.println("查找"+queryStr+"所用时间:"+(System.currentTimeMillis()-startTime));
    System.out.println("查询到"+docs.totalHits+"条记录");

    // configure HTML highlighting of matched terms
    SimpleHTMLFormatter simpleHTMLFormatter = new SimpleHTMLFormatter("<b><font color=red>","</font></b>");
    QueryScorer scorer = new QueryScorer(query); // scores fragments against the query
    Fragmenter fragmenter = new SimpleSpanFragmenter(scorer); // selects a fragment based on the score
    Highlighter highlighter = new Highlighter(simpleHTMLFormatter,scorer);
    highlighter.setTextFragmenter(fragmenter); // fragment used to render highlights

    // iterate over the hits
    for(ScoreDoc scoreDoc : docs.scoreDocs){
        Document doc = searcher.doc(scoreDoc.doc);
        System.out.println(doc.get("title"));
        String tcontent = doc.get("tcontent");
        if(tcontent != null){
            TokenStream tokenStream = analyzer.tokenStream("tcontent", new StringReader(tcontent));
            String summary = highlighter.getBestFragment(tokenStream, tcontent);
            System.out.println(summary);
        }
    }
    // NOTE(review): reader (and directory) are not closed if an exception
    // is thrown above — consider try-with-resources.
    reader.close();
}
Example #28
Source File: TestNumericRangeQuery64.java From lucene-solr with Apache License 2.0 | 5 votes |
/** test for constant score + boolean query + filter, the other tests only use the constant score mode */
private void testRange(int precisionStep) throws Exception {
  String field="field"+precisionStep;
  int count=3000;
  long lower=(distance*3/2)+startOffset, upper=lower + count*distance + (distance/3);
  LegacyNumericRangeQuery<Long> q = LegacyNumericRangeQuery.newLongRange(field, precisionStep, lower, upper, true, true);
  // run the same range query under both rewrite methods; both must return
  // the identical doc count and boundary documents
  for (byte i=0; i<2; i++) {
    TopFieldCollector collector = TopFieldCollector.create(Sort.INDEXORDER, noDocs, Integer.MAX_VALUE);
    String type;
    switch (i) {
      case 0:
        type = " (constant score filter rewrite)";
        q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_REWRITE);
        break;
      case 1:
        type = " (constant score boolean rewrite)";
        q.setRewriteMethod(MultiTermQuery.CONSTANT_SCORE_BOOLEAN_REWRITE);
        break;
      default:
        return;
    }
    searcher.search(q, collector);
    TopDocs topDocs = collector.topDocs();
    ScoreDoc[] sd = topDocs.scoreDocs;
    assertNotNull(sd);
    assertEquals("Score doc count"+type, count, sd.length );
    Document doc=searcher.doc(sd[0].doc);
    assertEquals("First doc"+type, 2*distance+startOffset, doc.getField(field).numericValue().longValue() );
    doc=searcher.doc(sd[sd.length-1].doc);
    assertEquals("Last doc"+type, (1+count)*distance+startOffset, doc.getField(field).numericValue().longValue() );
  }
}
Example #29
Source File: IndexSearcher.java From marathonv5 with Apache License 2.0 | 5 votes |
/**
 * Runs the query string against the index, groups hits by the
 * "documentType" field and returns up to 5 SearchResults per type,
 * keyed (and sorted) by DocumentType.
 *
 * @throws ParseException if searchString cannot be parsed
 */
public Map<DocumentType, List<SearchResult>> search(String searchString) throws ParseException {
    Map<DocumentType, List<SearchResult>> resultMap = new TreeMap<DocumentType, List<SearchResult>>();
    try {
        Query query = parser.parse(searchString);
        // group hits by "documentType", keeping the top 5 per group
        final SecondPassGroupingCollector collector = new SecondPassGroupingCollector("documentType", searchGroups,
                Sort.RELEVANCE, null, 5, true, false, true);
        searcher.search(query, collector);
        final TopGroups groups = collector.getTopGroups(0);
        for (GroupDocs groupDocs : groups.groups) {
            DocumentType docType = DocumentType.valueOf(groupDocs.groupValue);
            List<SearchResult> results = new ArrayList<SearchResult>();
            for (ScoreDoc scoreDoc : groupDocs.scoreDocs) {
                Document doc = searcher.doc(scoreDoc.doc);
                SearchResult result = new SearchResult(
                        docType,
                        doc.get("name"),
                        doc.get("url"),
                        doc.get("className"),
                        doc.get("package"),
                        doc.get("ensemblePath"),
                        doc.get("shortDescription")
                );
                results.add(result);
            }
            resultMap.put(docType, results);
        }
    } catch (IOException e) {
        // NOTE(review): IOException is swallowed after printing; callers get
        // an empty/partial map with no failure signal — consider rethrowing.
        e.printStackTrace();
    }
    return resultMap;
}
Example #30
Source File: StoredFieldsShardRequestFactory.java From lucene-solr with Apache License 2.0 | 5 votes |
/**
 * Buckets the given ScoreDocs (which are all ShardDocs) by their shard,
 * accumulating them into {@code shardMap}.
 */
private void mapShardToDocs(HashMap<String, Set<ShardDoc>> shardMap, ScoreDoc[] scoreDocs) {
    for (ScoreDoc scoreDoc : scoreDocs) {
        ShardDoc solrDoc = (ShardDoc) scoreDoc;
        // computeIfAbsent replaces the manual get / null-check / put dance
        shardMap.computeIfAbsent(solrDoc.shard, shard -> new HashSet<>()).add(solrDoc);
    }
}