Java Code Examples for org.apache.lucene.search.IndexSearcher

The following examples show how to use org.apache.lucene.search.IndexSearcher. These examples are extracted from open source projects; the project, source file, and license are listed above each example.
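
Before the project-specific examples, here is a minimal usage sketch of the typical IndexSearcher workflow: open a reader over an existing index, wrap it in an IndexSearcher, run a query, and load the stored fields of each hit. This sketch is not taken from any of the projects below; the index path "/path/to/index" and the field name "title" are placeholders, and it assumes a recent (Lucene 5.x or later) API.

import java.nio.file.Paths;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.FSDirectory;

public class BasicSearch {
  public static void main(String[] args) throws Exception {
    // open a reader over an existing index directory (placeholder path)
    try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
      // wrap the reader in a searcher
      IndexSearcher searcher = new IndexSearcher(reader);
      // run a simple term query and keep the top 10 hits
      TopDocs topDocs = searcher.search(new TermQuery(new Term("title", "lucene")), 10);
      for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
        // load the stored fields of the matching document
        Document doc = searcher.doc(scoreDoc.doc);
        System.out.println(doc.get("title"));
      }
    }
  }
}
 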
Example 1
Source Project: lucene-solr   Source File: BaseXYPointTestCase.java    License: Apache License 2.0
/** test we can search for a polygon with a hole (but still includes the doc) */
public void testPolygonHole() throws Exception {
  assumeTrue("Impl does not support polygons", supportsPolygons());
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a doc with a point
  Document document = new Document();
  addPointToDoc("field", document, 18.313694f, -65.227444f);
  writer.addDocument(document);
  
  // search and verify we found our doc
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  XYPolygon inner = new XYPolygon(new float[] { 18.5f, 18.5f, 18.7f, 18.7f, 18.5f },
                              new float[] { -65.7f, -65.4f, -65.4f, -65.7f, -65.7f });
  XYPolygon outer = new XYPolygon(new float[] { 18, 18, 19, 19, 18 },
                              new float[] { -66, -65, -65, -66, -66 }, inner);
  assertEquals(1, searcher.count(newPolygonQuery("field", outer)));

  reader.close();
  writer.close();
  dir.close();
}
 
Example 2
Source Project: james-project   Source File: LuceneMessageSearchIndex.java    License: Apache License 2.0
private Flags retrieveFlags(Mailbox mailbox, MessageUid uid) throws IOException {
    try (IndexSearcher searcher = new IndexSearcher(IndexReader.open(writer, true))) {
        Flags retrievedFlags = new Flags();

        BooleanQuery query = new BooleanQuery();
        query.add(new TermQuery(new Term(MAILBOX_ID_FIELD, mailbox.getMailboxId().serialize())), BooleanClause.Occur.MUST);
        query.add(createQuery(MessageRange.one(uid)), BooleanClause.Occur.MUST);
        query.add(new PrefixQuery(new Term(FLAGS_FIELD, "")), BooleanClause.Occur.MUST);

        TopDocs docs = searcher.search(query, 100000);
        ScoreDoc[] sDocs = docs.scoreDocs;
        for (ScoreDoc sDoc : sDocs) {
            Document doc = searcher.doc(sDoc.doc);

            Stream.of(doc.getValues(FLAGS_FIELD))
                .forEach(flag -> fromString(flag).ifPresentOrElse(retrievedFlags::add, () -> retrievedFlags.add(flag)));
        }
        return retrievedFlags;
    }
}
 
Example 3
Source Project: uyuni   Source File: NGramQueryParserTest.java    License: GNU General Public License v2.0
public Hits performSearch(Directory dir, String query, boolean useMust)
    throws Exception {

    NGramQueryParser parser = new NGramQueryParser("name",
            new NGramAnalyzer(min_ngram, max_ngram), useMust);
    IndexSearcher searcher = new IndexSearcher(dir);
    Query q = parser.parse(query);
    Hits hits = searcher.search(q);
    log.info("Original Query = " + query);
    log.info("Parsed Query = " + q.toString());
    log.info("Hits.length() = " + hits.length());
    for (int i=0; i < hits.length(); i++) {
        log.debug("Document<"+hits.id(i)+"> = " + hits.doc(i));
        //Explanation explain = searcher.explain(q, hits.id(i));
        //log.debug("explain = " + explain.toString());
    }
    return hits;
}
 
Example 4
Source Project: crate   Source File: IpColumnReferenceTest.java    License: Apache License 2.0
@Test
public void testIpExpression() throws Exception {
    IpColumnReference columnReference = new IpColumnReference(IP_COLUMN);
    columnReference.startCollect(ctx);
    columnReference.setNextReader(readerContext);
    IndexSearcher searcher = new IndexSearcher(readerContext.reader());
    TopDocs topDocs = searcher.search(new MatchAllDocsQuery(), 21);
    assertThat(topDocs.scoreDocs.length, is(21));

    int i = 0;
    for (ScoreDoc doc : topDocs.scoreDocs) {
        columnReference.setNextDocId(doc.doc);
        if (i == 20) {
            assertThat(columnReference.value(), is(nullValue()));
        } else if (i < 10) {
            assertThat(columnReference.value(), is("192.168.0." + i));
        } else {
            assertThat(columnReference.value(),
                is("7bd0:8082:2df8:487e:e0df:e7b5:9362:" + Integer.toHexString(i)));
        }
        i++;
    }
}
 
Example 5
Source Project: lucene-solr   Source File: CompositeVerifyQuery.java    License: Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Weight indexQueryWeight = indexQuery.createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost); // scores aren't needed here

  return new ConstantScoreWeight(this, boost) {

    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {

      final Scorer indexQueryScorer = indexQueryWeight.scorer(context);
      if (indexQueryScorer == null) {
        return null;
      }

      final TwoPhaseIterator predFuncValues = predicateValueSource.iterator(context, indexQueryScorer.iterator());
      return new ConstantScoreScorer(this, score(), scoreMode, predFuncValues);
    }

    @Override
    public boolean isCacheable(LeafReaderContext ctx) {
      return predicateValueSource.isCacheable(ctx);
    }

  };
}
 
Example 6
Source Project: lucene-solr   Source File: TestSelectiveWeightCreation.java    License: Apache License 2.0
private LTRScoringQuery.ModelWeight performQuery(TopDocs hits,
    IndexSearcher searcher, int docid, LTRScoringQuery model) throws IOException,
    ModelException {
  final List<LeafReaderContext> leafContexts = searcher.getTopReaderContext()
      .leaves();
  final int n = ReaderUtil.subIndex(hits.scoreDocs[0].doc, leafContexts);
  final LeafReaderContext context = leafContexts.get(n);
  final int deBasedDoc = hits.scoreDocs[0].doc - context.docBase;

  final Weight weight = searcher.createWeight(searcher.rewrite(model), ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(context);

  // rerank using the field final-score
  scorer.iterator().advance(deBasedDoc);
  scorer.score();
  assertTrue(weight instanceof LTRScoringQuery.ModelWeight);
  final LTRScoringQuery.ModelWeight modelWeight = (LTRScoringQuery.ModelWeight) weight;
  return modelWeight;

}
 
Example 7
Source Project: lucene-solr   Source File: TestSortedSetDocValuesFacets.java    License: Apache License 2.0
private static Facets getAllFacets(IndexSearcher searcher, SortedSetDocValuesReaderState state,
                                   ExecutorService exec) throws IOException, InterruptedException {
  if (random().nextBoolean()) {
    FacetsCollector c = new FacetsCollector();
    searcher.search(new MatchAllDocsQuery(), c);
    if (exec != null) {
      return new ConcurrentSortedSetDocValuesFacetCounts(state, c, exec);
    } else {
      return new SortedSetDocValuesFacetCounts(state, c);
    }
  } else if (exec != null) {
    return new ConcurrentSortedSetDocValuesFacetCounts(state, exec);
  } else {
    return new SortedSetDocValuesFacetCounts(state);
  }
}
 
Example 8
Source Project: lucene-solr   Source File: TestSimilarity2.java    License: Apache License 2.0
/** similar to the above, but ORs the query with a real field */
public void testEmptyField() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newTextField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    query.add(new TermQuery(new Term("bar", "baz")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query.build(), 10).totalHits.value);
  }
  ir.close();
  dir.close();
}
 
Example 9
Source Project: crate   Source File: LuceneOrderedDocCollectorTest.java    License: Apache License 2.0
@Test
public void testSearchWithScores() throws Exception {
    IndexWriter w = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
    KeywordFieldMapper.KeywordFieldType fieldType = new KeywordFieldMapper.KeywordFieldType();
    fieldType.setName("x");
    fieldType.freeze();

    for (int i = 0; i < 3; i++) {
        addDoc(w, fieldType, "Arthur");
    }
    addDoc(w, fieldType, "Arthur"); // not "Arthur" to lower score
    w.commit();
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(w, true, true));

    List<LuceneCollectorExpression<?>> columnReferences = Collections.singletonList(new ScoreCollectorExpression());
    Query query = fieldType.termsQuery(Collections.singletonList("Arthur"), null);
    LuceneOrderedDocCollector collector = collector(searcher, columnReferences, query, null, true);
    KeyIterable<ShardId, Row> result = collector.collect();

    assertThat(Iterables.size(result), is(2));

    Iterator<Row> values = result.iterator();

    assertThat(values.next().get(0), Matchers.is(1.0F));
    assertThat(values.next().get(0), Matchers.is(1.0F));
}
 
Example 10
Source Project: cqunews-web   Source File: SearchDao.java    License: Apache License 2.0
/**
 * @description: search by keyword
 * @param keywords the search keywords
 * @return TopDocs
 * @throws IOException
 * @throws ParseException
 */
public TopDocs searcher(String keywords) throws IOException, ParseException {
	Directory directory = FSDirectory.open(new File(Constant.INDEXDIR));
	TopDocs topDocs =null;
	
	IndexReader indexReader = DirectoryReader.open(directory);
	IndexSearcher indexSearcher = new IndexSearcher(indexReader);
	
	Query query = new TermQuery(new Term("title",
			keywords));
	// retrieve the top n records matching the query
	topDocs = indexSearcher.search(query, 10);
	System.out.println("返回总记录数" + topDocs.totalHits);
	ScoreDoc scoreDocs[] = topDocs.scoreDocs;
	for (ScoreDoc scoreDoc : scoreDocs) {
		
		int docID = scoreDoc.doc;
		// retrieve the document by its id
		Document document = indexSearcher.doc(docID);
		System.out.println("标题:"+document.get("title"));
		System.out.println("内容:"+document.get("content"));
		System.out.println("-----------------------------------------------------");
	}
	return topDocs;
}
 
Example 11
Source Project: lucene-solr   Source File: FuzzyCompletionQuery.java    License: Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  final Automaton originalAutomata;
  try (CompletionTokenStream stream = (CompletionTokenStream) analyzer.tokenStream(getField(), getTerm().text()) ) {
    originalAutomata = stream.toAutomaton(unicodeAware);
  }
  Set<IntsRef> refs = new HashSet<>();
  Automaton automaton = toLevenshteinAutomata(originalAutomata, refs);
  if (unicodeAware) {
    Automaton utf8automaton = new UTF32ToUTF8().convert(automaton);
    utf8automaton = Operations.determinize(utf8automaton, maxDeterminizedStates);
    automaton = utf8automaton;
  }
  // TODO Accumulating all refs is bad, because the resulting set may be very big.
  // TODO Better iterate over automaton again inside FuzzyCompletionWeight?
  return new FuzzyCompletionWeight(this, automaton, refs);
}
 
Example 12
Source Project: lucene-solr   Source File: TestGeo3DPoint.java    License: Apache License 2.0
public void testBasic() throws Exception {
  Directory dir = getDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig();
  iwc.setCodec(getCodec());
  IndexWriter w = new IndexWriter(dir, iwc);
  Document doc = new Document();
  PlanetModel planetModel = randomPlanetModel();
  doc.add(new Geo3DPoint("field", planetModel, 50.7345267, -97.5303555));
  w.addDocument(doc);
  IndexReader r = DirectoryReader.open(w);
  // We can't wrap with "exotic" readers because the query must see the BKD3DDVFormat:
  IndexSearcher s = newSearcher(r, false);
  assertEquals(1, s.search(Geo3DPoint.newShapeQuery("field",
                                                    GeoCircleFactory.makeGeoCircle(planetModel, toRadians(50), toRadians(-97), Math.PI/180.)), 1).totalHits.value);
  w.close();
  r.close();
  dir.close();
}
 
Example 13
Source Project: lucene-solr   Source File: TestQueryBuilder.java    License: Apache License 2.0
public void testMaxBooleanClause() throws Exception {
  int size = 34;
  CannedBinaryTokenStream.BinaryToken[] tokens = new CannedBinaryTokenStream.BinaryToken[size];
  BytesRef term1 = new BytesRef("ff");
  BytesRef term2 = new BytesRef("f");
  for (int i = 0; i < size;) {
    if (i % 2 == 0) {
      tokens[i] = new CannedBinaryTokenStream.BinaryToken(term2, 1, 1);
      tokens[i + 1] = new CannedBinaryTokenStream.BinaryToken(term1, 0, 2);
      i += 2;
    } else {
      tokens[i] = new CannedBinaryTokenStream.BinaryToken(term2, 1, 1);
      i ++;
    }
  }
  QueryBuilder qb = new QueryBuilder(null);
  try (TokenStream ts = new CannedBinaryTokenStream(tokens)) {
    expectThrows(IndexSearcher.TooManyClauses.class, () -> qb.analyzeGraphBoolean("", ts, BooleanClause.Occur.MUST));
  }
  try (TokenStream ts = new CannedBinaryTokenStream(tokens)) {
    expectThrows(IndexSearcher.TooManyClauses.class, () -> qb.analyzeGraphBoolean("", ts, BooleanClause.Occur.SHOULD));
  }
  try (TokenStream ts = new CannedBinaryTokenStream(tokens)) {
    expectThrows(IndexSearcher.TooManyClauses.class, () -> qb.analyzeGraphPhrase(ts, "", 0));
  }
}
 
Example 14
Source Project: tagme   Source File: AnchorIndexer.java    License: Apache License 2.0
static int freq(Set<String> anchors, IndexSearcher index, QueryParser queryParser) throws IOException
{
	//int sum = 0;
	BitSet bits = new BitSet(index.maxDoc());
	for(String a : anchors)
	{
		try {
			Query q = queryParser.parse(String.format(QUERY_PATTERN, QueryParser.escape(a)));
			
			TotalHitCountCollectorSet results = new TotalHitCountCollectorSet(bits);
			
			index.search(q, results);
		
			//sum += results.getTotalHits();
		
		} catch (ParseException e) {
			// ignore anchors that cannot be parsed into a valid query
		}
	}
	return bits.cardinality();
}
 
Example 15
Source Project: lucene4ir   Source File: LanguageModel.java    License: Apache License 2.0
public LanguageModel(IndexReader ir, int[] doc_ids) {
    reader = ir;
    searcher = new IndexSearcher(reader);
    this.doc_ids = doc_ids;
    doc_len = 0.0;
    for (int doc_id : doc_ids) {
        updateTermCountMap(doc_id, 1.0);
    }

    try {
        collectionStats = searcher.collectionStatistics(field);
        token_count = collectionStats.sumTotalTermFreq();
    } catch (IOException e) {
        e.printStackTrace();
        System.exit(1);
    }
}
 
Example 16
Source Project: onedev   Source File: DefaultIndexManager.java    License: MIT License
@Override
public boolean isIndexed(Project project, ObjectId commit) {
	File indexDir = storageManager.getProjectIndexDir(project.getId());
	try (Directory directory = FSDirectory.open(indexDir.toPath())) {
		if (DirectoryReader.indexExists(directory)) {
			try (IndexReader reader = DirectoryReader.open(directory)) {
				IndexSearcher searcher = new IndexSearcher(reader);
				return getIndexVersion().equals(getCommitIndexVersion(searcher, commit));
			}
		} else {
			return false;
		}
	} catch (IOException e) {
		throw new RuntimeException(e);
	}
}
 
Example 17
Source Project: solr-redis   Source File: TestRedisQParser.java    License: Apache License 2.0
@Test
public void shouldReturnEmptyQueryOnEmptyListOfHget() throws SyntaxError, IOException {
  when(localParamsMock.get("command")).thenReturn("hget");
  when(localParamsMock.get("key")).thenReturn("simpleKey");
  when(localParamsMock.get("field")).thenReturn("f1");
  when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
  when(jedisMock.hget(anyString(), anyString())).thenReturn(null);
  when(requestMock.getSchema()).thenReturn(schema);
  when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
  redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
  final Query query = redisQParser.parse();
  verify(jedisMock).hget("simpleKey", "f1");
  IndexSearcher searcher = new IndexSearcher(new MultiReader());
  final Set<Term> terms = extractTerms(searcher, query);
  Assert.assertEquals(0, terms.size());
}
 
Example 18
public void testMatchesSlopBug() throws IOException {
  IndexReader ir = indexSomeFields();
  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, indexAnalyzer);
  Query query = new IntervalQuery("title", Intervals.maxgaps(random().nextBoolean() ? 1 : 2,
      Intervals.ordered(
          Intervals.term("this"), Intervals.term("is"), Intervals.term("the"), Intervals.term("field"))));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(1, topDocs.totalHits.value);
  String[] snippets = highlighter.highlight("title", query, topDocs, 10);
  assertEquals(1, snippets.length);
  if (highlighter.getFlags("title").contains(HighlightFlag.WEIGHT_MATCHES)) {
    assertEquals("" + highlighter.getFlags("title"),
        "<b>This is the title field</b>.", snippets[0]);
  } else {
    assertEquals("" + highlighter.getFlags("title"),
        "<b>This</b> <b>is</b> <b>the</b> title <b>field</b>.", snippets[0]);
  }
  ir.close();
}
 
Example 19
Source Project: lucene-solr   Source File: HashQParserPlugin.java    License: Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {

  SolrIndexSearcher solrIndexSearcher = (SolrIndexSearcher)searcher;
  IndexReaderContext context = solrIndexSearcher.getTopReaderContext();

  List<LeafReaderContext> leaves =  context.leaves();
  FixedBitSet[] fixedBitSets = new FixedBitSet[leaves.size()];

  for(LeafReaderContext leaf : leaves) {
    try {
      SegmentPartitioner segmentPartitioner = new SegmentPartitioner(leaf,worker,workers, keys, solrIndexSearcher);
      segmentPartitioner.run();
      fixedBitSets[segmentPartitioner.context.ord] = segmentPartitioner.docs;
    } catch(Exception e) {
      throw new IOException(e);
    }
  }

  ConstantScoreQuery constantScoreQuery = new ConstantScoreQuery(new BitsFilter(fixedBitSets));
  return searcher.rewrite(constantScoreQuery).createWeight(searcher, ScoreMode.COMPLETE_NO_SCORES, boost);
}
 
Example 20
Source Project: Lottery   Source File: LuceneContentSvcImpl.java    License: GNU General Public License v2.0
@Transactional(readOnly = true)
public Pagination searchPage(Directory dir, String queryString,String category,String workplace,
		Integer siteId, Integer channelId, Date startDate, Date endDate,
		int pageNo, int pageSize) throws CorruptIndexException,
		IOException, ParseException {
	Searcher searcher = new IndexSearcher(dir);
	try {
		Analyzer analyzer = new StandardAnalyzer(Version.LUCENE_30);
		Query query = LuceneContent.createQuery(queryString,category,workplace, siteId,
				channelId, startDate, endDate, analyzer);
		TopDocs docs = searcher.search(query, pageNo * pageSize);
		Pagination p = LuceneContent.getResultPage(searcher, docs, pageNo,
				pageSize);
		List<?> ids = p.getList();
		List<Content> contents = new ArrayList<Content>(ids.size());
		for (Object id : ids) {
			contents.add(contentMng.findById((Integer) id));
		}
		p.setList(contents);
		return p;
	} finally {
		searcher.close();
	}
}
 
Example 21
Source Project: lucene-solr   Source File: AnalyzingInfixSuggester.java    License: Apache License 2.0
@Override
public long getCount() throws IOException {
  if (searcherMgr == null) {
    return 0;
  }
  SearcherManager mgr;
  IndexSearcher searcher;
  synchronized (searcherMgrLock) {
    mgr = searcherMgr; // acquire & release on same SearcherManager, via local reference
    searcher = mgr.acquire();
  }
  try {
    return searcher.getIndexReader().numDocs();
  } finally {
    mgr.release(searcher);
  }
}
 
Example 22
Source Project: lucene-solr   Source File: TestBigIntegerPoint.java    License: Apache License 2.0
/** Add a single 1D point and search for it */
public void testBasics() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a doc with a large biginteger value
  Document document = new Document();
  BigInteger large = BigInteger.valueOf(Long.MAX_VALUE).multiply(BigInteger.valueOf(64));
  document.add(new BigIntegerPoint("field", large));
  writer.addDocument(document);
  
  // search and verify we found our doc
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  assertEquals(1, searcher.count(BigIntegerPoint.newExactQuery("field", large)));
  assertEquals(1, searcher.count(BigIntegerPoint.newRangeQuery("field", large.subtract(BigInteger.ONE), large.add(BigInteger.ONE))));
  assertEquals(1, searcher.count(BigIntegerPoint.newSetQuery("field", large)));
  assertEquals(0, searcher.count(BigIntegerPoint.newSetQuery("field", large.subtract(BigInteger.ONE))));
  assertEquals(0, searcher.count(BigIntegerPoint.newSetQuery("field")));

  reader.close();
  writer.close();
  dir.close();
}
 
Example 23
Source Project: lucene-solr   Source File: TestSpanTermQuery.java    License: Apache License 2.0
public void testNoPositions() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new StringField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  
  IndexReader ir = iw.getReader();
  iw.close();
  
  IndexSearcher is = new IndexSearcher(ir);
  SpanTermQuery query = new SpanTermQuery(new Term("foo", "bar"));
  IllegalStateException expected = expectThrows(IllegalStateException.class, () -> {
    is.search(query, 5);
  });
  assertTrue(expected.getMessage().contains("was indexed without position data"));

  ir.close();
  dir.close();
}
 
Example 24
Source Project: solr-redis   Source File: TestRedisQParser.java    License: Apache License 2.0
@Test
public void shouldParseJsonTermsFromRedisOnGetCommand() throws SyntaxError, IOException {
  when(localParamsMock.get("command")).thenReturn("get");
  when(localParamsMock.get("key")).thenReturn("simpleKey");
  when(localParamsMock.get("serialization")).thenReturn("json");
  when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
  when(jedisMock.get(any(byte[].class))).thenReturn("[1,2,3]".getBytes());
  when(requestMock.getSchema()).thenReturn(schema);
  when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
  redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
  final Query query = redisQParser.parse();
  verify(jedisMock).get("simpleKey".getBytes());
  IndexSearcher searcher = new IndexSearcher(new MultiReader());
  final Set<Term> terms = extractTerms(searcher, query);
  Assert.assertEquals(3, terms.size());
}
 
Example 25
Source Project: lucene-solr   Source File: SegmentInfosSearcherManager.java    License: Apache License 2.0
@Override
protected IndexSearcher refreshIfNeeded(IndexSearcher old) throws IOException {
  List<LeafReader> subs;
  if (old == null) {
    subs = null;
  } else {
    subs = new ArrayList<>();
    for(LeafReaderContext ctx : old.getIndexReader().leaves()) {
      subs.add(ctx.reader());
    }
  }

  // Open a new reader, sharing any common segment readers with the old one:
  DirectoryReader r = StandardDirectoryReader.open(dir, currentInfos, subs);
  addReaderClosedListener(r);
  node.message("refreshed to version=" + currentInfos.getVersion() + " r=" + r);
  return SearcherManager.getSearcher(searcherFactory, r, old.getIndexReader());
}
 
Example 26
public static IndexBasedEntityChecker create(String indexDirPath) {
    Directory indexDirectory = null;
    File directoryPath = new File(indexDirPath);
    if (directoryPath.exists() && directoryPath.isDirectory() && (directoryPath.list().length > 0)) {
        try {
            indexDirectory = FSDirectory.open(directoryPath.toPath());
            IndexReader indexReader = DirectoryReader.open(indexDirectory);
            IndexSearcher indexSearcher = new IndexSearcher(indexReader);
            return new IndexBasedEntityChecker(indexSearcher, indexDirectory, indexReader);
        } catch (IOException e) {
            LOGGER.error("Exception while trying to open index for entity checking. Returning null.", e);
            IOUtils.closeQuietly(indexDirectory);
            return null;
        }
    } else {
        LOGGER.warn(
                "The configured path to the entity checking index (\"{}\") does not exist,  is not a directory or is an empty directory. Returning null.",
                directoryPath.toString());
        return null;
    }
}
 
Example 27
Source Project: solr-redis   Source File: TestRedisQParser.java    License: Apache License 2.0
@Test
public void shouldAddTermsFromRedisOnLrangeCommandCustomMax() throws SyntaxError, IOException {
  when(localParamsMock.get("command")).thenReturn("lrange");
  when(localParamsMock.get("max")).thenReturn("1");
  when(localParamsMock.get("key")).thenReturn("simpleKey");
  when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
  when(jedisMock.lrange(anyString(), anyLong(), anyLong())).thenReturn(Arrays.asList("123", "321"));
  when(requestMock.getSchema()).thenReturn(schema);
  when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
  redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
  final Query query = redisQParser.parse();
  verify(jedisMock).lrange("simpleKey", 0, 1);
  IndexSearcher searcher = new IndexSearcher(new MultiReader());
  final Set<Term> terms = extractTerms(searcher, query);
  Assert.assertEquals(2, terms.size());
}
 
Example 28
Source Project: lucene-solr   Source File: TestFieldCacheSort.java    License: Apache License 2.0
/** test that we throw exception on multi-valued field, creates corrupt reader, use SORTED_SET instead */
public void testMultiValuedField() throws IOException {
  Directory indexStore = newDirectory();
  IndexWriter writer = new IndexWriter(indexStore, newIndexWriterConfig(new MockAnalyzer(random())));
  for(int i=0; i<5; i++) {
      Document doc = new Document();
      doc.add(new StringField("string", "a"+i, Field.Store.NO));
      doc.add(new StringField("string", "b"+i, Field.Store.NO));
      writer.addDocument(doc);
  }
  writer.forceMerge(1); // enforce one segment to have a higher unique term count in all cases
  writer.close();
  Sort sort = new Sort(
      new SortField("string", SortField.Type.STRING),
      SortField.FIELD_DOC);
  IndexReader reader = UninvertingReader.wrap(DirectoryReader.open(indexStore),
                       Collections.singletonMap("string", Type.SORTED));
  IndexSearcher searcher = new IndexSearcher(reader);
  expectThrows(IllegalStateException.class, () -> {
    searcher.search(new MatchAllDocsQuery(), 500, sort);
  });
  reader.close();
  indexStore.close();
}
 
Example 29
Source Project: AGDISTIS   Source File: TripleIndex.java    License: GNU Affero General Public License v3.0
public TripleIndex() throws IOException {
	Properties prop = new Properties();
	InputStream input = TripleIndex.class.getResourceAsStream("/config/agdistis.properties");
	prop.load(input);

	String envIndex = System.getenv("AGDISTIS_INDEX");
	String index = envIndex != null ? envIndex : prop.getProperty("index");
	log.info("The index will be here: " + index);

	directory = new MMapDirectory(new File(index));
	ireader = DirectoryReader.open(directory);
	isearcher = new IndexSearcher(ireader);
	this.urlValidator = new UrlValidator();

	cache = CacheBuilder.newBuilder().maximumSize(50000).build();
}
 
Example 30
Source Project: incubator-pinot   Source File: TextSearchQueriesTest.java    License: Apache License 2.0
@Override
public void run() {
  try {
    Query query = queryParser.parse("\"machine learning\" AND spark");
    int count  = 0;
    int prevHits = 0;
    // run the same query 1000 times and expect an increasing number of hits
    // in the index
    while (count < 1000) {
      IndexSearcher indexSearcher = searchManager.acquire();
      int hits = indexSearcher.search(query, Integer.MAX_VALUE).scoreDocs.length;
      // TODO: see how we can make this more deterministic
      if (count > 200) {
        // we should see an increasing number of hits
        Assert.assertTrue(hits > 0);
        Assert.assertTrue(hits >= prevHits);
      }
      count++;
      prevHits = hits;
      searchManager.release(indexSearcher);
      Thread.sleep(1);
    }
  } catch (Exception e) {
    throw new RuntimeException("Caught exception in realtime reader");
  }
}