Java Code Examples for org.apache.lucene.search.IndexSearcher#close()

The following examples show how to use org.apache.lucene.search.IndexSearcher#close(). Each example is taken from an open-source project; the source file, project, and license are noted above it.
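Before the project examples, here is a minimal sketch of the typical open/search/close lifecycle, assuming Lucene 3.x (the last major line in which IndexSearcher still has a close() method; from 4.0 on the IndexReader is closed instead). The index path and field names below are made up for illustration.

import java.io.File;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

public class SearchAndClose {
  public static void main(String[] args) throws Exception {
    Directory dir = FSDirectory.open(new File("/tmp/example-index")); // hypothetical index location
    IndexReader reader = IndexReader.open(dir);
    IndexSearcher searcher = new IndexSearcher(reader);
    try {
      Query query = new QueryParser(Version.LUCENE_36, "contents",
          new StandardAnalyzer(Version.LUCENE_36)).parse("apache");
      TopDocs topDocs = searcher.search(query, 10);
      for (ScoreDoc sd : topDocs.scoreDocs) {
        System.out.println(searcher.doc(sd.doc).get("url"));
      }
    } finally {
      searcher.close(); // releases the searcher; does not close the reader we passed in
      reader.close();   // the reader (and its index files) must be closed separately
      dir.close();
    }
  }
}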
Example 1
Source File: CrawlerTest.java    From JPPF with Apache License 2.0
/**
 * Test searching with Lucene.
 * @param search the Lucene query text.
 * @param max the maximum number of results to show.
 * @throws Exception if an error is thrown while executing.
 */
public static void luceneSearch(final String search, final int max) throws Exception {
  print("Searching for: " + search);
  print("  max results: " + max);

  final IndexSearcher is = new IndexSearcher(index);
  final QueryParser parser = new QueryParser("contents", new StandardAnalyzer());

  final Query query = parser.parse(search);
  final Hits hits = is.search(query);

  print("    results: " + hits.length());

  for (int i = 0; i < Math.min(hits.length(), max); i++) {
    final float relevance = ((float) Math.round(hits.score(i) * 1000)) / 10;
    final String url = hits.doc(i).getField("url").stringValue();
    print("No " + (i + 1) + " with relevance " + relevance + "% : " + url);
  }

  is.close();
}
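The Hits API used here was removed in Lucene 3.0. On the 2.x releases this example targets, the same loop can already be written against TopDocs, and close() can be moved into a finally block so it also runs when parsing or searching throws. A sketch, reusing the example's print(...) helper and index field:

public static void luceneSearch(final String search, final int max) throws Exception {
  print("Searching for: " + search);
  print("  max results: " + max);

  final IndexSearcher is = new IndexSearcher(index);
  try {
    final QueryParser parser = new QueryParser("contents", new StandardAnalyzer());
    final Query query = parser.parse(search);
    // Ask for at most `max` hits up front instead of iterating a Hits object.
    final TopDocs topDocs = is.search(query, null, max);
    print("    results: " + topDocs.totalHits);

    for (int i = 0; i < topDocs.scoreDocs.length; i++) {
      final ScoreDoc sd = topDocs.scoreDocs[i];
      final float relevance = ((float) Math.round(sd.score * 1000)) / 10;
      final String url = is.doc(sd.doc).getField("url").stringValue();
      print("No " + (i + 1) + " with relevance " + relevance + "% : " + url);
    }
  } finally {
    is.close(); // runs even if parse() or search() throws
  }
}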
 
Example 2
Source File: AutoCompleter.java    From webdsl with Apache License 2.0
private void swapSearcher(final Directory dir) throws IOException {
  /*
   * Opening a searcher can be very expensive, so it is done outside the lock.
   * If the AutoCompleter was closed while this searcher was being opened,
   * we simply close the new searcher again rather than having blocked
   * access to the current searcher during the open.
   */
  final IndexSearcher indexSearcher = createSearcher(dir);
  synchronized (searcherLock) {
    if (closed) {
      indexSearcher.close();
      throw new AlreadyClosedException("Autocompleter has been closed");
    }
    if (searcher != null) {
      searcher.close();
    }
    // set the autocomplete index in the sync block - ensure consistency.
    searcher = indexSearcher;
    this.autoCompleteIndex = dir;
  }
}
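The counterpart of this pattern is a close() method that takes the same lock, flips the closed flag, and releases the current searcher exactly once. A sketch under the same assumptions (the searcher, searcherLock, and closed fields shown above); this is not necessarily how AutoCompleter itself implements it:

public void close() throws IOException {
  synchronized (searcherLock) {
    if (closed) {
      return; // already closed; avoid closing the searcher twice
    }
    closed = true; // from now on, swapSearcher() rejects searchers opened concurrently
    if (searcher != null) {
      searcher.close(); // release the index files held open by the current searcher
      searcher = null;
    }
  }
}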
 
Example 3
Source File: TestMixedDirectory.java    From RDFS with Apache License 2.0
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
  int numHits = hits.length();

  assertEquals(expectedHits, numHits);

  int[] docs = new int[numHits];
  for (int i = 0; i < numHits; i++) {
    Document hit = hits.doc(i);
    docs[Integer.parseInt(hit.get("id"))]++;
  }
  for (int i = 0; i < numHits; i++) {
    assertEquals(1, docs[i]);
  }

  searcher.close();
}
 
Example 4
Source File: TestMixedDirectory.java    From hadoop-gpu with Apache License 2.0
private void verify(Directory dir, int expectedHits) throws IOException {
  IndexSearcher searcher = new IndexSearcher(dir);
  Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
  int numHits = hits.length();

  assertEquals(expectedHits, numHits);

  int[] docs = new int[numHits];
  for (int i = 0; i < numHits; i++) {
    Document hit = hits.doc(i);
    docs[Integer.parseInt(hit.get("id"))]++;
  }
  for (int i = 0; i < numHits; i++) {
    assertEquals(1, docs[i]);
  }

  searcher.close();
}
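Note that these two verify() methods build the searcher with the IndexSearcher(Directory) constructor. In the Lucene 2.x/3.x versions these examples target, a searcher created from a Directory opens its own IndexReader internally, and searcher.close() closes that reader as well. When the searcher is created from an existing IndexReader (as in Examples 7 and 8 below), close() leaves the reader open and it must be closed separately.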
 
Example 5
Source File: CourseServiceImpl.java    From TinyMooc with Apache License 2.0
public List<Course> getCourses(String query) {
    IndexSearcher indexSearcher = null;
    try {
        List<Course> qlist = new ArrayList<Course>();
        indexSearcher = new IndexSearcher(INDEXPATH);
        // Search both the courseIntro and courseTitle fields; SHOULD means the clauses are ORed.
        BooleanClause.Occur[] clauses = {BooleanClause.Occur.SHOULD, BooleanClause.Occur.SHOULD};
        Query queryOBJ = MultiFieldQueryParser.parse(query, new String[]{"courseIntro", "courseTitle"}, clauses, new StandardAnalyzer());
        Filter filter = null;
        // Fetch the most relevant matches, up to 1000.
        TopDocs topDocs = indexSearcher.search(queryOBJ, filter, 1000);
        Course course = null;

        // Build a Course object from each matching document.
        for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
            Document targetDoc = indexSearcher.doc(scoreDoc.doc);
            course = new Course();
            String courseIntro = targetDoc.get("courseIntro");
            String courseTitle = targetDoc.get("courseTitle");
            String courseId = targetDoc.get("courseId");
            // These token streams are created but not used further in this snippet.
            TokenStream contentTokenStream = analyzer.tokenStream("courseIntro", new StringReader(courseIntro));
            TokenStream titleTokenStream = analyzer.tokenStream("courseTitle", new StringReader(courseTitle));
            course.setCourseIntro(courseIntro);
            course.setCourseTitle(courseTitle);
            course.setCourseId(courseId);
            course.setType(targetDoc.get("type"));
            course.setCourseState(targetDoc.get("courseState"));
            qlist.add(course);
        }
        return qlist;
    } catch (Exception e) {
        logger.error("getCourses error.", e);
        return null;
    } finally {
        // Always release the searcher, even when parsing or searching fails.
        if (indexSearcher != null) {
            try {
                indexSearcher.close();
            } catch (IOException ignored) {
                // nothing useful can be done if closing fails
            }
        }
    }
}
 
Example 6
Source File: FilesystemStore.java    From restcommander with Apache License 2.0
public void stop() throws Exception {
    for (IndexWriter writer : indexWriters.values()) {
        writer.close();
    }
    for (IndexSearcher searcher : indexSearchers.values()) {
        searcher.close();
    }
    indexWriters.clear();
    indexSearchers.clear();
}
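Because stop() declares throws Exception, a failure while closing one writer or searcher aborts the loop and leaves the remaining ones open. A variant that tolerates individual close() failures might look like the sketch below (same indexWriters and indexSearchers maps as above; the silent catch blocks are a deliberate shutdown-path trade-off and are not part of the original class):

public void stop() {
    for (IndexWriter writer : indexWriters.values()) {
        try {
            writer.close();
        } catch (Exception e) {
            // keep shutting down the remaining writers even if this one fails
        }
    }
    for (IndexSearcher searcher : indexSearchers.values()) {
        try {
            searcher.close();
        } catch (Exception e) {
            // a searcher that fails to close is simply dropped along with the map below
        }
    }
    indexWriters.clear();
    indexSearchers.clear();
}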
 
Example 7
Source File: TestDistributionPolicy.java    From RDFS with Apache License 2.0
private void verify(Shard[] shards) throws IOException {
  // verify the index
  IndexReader[] readers = new IndexReader[shards.length];
  for (int i = 0; i < shards.length; i++) {
    Directory dir =
        new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
            false, conf);
    readers[i] = IndexReader.open(dir);
  }

  IndexReader reader = new MultiReader(readers);
  IndexSearcher searcher = new IndexSearcher(reader);
  Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
  assertEquals(0, hits.length());

  hits = searcher.search(new TermQuery(new Term("content", "hadoop")));
  assertEquals(numDocsPerRun / 2, hits.length());

  int[] counts = new int[numDocsPerRun];
  for (int i = 0; i < hits.length(); i++) {
    Document doc = hits.doc(i);
    counts[Integer.parseInt(doc.get("id"))]++;
  }

  for (int i = 0; i < numDocsPerRun; i++) {
    if (i % 2 == 0) {
      assertEquals(0, counts[i]);
    } else {
      assertEquals(1, counts[i]);
    }
  }

  searcher.close();
  reader.close();
}
 
Example 8
Source File: TestDistributionPolicy.java    From hadoop-gpu with Apache License 2.0
private void verify(Shard[] shards) throws IOException {
  // verify the index
  IndexReader[] readers = new IndexReader[shards.length];
  for (int i = 0; i < shards.length; i++) {
    Directory dir =
        new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
            false, conf);
    readers[i] = IndexReader.open(dir);
  }

  IndexReader reader = new MultiReader(readers);
  IndexSearcher searcher = new IndexSearcher(reader);
  Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
  assertEquals(0, hits.length());

  hits = searcher.search(new TermQuery(new Term("content", "hadoop")));
  assertEquals(numDocsPerRun / 2, hits.length());

  int[] counts = new int[numDocsPerRun];
  for (int i = 0; i < hits.length(); i++) {
    Document doc = hits.doc(i);
    counts[Integer.parseInt(doc.get("id"))]++;
  }

  for (int i = 0; i < numDocsPerRun; i++) {
    if (i % 2 == 0) {
      assertEquals(0, counts[i]);
    } else {
      assertEquals(1, counts[i]);
    }
  }

  searcher.close();
  reader.close();
}
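In these last two examples the searcher is built from an IndexReader, so searcher.close() does not close that reader; both close() calls are needed, and closing the MultiReader in turn closes the sub-readers it was given. Since assertEquals throws on failure, the cleanup at the end only runs when every assertion passes; below is a sketch of the closing portion moved into a finally block (the verifyAndClose name and the reduced body are illustrative, not from either project):

private void verifyAndClose(IndexReader[] readers) throws IOException {
  IndexReader reader = new MultiReader(readers); // closing this also closes the sub-readers
  IndexSearcher searcher = new IndexSearcher(reader);
  try {
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));
    assertEquals(0, hits.length());
  } finally {
    searcher.close(); // does not close a reader passed to the constructor
    reader.close();   // must be closed explicitly
  }
}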