Java Code Examples for org.apache.lucene.index.DirectoryReader
The following examples show how to use
org.apache.lucene.index.DirectoryReader. These examples are extracted from open source projects.
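Before the project-specific examples, here is a minimal sketch of the pattern most of them follow: open a Directory, wrap it in a DirectoryReader, search through an IndexSearcher, and close the reader when done. The index path and the "content" field below are illustrative placeholders, not taken from any of the projects listed.

import java.nio.file.Paths;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DirectoryReaderUsage {
  public static void main(String[] args) throws Exception {
    // "/tmp/my-index" and the "content" field are placeholders for illustration only.
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/my-index"));
         DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      TopDocs topDocs = searcher.search(new TermQuery(new Term("content", "lucene")), 10);
      for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
        Document doc = searcher.doc(scoreDoc.doc);
        System.out.println(doc);
      }
    } // reader and directory are closed by try-with-resources
  }
}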
Example 1
Source Project: lucene-solr Source File: BaseShapeTestCase.java License: Apache License 2.0

private void verify(Object... shapes) throws Exception {
  IndexWriterConfig iwc = newIndexWriterConfig();
  iwc.setMergeScheduler(new SerialMergeScheduler());
  int mbd = iwc.getMaxBufferedDocs();
  if (mbd != -1 && mbd < shapes.length / 100) {
    iwc.setMaxBufferedDocs(shapes.length / 100);
  }
  Directory dir;
  if (shapes.length > 1000) {
    dir = newFSDirectory(createTempDir(getClass().getSimpleName()));
  } else {
    dir = newDirectory();
  }
  IndexWriter w = new IndexWriter(dir, iwc);

  // index random polygons
  indexRandomShapes(w, shapes);

  // query testing
  final IndexReader reader = DirectoryReader.open(w);

  // test random bbox queries
  verifyRandomQueries(reader, shapes);
  IOUtils.close(w, reader, dir);
}
Example 2
Source Project: lucene4ir Source File: RetrievalAppQueryExpansion.java License: Apache License 2.0

public RetrievalAppQueryExpansion(String retrievalParamFile) {
  System.out.println("Retrieval App");
  readParamsFromFile(retrievalParamFile);
  try {
    reader = DirectoryReader.open(FSDirectory.open(new File(p.indexName).toPath()));
    searcher = new IndexSearcher(reader);

    // Create similarity function and parameter
    selectSimilarityFunction(sim);
    searcher.setSimilarity(simfn);

    // Use whatever ANALYZER you want
    analyzer = new StandardAnalyzer();

    parser = new QueryParser("content", analyzer);
  } catch (Exception e) {
    System.out.println(" caught a " + e.getClass() + "\n with message: " + e.getMessage());
  }
}
Example 3
Source Project: crate Source File: GroupByOptimizedIteratorTest.java License: Apache License 2.0

@Test
public void testHighCardinalityRatioReturnsTrueForHighCardinality() throws Exception {
  IndexWriter iw = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
  String columnName = "x";
  for (int i = 0; i < 10; i++) {
    Document doc = new Document();
    BytesRef value = new BytesRef(Integer.toString(i));
    doc.add(new Field(columnName, value, KeywordFieldMapper.Defaults.FIELD_TYPE.clone()));
    iw.addDocument(doc);
  }
  iw.commit();

  IndexSearcher indexSearcher = new IndexSearcher(DirectoryReader.open(iw));

  assertThat(
    GroupByOptimizedIterator.hasHighCardinalityRatio(() -> new Engine.Searcher("dummy", indexSearcher, () -> {}), "x"),
    is(true)
  );
}
Example 4
Source Project: lucene-solr Source File: TestQueryBitSetProducer.java License: Apache License 2.0

public void testSimple() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = w.getReader();

  QueryBitSetProducer producer = new QueryBitSetProducer(new MatchNoDocsQuery());
  assertNull(producer.getBitSet(reader.leaves().get(0)));
  assertEquals(1, producer.cache.size());

  producer = new QueryBitSetProducer(new MatchAllDocsQuery());
  BitSet bitSet = producer.getBitSet(reader.leaves().get(0));
  assertEquals(1, bitSet.length());
  assertEquals(true, bitSet.get(0));
  assertEquals(1, producer.cache.size());

  IOUtils.close(reader, w, dir);
}
Example 5
Source Project: lucene-solr Source File: TermInSetQueryTest.java License: Apache License 2.0

public void testPullOneTermsEnum() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new StringField("foo", "1", Store.NO));
  w.addDocument(doc);
  DirectoryReader reader = w.getReader();
  w.close();
  final AtomicInteger counter = new AtomicInteger();
  DirectoryReader wrapped = new TermsCountingDirectoryReaderWrapper(reader, counter);

  final List<BytesRef> terms = new ArrayList<>();
  // enough terms to avoid the rewrite
  final int numTerms = TestUtil.nextInt(random(), TermInSetQuery.BOOLEAN_REWRITE_TERM_COUNT_THRESHOLD + 1, 100);
  for (int i = 0; i < numTerms; ++i) {
    final BytesRef term = new BytesRef(RandomStrings.randomUnicodeOfCodepointLength(random(), 10));
    terms.add(term);
  }

  assertEquals(0, new IndexSearcher(wrapped).count(new TermInSetQuery("bar", terms)));
  assertEquals(0, counter.get()); // missing field
  new IndexSearcher(wrapped).count(new TermInSetQuery("foo", terms));
  assertEquals(1, counter.get());
  wrapped.close();
  dir.close();
}
Example 6
Source Project: tutorials Source File: InMemoryLuceneIndex.java License: MIT License

public List<Document> searchIndex(String inField, String queryString) {
  try {
    Query query = new QueryParser(inField, analyzer).parse(queryString);

    IndexReader indexReader = DirectoryReader.open(memoryIndex);
    IndexSearcher searcher = new IndexSearcher(indexReader);
    TopDocs topDocs = searcher.search(query, 10);
    List<Document> documents = new ArrayList<>();
    for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
      documents.add(searcher.doc(scoreDoc.doc));
    }
    return documents;
  } catch (IOException | ParseException e) {
    e.printStackTrace();
  }
  return null;
}
Example 7
Source Project: sdudoc Source File: LuceneIndexSearch.java License: MIT License

/**
 * Initializes the indexSearch object.
 * @throws Exception
 */
public void createIndexSearch() {
  try {
    IndexReader indexReader = DirectoryReader.open(this.indexSettings.directory);
    this.indexSearcher = new IndexSearcher(indexReader);
    // Print the current index
    // for (int i = 0; i < indexReader.numDocs(); i++) {
    //   System.out.println(indexReader.document(i));
    //   System.out.println("File name: " + indexReader.document(i).get("fileName")
    //       + "\tFile description: " + indexReader.document(i).get("fileDesc")
    //       + "\tFile ID: " + indexReader.document(i).get("fileId")
    //       + "\tCreator: " + indexReader.document(i).get("fileCreator"));
    // }
    // System.out.println("Index version: " + indexReader.getCoreCacheKey());
    // System.out.println("Number of documents in the index: " + indexReader.numDocs());
  } catch (Exception e) {
    e.printStackTrace();
  }
}
Example 8
Source Project: lucene-solr Source File: TestIndexSortSortedNumericDocValuesRangeQuery.java License: Apache License 2.0

public void testIndexSortOptimizationDeactivated(RandomIndexWriter writer) throws IOException {
  DirectoryReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);

  Query query = createQuery("field", 0, 0);
  Weight weight = query.createWeight(searcher, ScoreMode.TOP_SCORES, 1.0F);

  // Check that the two-phase iterator is not null, indicating that we've fallen
  // back to SortedNumericDocValuesField.newSlowRangeQuery.
  for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
    Scorer scorer = weight.scorer(context);
    assertNotNull(scorer.twoPhaseIterator());
  }

  reader.close();
}
Example 9
Source Project: lucene-solr Source File: TestLRUQueryCache.java License: Apache License 2.0

public DummyDirectoryReader(DirectoryReader in) throws IOException {
  super(in, new SubReaderWrapper() {
    @Override
    public LeafReader wrap(LeafReader reader) {
      return new FilterLeafReader(reader) {
        @Override
        public CacheHelper getCoreCacheHelper() {
          return null;
        }

        @Override
        public CacheHelper getReaderCacheHelper() {
          return null;
        }
      };
    }
  });
}
Example 10
Source Project: lucene-solr Source File: LukeRequestHandler.java License: Apache License 2.0

public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
  Directory dir = reader.directory();
  SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<>();

  indexInfo.add("numDocs", reader.numDocs());
  indexInfo.add("maxDoc", reader.maxDoc());
  indexInfo.add("deletedDocs", reader.maxDoc() - reader.numDocs());
  indexInfo.add("indexHeapUsageBytes", getIndexHeapUsed(reader));

  indexInfo.add("version", reader.getVersion()); // TODO? Is this different then: IndexReader.getCurrentVersion(dir)?
  indexInfo.add("segmentCount", reader.leaves().size());
  indexInfo.add("current", closeSafe(reader::isCurrent));
  indexInfo.add("hasDeletions", reader.hasDeletions());
  indexInfo.add("directory", dir);

  IndexCommit indexCommit = reader.getIndexCommit();
  String segmentsFileName = indexCommit.getSegmentsFileName();
  indexInfo.add("segmentsFile", segmentsFileName);
  indexInfo.add("segmentsFileSizeInBytes", getSegmentsFileLength(indexCommit));
  Map<String, String> userData = indexCommit.getUserData();
  indexInfo.add("userData", userData);
  String s = userData.get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
  if (s != null) {
    indexInfo.add("lastModified", new Date(Long.parseLong(s)));
  }
  return indexInfo;
}
Example 11
Source Project: lucene-solr Source File: TestRegexCompletionQuery.java License: Apache License 2.0

@Test
public void testEmptyRegexContextQuery() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
  Document document = new Document();
  document.add(new ContextSuggestField("suggest_field", "suggestion", 1, "type"));
  iw.addDocument(document);

  if (rarely()) {
    iw.commit();
  }

  DirectoryReader reader = iw.getReader();
  SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
  ContextQuery query = new ContextQuery(new RegexCompletionQuery(new Term("suggest_field", "")));
  query.addContext("type", 1);

  TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false);
  assertEquals(0, suggest.scoreDocs.length);

  reader.close();
  iw.close();
}
Example 12
Source Project: lucene-solr Source File: TestQueryBitSetProducer.java License: Apache License 2.0

public void testReaderNotSuitedForCaching() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = new DummyDirectoryReader(w.getReader());

  QueryBitSetProducer producer = new QueryBitSetProducer(new MatchNoDocsQuery());
  assertNull(producer.getBitSet(reader.leaves().get(0)));
  assertEquals(0, producer.cache.size());

  producer = new QueryBitSetProducer(new MatchAllDocsQuery());
  BitSet bitSet = producer.getBitSet(reader.leaves().get(0));
  assertEquals(1, bitSet.length());
  assertEquals(true, bitSet.get(0));
  assertEquals(0, producer.cache.size());

  IOUtils.close(reader, w, dir);
}
Example 13
Source Project: lucene-solr Source File: LukeRequestHandler.java License: Apache License 2.0

/** Returns the sum of RAM bytes used by each segment */
private static long getIndexHeapUsed(DirectoryReader reader) {
  return reader.leaves().stream()
      .map(LeafReaderContext::reader)
      .map(FilterLeafReader::unwrap)
      .map(leafReader -> {
        if (leafReader instanceof Accountable) {
          return ((Accountable) leafReader).ramBytesUsed();
        } else {
          return -1L; // unsupported
        }
      })
      .mapToLong(Long::longValue)
      .reduce(0, (left, right) -> left == -1 || right == -1 ? -1 : left + right);
      // if any leaves are unsupported (-1), we ultimately return -1.
}
Example 14
Source Project: lucene-solr Source File: TestRegexCompletionQuery.java License: Apache License 2.0

@Test
public void testEmptyRegexQuery() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
  Document document = new Document();
  document.add(new SuggestField("suggest_field", "suggestion1", 1));
  iw.addDocument(document);

  if (rarely()) {
    iw.commit();
  }

  DirectoryReader reader = iw.getReader();
  SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
  RegexCompletionQuery query = new RegexCompletionQuery(new Term("suggest_field", ""));

  TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false);
  assertEquals(0, suggest.scoreDocs.length);

  reader.close();
  iw.close();
}
Example 15
Source Project: lucene-solr Source File: TestBooleanQuery.java License: Apache License 2.0

public void testMinShouldMatchLeniency() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  doc.add(newTextField("field", "a b c d", Field.Store.NO));
  w.addDocument(doc);
  IndexReader r = DirectoryReader.open(w);
  IndexSearcher s = newSearcher(r);
  BooleanQuery.Builder bq = new BooleanQuery.Builder();
  bq.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD);
  bq.add(new TermQuery(new Term("field", "b")), BooleanClause.Occur.SHOULD);

  // No doc can match: BQ has only 2 clauses and we are asking for minShouldMatch=4
  bq.setMinimumNumberShouldMatch(4);
  assertEquals(0, s.search(bq.build(), 1).totalHits.value);

  r.close();
  w.close();
  dir.close();
}
Example 16
Source Project: incubator-retired-blur Source File: MutatableActionTest.java License: Apache License 2.0

@Test
public void testDeleteRecord() throws IOException {
  RAMDirectory directory = new RAMDirectory();
  DirectoryReader reader = getIndexReader(directory);
  IndexWriter writer = new IndexWriter(directory, _conf.clone());
  assertEquals(0, reader.numDocs());

  Row row = genRow();
  List<Column> cols = new ArrayList<Column>();
  cols.add(new Column("n", "v"));
  row.addToRecords(new Record("1", "fam", cols));

  _action.replaceRow(row);
  _action.performMutate(getSearcher(reader, directory), writer);
  reader = commitAndReopen(reader, writer);
  assertEquals(2, reader.numDocs());

  _action.deleteRecord(row.getId(), "1");
  _action.performMutate(getSearcher(reader, directory), writer);
  reader = commitAndReopen(reader, writer);
  assertEquals(1, reader.numDocs());
}
Example 17
Source Project: lucene-solr Source File: QueryParserTestBase.java License: Apache License 2.0

public void testPositionIncrements() throws Exception {
  Directory dir = newDirectory();
  Analyzer a = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true, MockTokenFilter.ENGLISH_STOPSET);
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(a));
  Document doc = new Document();
  doc.add(newTextField("field", "the wizard of ozzy", Field.Store.NO));
  w.addDocument(doc);
  IndexReader r = DirectoryReader.open(w);
  w.close();
  IndexSearcher s = newSearcher(r);

  Query q = getQuery("\"wizard of ozzy\"", a);
  assertEquals(1, s.search(q, 1).totalHits.value);
  r.close();
  dir.close();
}
Example 18
Source Project: incubator-retired-blur Source File: Blur024CodecTest.java License: Apache License 2.0

@Test
public void testDocValuesFormat() throws IOException {
  RAMDirectory directory = new RAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new WhitespaceAnalyzer(Version.LUCENE_43));
  conf.setCodec(new Blur024Codec());
  IndexWriter writer = new IndexWriter(directory, conf);

  Document doc = new Document();
  doc.add(new StringField("f", "v", Store.YES));
  doc.add(new SortedDocValuesField("f", new BytesRef("v")));
  writer.addDocument(doc);
  writer.close();

  DirectoryReader reader = DirectoryReader.open(directory);
  AtomicReaderContext context = reader.leaves().get(0);
  AtomicReader atomicReader = context.reader();
  SortedDocValues sortedDocValues = atomicReader.getSortedDocValues("f");
  assertTrue(sortedDocValues.getClass().getName().startsWith(DiskDocValuesProducer.class.getName()));

  reader.close();
}
Example 19
Source Project: scava Source File: SORecommender.java License: Eclipse Public License 2.0

public TopDocs executeQuery(org.apache.lucene.search.Query query) throws IOException, ParseException {
  Directory indexDir = FSDirectory.open(Paths.get(INDEX_DIRECTORY));
  try {
    IndexReader reader = DirectoryReader.open(indexDir);
    IndexSearcher searcher = new IndexSearcher(reader);
    if (isBm25 == false) {
      ClassicSimilarity CS = new ClassicSimilarity();
      searcher.setSimilarity(CS);
    }
    TopDocs docs = searcher.search(query, hitsPerPage);
    return docs;
  } catch (Exception e) {
    logger.error(e.getMessage());
    return null;
  }
}
Example 20
Source Project: lucene-solr Source File: PayloadHelper.java License: Apache License 2.0

/**
 * Sets up a RAM-resident Directory, and adds documents (using English.intToEnglish()) with two fields: field and multiField
 * and analyzes them using the PayloadAnalyzer
 * @param similarity The Similarity class to use in the Searcher
 * @param numDocs The num docs to add
 * @return An IndexSearcher
 */
// TODO: randomize
public IndexSearcher setUp(Random random, Similarity similarity, int numDocs) throws IOException {
  Directory directory = new MockDirectoryWrapper(random, new ByteBuffersDirectory());
  PayloadAnalyzer analyzer = new PayloadAnalyzer();

  // TODO randomize this
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(analyzer).setSimilarity(similarity));
  // writer.infoStream = System.out;
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(new TextField(FIELD, English.intToEnglish(i), Field.Store.YES));
    doc.add(new TextField(MULTI_FIELD, English.intToEnglish(i) + " " + English.intToEnglish(i), Field.Store.YES));
    doc.add(new TextField(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES));
    writer.addDocument(doc);
  }
  writer.forceMerge(1);
  reader = DirectoryReader.open(writer);
  writer.close();

  IndexSearcher searcher = LuceneTestCase.newSearcher(LuceneTestCase.getOnlyLeafReader(reader));
  searcher.setSimilarity(similarity);
  return searcher;
}
Example 21
Source Project: lucene-solr Source File: AssociationsFacetsExample.java License: Apache License 2.0

/** User drills down on 'tags/solr'. */
private FacetResult drillDown() throws IOException {
  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  IndexSearcher searcher = new IndexSearcher(indexReader);
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);

  // Passing no baseQuery means we drill down on all
  // documents ("browse only"):
  DrillDownQuery q = new DrillDownQuery(config);

  // Now user drills down on Publish Date/2010:
  q.add("tags", "solr");
  FacetsCollector fc = new FacetsCollector();
  FacetsCollector.search(searcher, q, 10, fc);

  // Retrieve results
  Facets facets = new TaxonomyFacetSumFloatAssociations("$genre", taxoReader, config, fc);
  FacetResult result = facets.getTopChildren(10, "genre");

  indexReader.close();
  taxoReader.close();

  return result;
}
Example 22
Source Project: lucene-solr Source File: TestBooleanQuery.java License: Apache License 2.0

public void testReqOptPropagatesApproximations() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  Field f = newTextField("field", "a b c", Field.Store.NO);
  doc.add(f);
  w.addDocument(doc);
  w.commit();

  DirectoryReader reader = w.getReader();
  final IndexSearcher searcher = new IndexSearcher(reader);
  searcher.setQueryCache(null); // to still have approximations

  PhraseQuery pq = new PhraseQuery("field", "a", "b");

  BooleanQuery.Builder q = new BooleanQuery.Builder();
  q.add(pq, Occur.MUST);
  q.add(new TermQuery(new Term("field", "c")), Occur.SHOULD);

  final Weight weight = searcher.createWeight(searcher.rewrite(q.build()), ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(reader.leaves().get(0));
  assertTrue(scorer instanceof ReqOptSumScorer);
  assertNotNull(scorer.twoPhaseIterator());

  reader.close();
  w.close();
  dir.close();
}
Example 23
Source Project: lucene-solr Source File: TestBooleanRewrites.java License: Apache License 2.0

public void testOneClauseRewriteOptimization() throws Exception {
  final String FIELD = "content";
  final String VALUE = "foo";

  Directory dir = newDirectory();
  (new RandomIndexWriter(random(), dir)).close();
  IndexReader r = DirectoryReader.open(dir);

  TermQuery expected = new TermQuery(new Term(FIELD, VALUE));

  final int numLayers = atLeast(3);
  Query actual = new TermQuery(new Term(FIELD, VALUE));
  for (int i = 0; i < numLayers; i++) {
    BooleanQuery.Builder bq = new BooleanQuery.Builder();
    bq.add(actual, random().nextBoolean() ? BooleanClause.Occur.SHOULD : BooleanClause.Occur.MUST);
    actual = bq.build();
  }

  assertEquals(numLayers + ": " + actual.toString(), expected, new IndexSearcher(r).rewrite(actual));

  r.close();
  dir.close();
}
Example 24
Source Project: lucene-solr Source File: TestSpellChecker.java License: Apache License 2.0

private int numdoc() throws IOException {
  IndexReader rs = DirectoryReader.open(spellindex);
  int num = rs.numDocs();
  assertTrue(num != 0);
  // System.out.println("num docs: " + num);
  rs.close();
  return num;
}
Example 25
Source Project: lucene-solr Source File: TestBooleanQuery.java License: Apache License 2.0

public void testFILTERClauseBehavesLikeMUST() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  Field f = newTextField("field", "a b c d", Field.Store.NO);
  doc.add(f);
  w.addDocument(doc);
  f.setStringValue("b d");
  w.addDocument(doc);
  f.setStringValue("d");
  w.addDocument(doc);
  w.commit();

  DirectoryReader reader = w.getReader();
  final IndexSearcher searcher = new IndexSearcher(reader);

  for (List<String> requiredTerms : Arrays.<List<String>>asList(
      Arrays.asList("a", "d"),
      Arrays.asList("a", "b", "d"),
      Arrays.asList("d"),
      Arrays.asList("e"),
      Arrays.asList())) {
    final BooleanQuery.Builder bq1 = new BooleanQuery.Builder();
    final BooleanQuery.Builder bq2 = new BooleanQuery.Builder();
    for (String term : requiredTerms) {
      final Query q = new TermQuery(new Term("field", term));
      bq1.add(q, Occur.MUST);
      bq2.add(q, Occur.FILTER);
    }

    final BitSet matches1 = getMatches(searcher, bq1.build());
    final BitSet matches2 = getMatches(searcher, bq2.build());
    assertEquals(matches1, matches2);
  }

  reader.close();
  w.close();
  dir.close();
}
Example 26
Source Project: lucene-solr Source File: TestFieldCacheReopen.java License: Apache License 2.0

public void testFieldCacheReuseAfterReopen() throws Exception {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(
      dir,
      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy(10))
  );
  Document doc = new Document();
  doc.add(new IntPoint("number", 17));
  writer.addDocument(doc);
  writer.commit();

  // Open reader1
  DirectoryReader r = DirectoryReader.open(dir);
  LeafReader r1 = getOnlyLeafReader(r);
  final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(r1, "number", FieldCache.INT_POINT_PARSER);
  assertEquals(0, ints.nextDoc());
  assertEquals(17, ints.longValue());

  // Add new segment
  writer.addDocument(doc);
  writer.commit();

  // Reopen reader1 --> reader2
  DirectoryReader r2 = DirectoryReader.openIfChanged(r);
  assertNotNull(r2);
  r.close();
  LeafReader sub0 = r2.leaves().get(0).reader();
  final NumericDocValues ints2 = FieldCache.DEFAULT.getNumerics(sub0, "number", FieldCache.INT_POINT_PARSER);
  r2.close();
  assertEquals(0, ints2.nextDoc());
  assertEquals(17, ints2.longValue());
  writer.close();
  dir.close();
}
Example 27
Source Project: gerbil Source File: Searcher.java License: GNU Affero General Public License v3.0

public Searcher(String indexDirectoryPath) throws GerbilException {
  try {
    indexDirectory = FSDirectory.open(new File(indexDirectoryPath).toPath());
    indexReader = DirectoryReader.open(indexDirectory);
    indexSearcher = new IndexSearcher(indexReader);
  } catch (IOException e) {
    throw new GerbilException("Could not initialize Searcher", ErrorTypes.UNEXPECTED_EXCEPTION);
  }
}
Example 28
Source Project: crate Source File: IndexSearcherWrapper.java License: Apache License 2.0

/**
 * If there are configured {@link IndexSearcherWrapper} instances, the {@link IndexSearcher} of the provided engine searcher
 * gets wrapped and a new {@link Engine.Searcher} instances is returned, otherwise the provided {@link Engine.Searcher} is returned.
 *
 * This is invoked each time a {@link Engine.Searcher} is requested to do an operation. (for example search)
 */
public final Engine.Searcher wrap(Engine.Searcher engineSearcher) throws IOException {
  final ElasticsearchDirectoryReader elasticsearchDirectoryReader =
      ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(engineSearcher.getDirectoryReader());
  if (elasticsearchDirectoryReader == null) {
    throw new IllegalStateException("Can't wrap non elasticsearch directory reader");
  }
  NonClosingReaderWrapper nonClosingReaderWrapper = new NonClosingReaderWrapper(engineSearcher.getDirectoryReader());
  DirectoryReader reader = wrap(nonClosingReaderWrapper);
  if (reader != nonClosingReaderWrapper) {
    if (reader.getReaderCacheHelper() != elasticsearchDirectoryReader.getReaderCacheHelper()) {
      throw new IllegalStateException(
          "wrapped directory reader doesn't delegate IndexReader#getCoreCacheKey, wrappers must override this method and delegate"
              + " to the original readers core cache key. Wrapped readers can't be used as cache keys since their are used only per request which would lead to subtle bugs");
    }
    if (ElasticsearchDirectoryReader.getElasticsearchDirectoryReader(reader) != elasticsearchDirectoryReader) {
      // prevent that somebody wraps with a non-filter reader
      throw new IllegalStateException("wrapped directory reader hides actual ElasticsearchDirectoryReader but shouldn't");
    }
  }

  final IndexSearcher origIndexSearcher = engineSearcher.searcher();
  final IndexSearcher innerIndexSearcher = new IndexSearcher(reader);
  innerIndexSearcher.setQueryCache(origIndexSearcher.getQueryCache());
  innerIndexSearcher.setQueryCachingPolicy(origIndexSearcher.getQueryCachingPolicy());
  innerIndexSearcher.setSimilarity(origIndexSearcher.getSimilarity());
  // TODO: Right now IndexSearcher isn't wrapper friendly, when it becomes wrapper friendly we should revise this extension point
  // For example if IndexSearcher#rewrite() is overwritten than also IndexSearcher#createNormalizedWeight needs to be overwritten
  // This needs to be fixed before we can allow the IndexSearcher from Engine to be wrapped multiple times
  final IndexSearcher indexSearcher = wrap(innerIndexSearcher);
  if (reader == nonClosingReaderWrapper && indexSearcher == innerIndexSearcher) {
    return engineSearcher;
  } else {
    // we close the reader to make sure wrappers can release resources if needed....
    // our NonClosingReaderWrapper makes sure that our reader is not closed
    return new Engine.Searcher(engineSearcher.source(), indexSearcher, () ->
        IOUtils.close(indexSearcher.getIndexReader(), // this will close the wrappers excluding the NonClosingReaderWrapper
            engineSearcher)); // this will run the closeable on the wrapped engine searcher
  }
}
Example 29
Source Project: incubator-retired-blur Source File: LookupBuilderReducer.java License: Apache License 2.0

private MergeSortRowIdMatcher getMergeSortRowIdMatcher(Text rowId,
    Reducer<Text, NullWritable, Text, BooleanWritable>.Context context) throws IOException {
  BlurPartitioner blurPartitioner = new BlurPartitioner();
  int shard = blurPartitioner.getShard(rowId, _numberOfShardsInTable);
  String shardName = ShardUtil.getShardName(shard);

  Path shardPath = new Path(_tablePath, shardName);
  HdfsDirectory hdfsDirectory = new HdfsDirectory(_configuration, shardPath);
  SnapshotIndexDeletionPolicy policy = new SnapshotIndexDeletionPolicy(_configuration,
      SnapshotIndexDeletionPolicy.getGenerationsPath(shardPath));
  Long generation = policy.getGeneration(_snapshot);
  if (generation == null) {
    hdfsDirectory.close();
    throw new IOException("Snapshot [" + _snapshot + "] not found in shard [" + shardPath + "]");
  }

  BlurConfiguration bc = new BlurConfiguration();
  BlockCacheDirectoryFactoryV2 blockCacheDirectoryFactoryV2 = new BlockCacheDirectoryFactoryV2(bc, _totalNumberOfBytes);
  _closer.register(blockCacheDirectoryFactoryV2);
  Directory dir = blockCacheDirectoryFactoryV2.newDirectory("table", "shard", hdfsDirectory, null);
  List<IndexCommit> listCommits = DirectoryReader.listCommits(dir);
  IndexCommit indexCommit = ExistingDataIndexLookupMapper.findIndexCommit(listCommits, generation, shardPath);
  DirectoryReader reader = DirectoryReader.open(indexCommit);
  _rowIdsFromIndex.setValue(getTotalNumberOfRowIds(reader));

  Path cachePath = MergeSortRowIdMatcher.getCachePath(_cachePath, _table, shardName);
  return new MergeSortRowIdMatcher(dir, generation, _configuration, cachePath, context);
}
Example 30
Source Project: ml-blog Source File: LuceneService.java License: MIT License

private IndexSearcher getIndexSearcher() throws IOException {
  if (reader == null) {
    reader = DirectoryReader.open(directory);
  } else {
    DirectoryReader changeReader = DirectoryReader.openIfChanged(reader);
    if (changeReader != null) {
      reader.close();
      reader = changeReader;
    }
  }
  return new IndexSearcher(reader);
}