Java Code Examples for org.apache.lucene.index.DirectoryReader#leaves()
The following examples show how to use org.apache.lucene.index.DirectoryReader#leaves(). They are drawn from open-source projects; each example's header gives the source file, originating project, and license.
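All of the examples share one pattern: DirectoryReader#leaves() returns one LeafReaderContext per index segment, and each context exposes that segment's reader. (In Lucene 4.x these types were named AtomicReaderContext and AtomicReader; Lucene 5 renamed them to LeafReaderContext and LeafReader, which is why both spellings appear below.) As a minimal, self-contained sketch of the pattern, note that the index path here is a placeholder and not taken from any of the projects below:

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class LeavesExample {
  public static void main(String[] args) throws IOException {
    // "/path/to/index" is a placeholder; point it at an existing Lucene index.
    try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"));
         DirectoryReader reader = DirectoryReader.open(dir)) {
      // leaves() returns one LeafReaderContext per segment of the index.
      for (LeafReaderContext context : reader.leaves()) {
        LeafReader leaf = context.reader();
        // docBase is the segment's first docID within the composite reader.
        System.out.println("segment docBase=" + context.docBase + " maxDoc=" + leaf.maxDoc());
      }
    }
  }
}

Per-segment iteration of this kind is how most of the examples below do their work, whether merging indexes, summing per-segment statistics, or checking doc values segment by segment.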
Example 1
Source File: ShardWriter.java From linden with Apache License 2.0
/**
 * Process an intermediate form by carrying out, on the Lucene instance of
 * the shard, the deletes and the inserts (a ram index) in the form.
 *
 * @param form the intermediate form containing deletes and a ram index
 * @throws IOException
 */
public void process(IntermediateForm form, FacetsConfig facetsConfig) throws IOException {
  if (facetsConfig != null) {
    DirectoryTaxonomyWriter.OrdinalMap map = new DirectoryTaxonomyWriter.MemoryOrdinalMap();
    // merge the taxonomies
    taxoWriter.addTaxonomy(form.getTaxoDirectory(), map);
    int[] ordinalMap = map.getMap();
    DirectoryReader reader = DirectoryReader.open(form.getDirectory());
    try {
      List<AtomicReaderContext> leaves = reader.leaves();
      int numReaders = leaves.size();
      AtomicReader[] wrappedLeaves = new AtomicReader[numReaders];
      for (int i = 0; i < numReaders; i++) {
        wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap, facetsConfig);
      }
      writer.addIndexes(new MultiReader(wrappedLeaves));
    } finally {
      reader.close();
    }
  } else {
    writer.addIndexes(new Directory[] { form.getDirectory() });
  }
  numForms++;
}
Example 2
Source File: IntermediateForm.java From linden with Apache License 2.0
/**
 * This method is used by the index update combiner to process an input
 * intermediate form into the current intermediate form. More specifically,
 * the input intermediate forms are a single-document ram index and/or a
 * single delete term.
 *
 * @param form the input intermediate form
 * @throws IOException
 */
public void process(IntermediateForm form, FacetsConfig facetsConfig) throws IOException {
  if (form.dir.ramBytesUsed() > 0 || form.taxoDir.ramBytesUsed() > 0) {
    if (writer == null) {
      createWriter();
    }
    if (facetsConfig != null) {
      DirectoryTaxonomyWriter.OrdinalMap map = new DirectoryTaxonomyWriter.MemoryOrdinalMap();
      // merge the taxonomies
      taxoWriter.addTaxonomy(form.taxoDir, map);
      int[] ordinalMap = map.getMap();
      DirectoryReader reader = DirectoryReader.open(form.dir);
      try {
        List<AtomicReaderContext> leaves = reader.leaves();
        int numReaders = leaves.size();
        AtomicReader[] wrappedLeaves = new AtomicReader[numReaders];
        for (int i = 0; i < numReaders; i++) {
          wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap, facetsConfig);
        }
        writer.addIndexes(new MultiReader(wrappedLeaves));
      } finally {
        reader.close();
      }
    } else {
      writer.addIndexes(new Directory[] { form.dir });
    }
    numDocs++;
  }
}
Example 3
Source File: GenericBlurRecordWriter.java From incubator-retired-blur with Apache License 2.0
private AtomicReader getAtomicReader(DirectoryReader reader) throws IOException {
  List<AtomicReaderContext> leaves = reader.leaves();
  if (leaves.size() == 1) {
    return leaves.get(0).reader();
  }
  throw new IOException("Reader [" + reader + "] has more than one segment after optimize.");
}
Example 4
Source File: LookupBuilderReducer.java From incubator-retired-blur with Apache License 2.0
private long getTotalNumberOfRowIds(DirectoryReader reader) throws IOException {
  long total = 0;
  List<AtomicReaderContext> leaves = reader.leaves();
  for (AtomicReaderContext context : leaves) {
    AtomicReader atomicReader = context.reader();
    Terms terms = atomicReader.terms(BlurConstants.ROW_ID);
    long expectedInsertions = terms.size();
    if (expectedInsertions < 0) {
      return -1;
    }
    total += expectedInsertions;
  }
  return total;
}
Example 5
Source File: MergeSortRowIdMatcher.java From incubator-retired-blur with Apache License 2.0
private void createCacheFile(Path file, SegmentKey segmentKey) throws IOException {
  LOG.info("Building cache for segment [{0}] to [{1}]", segmentKey, file);
  Path tmpPath = getTmpWriterPath(file.getParent());
  try (Writer writer = createWriter(_configuration, tmpPath)) {
    DirectoryReader reader = getReader();
    for (AtomicReaderContext context : reader.leaves()) {
      SegmentReader segmentReader = AtomicReaderUtil.getSegmentReader(context.reader());
      if (segmentReader.getSegmentName().equals(segmentKey.getSegmentName())) {
        writeRowIds(writer, segmentReader);
        break;
      }
    }
  }
  commitWriter(_configuration, file, tmpPath);
}
Example 6
Source File: LuceneCorpusAdapter.java From Palmetto with GNU Affero General Public License v3.0
/**
 * Creates a corpus adapter which uses the Lucene index with the given path
 * and searches on the field with the given field name.
 *
 * @param indexPath path to the Lucene index directory
 * @param fieldName name of the field to search on
 * @return the created corpus adapter
 * @throws CorruptIndexException
 * @throws IOException
 */
public static LuceneCorpusAdapter create(String indexPath, String fieldName)
    throws CorruptIndexException, IOException {
  DirectoryReader dirReader = DirectoryReader.open(new NIOFSDirectory(new File(indexPath)));
  List<AtomicReaderContext> leaves = dirReader.leaves();
  AtomicReader[] reader = new AtomicReader[leaves.size()];
  AtomicReaderContext[] contexts = new AtomicReaderContext[leaves.size()];
  for (int i = 0; i < reader.length; i++) {
    contexts[i] = leaves.get(i);
    reader[i] = contexts[i].reader();
  }
  return new LuceneCorpusAdapter(dirReader, reader, contexts, fieldName);
}
Example 7
Source File: TestUninvertingReader.java From lucene-solr with Apache License 2.0
public void testSortedSetEmptyIndex() throws IOException {
  final Directory dir = newDirectory();
  final IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
  iw.close();

  final Map<String,Type> UNINVERT_MAP = new LinkedHashMap<String,Type>();
  for (Type t : EnumSet.allOf(Type.class)) {
    UNINVERT_MAP.put(t.name(), t);
  }

  final DirectoryReader ir = UninvertingReader.wrap(DirectoryReader.open(dir), UNINVERT_MAP);
  TestUtil.checkReader(ir);

  final LeafReader composite = SlowCompositeReaderWrapper.wrap(ir);
  TestUtil.checkReader(composite);

  for (String f : UNINVERT_MAP.keySet()) {
    // check the leaves
    // (normally there are none for an empty index, so this is really just future
    // proofing in case that changes for some reason)
    for (LeafReaderContext rc : ir.leaves()) {
      final LeafReader ar = rc.reader();
      assertNull(f + ": Expected no doc values from empty index (leaf)",
          ar.getSortedSetDocValues(f));
    }
    // check the composite
    assertNull(f + ": Expected no doc values from empty index (composite)",
        composite.getSortedSetDocValues(f));
  }
  ir.close();
  dir.close();
}
Example 8
Source File: DirectUpdateHandler2.java From lucene-solr with Apache License 2.0
@Override
public int mergeIndexes(MergeIndexesCommand cmd) throws IOException {
  TestInjection.injectDirectUpdateLatch();
  mergeIndexesCommands.mark();
  int rc;

  log.info("start {}", cmd);

  List<DirectoryReader> readers = cmd.readers;
  if (readers != null && readers.size() > 0) {
    List<CodecReader> mergeReaders = new ArrayList<>();
    for (DirectoryReader reader : readers) {
      for (LeafReaderContext leaf : reader.leaves()) {
        mergeReaders.add(SlowCodecReaderWrapper.wrap(leaf.reader()));
      }
    }
    RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
    try {
      iw.get().addIndexes(mergeReaders.toArray(new CodecReader[mergeReaders.size()]));
    } finally {
      iw.decref();
    }
    rc = 1;
  } else {
    rc = 0;
  }
  log.info("end_mergeIndexes");

  // TODO: consider soft commit issues
  if (rc == 1 && commitTracker.getTimeUpperBound() > 0) {
    commitTracker.scheduleCommitWithin(commitTracker.getTimeUpperBound());
  } else if (rc == 1 && softCommitTracker.getTimeUpperBound() > 0) {
    softCommitTracker.scheduleCommitWithin(softCommitTracker.getTimeUpperBound());
  }

  return rc;
}
Example 9
Source File: TestBlockPostingsFormat3.java From lucene-solr with Apache License 2.0
private void verify(Directory dir) throws Exception {
  DirectoryReader ir = DirectoryReader.open(dir);
  for (LeafReaderContext leaf : ir.leaves()) {
    LeafReader leafReader = leaf.reader();
    assertTerms(leafReader.terms("field1docs"), leafReader.terms("field2freqs"), true);
    assertTerms(leafReader.terms("field3positions"), leafReader.terms("field4offsets"), true);
    assertTerms(leafReader.terms("field4offsets"), leafReader.terms("field5payloadsFixed"), true);
    assertTerms(leafReader.terms("field5payloadsFixed"), leafReader.terms("field6payloadsVariable"), true);
    assertTerms(leafReader.terms("field6payloadsVariable"), leafReader.terms("field7payloadsFixedOffsets"), true);
    assertTerms(leafReader.terms("field7payloadsFixedOffsets"), leafReader.terms("field8payloadsVariableOffsets"), true);
  }
  ir.close();
}
Example 10
Source File: TestLucene80DocValuesFormat.java From lucene-solr with Apache License 2.0
private void assertDVAdvance(Directory dir, int jumpStep) throws IOException {
  DirectoryReader ir = DirectoryReader.open(dir);
  TestUtil.checkReader(ir);
  for (LeafReaderContext context : ir.leaves()) {
    LeafReader r = context.reader();

    for (int jump = jumpStep; jump < r.maxDoc(); jump += jumpStep) {
      // Create a new instance each time to ensure jumps from the beginning
      NumericDocValues docValues = DocValues.getNumeric(r, "dv");
      for (int docID = 0; docID < r.maxDoc(); docID += jump) {
        String base = "document #" + docID + "/" + r.maxDoc() + ", jumping " + jump + " from #" + (docID - jump);
        String storedValue = r.document(docID).get("stored");
        if (storedValue == null) {
          assertFalse("There should be no DocValue for " + base, docValues.advanceExact(docID));
        } else {
          assertTrue("There should be a DocValue for " + base, docValues.advanceExact(docID));
          assertEquals("The doc value should be correct for " + base,
              Long.parseLong(storedValue), docValues.longValue());
        }
      }
    }
  }
  ir.close();
}
Example 11
Source File: TestUtil.java From lucene-solr with Apache License 2.0
public static void addIndexesSlowly(IndexWriter writer, DirectoryReader... readers) throws IOException {
  List<CodecReader> leaves = new ArrayList<>();
  for (DirectoryReader reader : readers) {
    for (LeafReaderContext context : reader.leaves()) {
      leaves.add(SlowCodecReaderWrapper.wrap(context.reader()));
    }
  }
  writer.addIndexes(leaves.toArray(new CodecReader[leaves.size()]));
}
Example 12
Source File: DirectoryTaxonomyWriter.java From lucene-solr with Apache License 2.0
/**
 * Takes the categories from the given taxonomy directory, and adds the
 * missing ones to this taxonomy. Additionally, it fills the given
 * {@link OrdinalMap} with a mapping from the original ordinal to the new
 * ordinal.
 */
public void addTaxonomy(Directory taxoDir, OrdinalMap map) throws IOException {
  ensureOpen();
  DirectoryReader r = DirectoryReader.open(taxoDir);
  try {
    final int size = r.numDocs();
    final OrdinalMap ordinalMap = map;
    ordinalMap.setSize(size);
    int base = 0;
    PostingsEnum docs = null;
    for (final LeafReaderContext ctx : r.leaves()) {
      final LeafReader ar = ctx.reader();
      final Terms terms = ar.terms(Consts.FULL);
      // TODO: share per-segment TermsEnum here!
      TermsEnum te = terms.iterator();
      while (te.next() != null) {
        FacetLabel cp = new FacetLabel(FacetsConfig.stringToPath(te.term().utf8ToString()));
        final int ordinal = addCategory(cp);
        docs = te.postings(docs, PostingsEnum.NONE);
        ordinalMap.addMapping(docs.nextDoc() + base, ordinal);
      }
      base += ar.maxDoc(); // no deletions, so we're ok
    }
    ordinalMap.addDone();
  } finally {
    r.close();
  }
}
Example 13
Source File: EngineTestCase.java From crate with Apache License 2.0
static long maxSeqNosInReader(DirectoryReader reader) throws IOException {
  long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED;
  for (LeafReaderContext leaf : reader.leaves()) {
    final NumericDocValues seqNoDocValues = leaf.reader().getNumericDocValues(SeqNoFieldMapper.NAME);
    while (seqNoDocValues.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      maxSeqNo = SequenceNumbers.max(maxSeqNo, seqNoDocValues.longValue());
    }
  }
  return maxSeqNo;
}
Example 14
Source File: AlfrescoLukeRequestHandler.java From SearchServices with GNU Lesser General Public License v3.0
/** Returns the sum of RAM bytes used by each segment */
private static long getIndexHeapUsed(DirectoryReader reader) {
  long indexHeapRamBytesUsed = 0;
  for (LeafReaderContext leafReaderContext : reader.leaves()) {
    LeafReader leafReader = leafReaderContext.reader();
    if (leafReader instanceof SegmentReader) {
      indexHeapRamBytesUsed += ((SegmentReader) leafReader).ramBytesUsed();
    } else {
      // Not supported for any reader that is not a SegmentReader
      return -1;
    }
  }
  return indexHeapRamBytesUsed;
}
Example 15
Source File: TestLucene80DocValuesFormat.java From lucene-solr with Apache License 2.0
private void doTestSortedNumericBlocksOfVariousBitsPerValue(LongSupplier counts) throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setMaxBufferedDocs(atLeast(Lucene80DocValuesFormat.NUMERIC_BLOCK_SIZE));
  conf.setRAMBufferSizeMB(-1);
  conf.setMergePolicy(newLogMergePolicy(random().nextBoolean()));
  IndexWriter writer = new IndexWriter(dir, conf);

  final int numDocs = atLeast(Lucene80DocValuesFormat.NUMERIC_BLOCK_SIZE * 3);
  final LongSupplier values = blocksOfVariousBPV();
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();

    int valueCount = (int) counts.getAsLong();
    long[] valueArray = new long[valueCount];
    for (int j = 0; j < valueCount; j++) {
      long value = values.getAsLong();
      valueArray[j] = value;
      doc.add(new SortedNumericDocValuesField("dv", value));
    }
    Arrays.sort(valueArray);
    for (int j = 0; j < valueCount; j++) {
      doc.add(new StoredField("stored", Long.toString(valueArray[j])));
    }
    writer.addDocument(doc);
    if (random().nextInt(31) == 0) {
      writer.commit();
    }
  }
  writer.forceMerge(1);
  writer.close();

  // compare
  DirectoryReader ir = DirectoryReader.open(dir);
  TestUtil.checkReader(ir);
  for (LeafReaderContext context : ir.leaves()) {
    LeafReader r = context.reader();
    SortedNumericDocValues docValues = DocValues.getSortedNumeric(r, "dv");
    for (int i = 0; i < r.maxDoc(); i++) {
      if (i > docValues.docID()) {
        docValues.nextDoc();
      }
      String[] expected = r.document(i).getValues("stored");
      if (i < docValues.docID()) {
        assertEquals(0, expected.length);
      } else {
        String[] actual = new String[docValues.docValueCount()];
        for (int j = 0; j < actual.length; j++) {
          actual[j] = Long.toString(docValues.nextValue());
        }
        assertArrayEquals(expected, actual);
      }
    }
  }
  ir.close();
  dir.close();
}
Example 16
Source File: TestFieldCacheVsDocValues.java From lucene-solr with Apache License 2.0
private void doTestSortedVsFieldCache(int minLength, int maxLength) throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
  Document doc = new Document();
  Field idField = new StringField("id", "", Field.Store.NO);
  Field indexedField = new StringField("indexed", "", Field.Store.NO);
  Field dvField = new SortedDocValuesField("dv", new BytesRef());
  doc.add(idField);
  doc.add(indexedField);
  doc.add(dvField);

  // index some docs
  int numDocs = atLeast(300);
  for (int i = 0; i < numDocs; i++) {
    idField.setStringValue(Integer.toString(i));
    final int length;
    if (minLength == maxLength) {
      length = minLength; // fixed length
    } else {
      length = TestUtil.nextInt(random(), minLength, maxLength);
    }
    String value = TestUtil.randomSimpleString(random(), length);
    indexedField.setStringValue(value);
    dvField.setBytesValue(new BytesRef(value));
    writer.addDocument(doc);
    if (random().nextInt(31) == 0) {
      writer.commit();
    }
  }

  // delete some docs
  int numDeletions = random().nextInt(numDocs / 10);
  for (int i = 0; i < numDeletions; i++) {
    int id = random().nextInt(numDocs);
    writer.deleteDocuments(new Term("id", Integer.toString(id)));
  }
  writer.close();

  // compare
  DirectoryReader ir = DirectoryReader.open(dir);
  for (LeafReaderContext context : ir.leaves()) {
    LeafReader r = context.reader();
    SortedDocValues expected = FieldCache.DEFAULT.getTermsIndex(r, "indexed");
    SortedDocValues actual = r.getSortedDocValues("dv");
    assertEquals(r.maxDoc(), expected, actual);
  }
  ir.close();
  dir.close();
}
Example 17
Source File: TestDocTermOrds.java From lucene-solr with Apache License 2.0
public void testRandom() throws Exception {
  Directory dir = newDirectory();

  final int NUM_TERMS = atLeast(20);
  final Set<BytesRef> terms = new HashSet<>();
  while (terms.size() < NUM_TERMS) {
    final String s = TestUtil.randomRealisticUnicodeString(random());
    //final String s = _TestUtil.randomSimpleString(random);
    if (s.length() > 0) {
      terms.add(new BytesRef(s));
    }
  }
  final BytesRef[] termsArray = terms.toArray(new BytesRef[terms.size()]);
  Arrays.sort(termsArray);

  final int NUM_DOCS = atLeast(100);

  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));

  // Sometimes swap in codec that impls ord():
  if (random().nextInt(10) == 7) {
    // Make sure terms index has ords:
    Codec codec = TestUtil.alwaysPostingsFormat(TestUtil.getPostingsFormatWithOrds(random()));
    conf.setCodec(codec);
  }

  final RandomIndexWriter w = new RandomIndexWriter(random(), dir, conf);

  final int[][] idToOrds = new int[NUM_DOCS][];
  final Set<Integer> ordsForDocSet = new HashSet<>();

  for (int id = 0; id < NUM_DOCS; id++) {
    Document doc = new Document();

    doc.add(new LegacyIntField("id", id, Field.Store.YES));

    final int termCount = TestUtil.nextInt(random(), 0, 20 * RANDOM_MULTIPLIER);
    while (ordsForDocSet.size() < termCount) {
      ordsForDocSet.add(random().nextInt(termsArray.length));
    }
    final int[] ordsForDoc = new int[termCount];
    int upto = 0;
    if (VERBOSE) {
      System.out.println("TEST: doc id=" + id);
    }
    for (int ord : ordsForDocSet) {
      ordsForDoc[upto++] = ord;
      Field field = newStringField("field", termsArray[ord].utf8ToString(), Field.Store.NO);
      if (VERBOSE) {
        System.out.println("  f=" + termsArray[ord].utf8ToString());
      }
      doc.add(field);
    }
    ordsForDocSet.clear();
    Arrays.sort(ordsForDoc);
    idToOrds[id] = ordsForDoc;
    w.addDocument(doc);
  }

  final DirectoryReader r = w.getReader();
  w.close();

  if (VERBOSE) {
    System.out.println("TEST: reader=" + r);
  }

  for (LeafReaderContext ctx : r.leaves()) {
    if (VERBOSE) {
      System.out.println("\nTEST: sub=" + ctx.reader());
    }
    verify(ctx.reader(), idToOrds, termsArray, null);
  }

  // Also test top-level reader: its enum does not support
  // ord, so this forces the OrdWrapper to run:
  if (VERBOSE) {
    System.out.println("TEST: top reader");
  }
  LeafReader slowR = SlowCompositeReaderWrapper.wrap(r);
  TestUtil.checkReader(slowR);
  verify(slowR, idToOrds, termsArray, null);

  FieldCache.DEFAULT.purgeByCacheKey(slowR.getCoreCacheHelper().getKey());

  r.close();
  dir.close();
}
Example 18
Source File: TestHalfAndHalfDocValues.java From lucene-solr with Apache License 2.0
public void testHalfAndHalfDocValues() throws Exception {
  // Insert two docs without docvalues
  String fieldname = "string_add_dv_later";
  assertU(adoc("id", "3", fieldname, "c"));
  assertU(commit());
  assertU(adoc("id", "1", fieldname, "a"));
  assertU(commit());

  try (SolrCore core = h.getCoreInc()) {
    assertFalse(core.getLatestSchema().getField(fieldname).hasDocValues());
    // Add docvalues to the field type
    IndexSchema schema = core.getLatestSchema();
    SchemaField oldField = schema.getField(fieldname);
    int newProperties = oldField.getProperties() | SchemaField.DOC_VALUES;

    SchemaField sf = new SchemaField(fieldname, oldField.getType(), newProperties, null);
    schema.getFields().put(fieldname, sf);

    // Insert a new doc with docvalues
    assertU(adoc("id", "2", fieldname, "b"));
    assertU(commit());

    // Check there are a mix of segments with and without docvalues
    final RefCounted<SolrIndexSearcher> searcherRef = core.openNewSearcher(true, true);
    final SolrIndexSearcher searcher = searcherRef.get();
    try {
      final DirectoryReader topReader = searcher.getRawReader();

      // Assert no merges
      assertEquals(3, topReader.numDocs());
      assertEquals(3, topReader.leaves().size());

      final FieldInfos infos = FieldInfos.getMergedFieldInfos(topReader);
      // The global field type should have docValues because a document with dvs was added
      assertEquals(DocValuesType.SORTED, infos.fieldInfo(fieldname).getDocValuesType());

      for (LeafReaderContext ctx : topReader.leaves()) {
        LeafReader r = ctx.reader();
        // Make sure there were no merges
        assertEquals(1, r.numDocs());
        Document doc = r.document(0);
        String id = doc.getField("id").stringValue();
        if (id.equals("1") || id.equals("3")) {
          assertEquals(DocValuesType.NONE, r.getFieldInfos().fieldInfo(fieldname).getDocValuesType());
        } else {
          assertEquals(DocValuesType.SORTED, r.getFieldInfos().fieldInfo(fieldname).getDocValuesType());
        }
      }
    } finally {
      searcherRef.decref();
    }
  }

  // Assert sort order is correct
  assertQ(req("q", "string_add_dv_later:*", "sort", "string_add_dv_later asc"),
      "//*[@numFound='3']",
      "//result/doc[1]/str[@name='id'][.=1]",
      "//result/doc[2]/str[@name='id'][.=2]",
      "//result/doc[3]/str[@name='id'][.=3]"
  );
}
Example 19
Source File: DirectoryTaxonomyWriter.java From lucene-solr with Apache License 2.0
/**
 * Look up the given category in the cache and/or the on-disk storage,
 * returning the category's ordinal, or a negative number in case the
 * category does not yet exist in the taxonomy.
 */
protected synchronized int findCategory(FacetLabel categoryPath) throws IOException {
  // If we can find the category in the cache, or we know the cache is
  // complete, we can return the response directly from it
  int res = cache.get(categoryPath);
  if (res >= 0 || cacheIsComplete) {
    return res;
  }

  cacheMisses.incrementAndGet();
  // After a few cache misses, it makes sense to read all the categories
  // from disk and into the cache. The reason not to do this on the first
  // cache miss (or even when opening the writer) is that it will
  // significantly slow down the case when a taxonomy is opened just to
  // add one category. The idea of only spending a long time on reading
  // after enough time was spent on cache misses is known as an "online
  // algorithm".
  perhapsFillCache();
  res = cache.get(categoryPath);
  if (res >= 0 || cacheIsComplete) {
    // if after filling the cache from the info on disk, the category is in it
    // or the cache is complete, return whatever cache.get returned.
    return res;
  }

  // if we get here, it means the category is not in the cache, and it is not
  // complete, and therefore we must look for the category on disk.

  // We need to get an answer from the on-disk index.
  initReaderManager();

  int doc = -1;
  DirectoryReader reader = readerManager.acquire();
  try {
    final BytesRef catTerm = new BytesRef(FacetsConfig.pathToString(categoryPath.components, categoryPath.length));
    PostingsEnum docs = null; // reuse
    for (LeafReaderContext ctx : reader.leaves()) {
      Terms terms = ctx.reader().terms(Consts.FULL);
      if (terms != null) {
        // TODO: share per-segment TermsEnum here!
        TermsEnum termsEnum = terms.iterator();
        if (termsEnum.seekExact(catTerm)) {
          // liveDocs=null because the taxonomy has no deletes
          docs = termsEnum.postings(docs, 0 /* freqs not required */);
          // if the term was found, we know it has exactly one document.
          doc = docs.nextDoc() + ctx.docBase;
          break;
        }
      }
    }
  } finally {
    readerManager.release(reader);
  }
  if (doc > 0) {
    addToCache(categoryPath, doc);
  }
  return doc;
}
Example 20
Source File: SecureAtomicReaderTestBase.java From incubator-retired-blur with Apache License 2.0
private AtomicReader createAtomicReader() throws IOException {
  DirectoryReader reader = createReader();
  List<AtomicReaderContext> leaves = reader.leaves();
  return leaves.get(0).reader();
}