Java Code Examples for org.apache.lucene.index.LeafReaderContext#reader()

The following examples show how to use org.apache.lucene.index.LeafReaderContext#reader(). Each example is taken from an open-source project; the source file and license are noted above each snippet.
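All of the examples share one basic pattern: a composite IndexReader exposes its segments as LeafReaderContext objects via reader.leaves(), and context.reader() returns the per-segment LeafReader. As a minimal, self-contained sketch (the index path is a placeholder):

import java.nio.file.Paths;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.FSDirectory;

public class LeafReaderContextDemo {
  public static void main(String[] args) throws Exception {
    try (IndexReader reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/index")))) {
      for (LeafReaderContext context : reader.leaves()) {
        LeafReader leaf = context.reader();
        // each segment has its own doc id space starting at 0;
        // context.docBase maps it back into the composite reader's id space
        System.out.println("segment ord=" + context.ord
            + " docBase=" + context.docBase
            + " maxDoc=" + leaf.maxDoc());
      }
    }
  }
}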
Example 1
Source File: CodecCollector.java    From mtas with Apache License 2.0
/**
 * Collects collection values from the index for the given set of documents.
 *
 * @param reader
 *          the index reader
 * @param docSet
 *          the set of top-level document ids to collect from
 * @param collectionInfo
 *          the collection component describing the action, fields and values
 * @throws IOException
 *           Signals that an I/O exception has occurred.
 */
public static void collectCollection(IndexReader reader, List<Integer> docSet,
    ComponentCollection collectionInfo) throws IOException {
  if (collectionInfo.action().equals(ComponentCollection.ACTION_CHECK)) {
    // can't do anything in Lucene for check
  } else if (collectionInfo.action()
      .equals(ComponentCollection.ACTION_LIST)) {
    // can't do anything in Lucene for list
  } else if (collectionInfo.action()
      .equals(ComponentCollection.ACTION_CREATE)) {
    BytesRef term = null;
    PostingsEnum postingsEnum = null;
    Integer docId;
    Integer termDocId = -1;
    Terms terms;
    LeafReaderContext lrc;
    LeafReader r;
    ListIterator<LeafReaderContext> iterator = reader.leaves().listIterator();
    while (iterator.hasNext()) {
      lrc = iterator.next();
      r = lrc.reader();
      for (String field : collectionInfo.fields()) {
        if ((terms = r.terms(field)) != null) {
          TermsEnum termsEnum = terms.iterator();
          while ((term = termsEnum.next()) != null) {
            Iterator<Integer> docIterator = docSet.iterator();
            postingsEnum = termsEnum.postings(postingsEnum,
                PostingsEnum.NONE);
            termDocId = -1;
            while (docIterator.hasNext()) {
              docId = docIterator.next() - lrc.docBase;
              // the term matches this document if its postings contain it:
              // either we already advanced to it, or advancing lands exactly on it
              if ((docId >= termDocId) && ((docId.equals(termDocId))
                  || ((termDocId = postingsEnum.advance(docId))
                      .equals(docId)))) {
                collectionInfo.addValue(term.utf8ToString());
                break;
              }
              if (termDocId.equals(PostingsEnum.NO_MORE_DOCS)) {
                break;
              }
            }
          }
        }
      }
    }
  }
}
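Note the docId = docIterator.next() - lrc.docBase conversion above: docSet holds top-level (index-wide) document ids, while PostingsEnum#advance works with segment-local ids. A pair of hypothetical helpers makes the mapping explicit:

// top-level docID = LeafReaderContext.docBase + segment-local docID
static int toSegmentLocal(int topLevelDocId, LeafReaderContext lrc) {
  return topLevelDocId - lrc.docBase;
}

static int toTopLevel(int segmentLocalDocId, LeafReaderContext lrc) {
  return lrc.docBase + segmentLocalDocId;
}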
 
Example 2
Source File: GeoPointArrayIndexFieldData.java    From Elasticsearch with Apache License 2.0
@Override
public AtomicGeoPointFieldData loadDirect(LeafReaderContext context) throws Exception {
    LeafReader reader = context.reader();

    Terms terms = reader.terms(getFieldNames().indexName());
    AtomicGeoPointFieldData data = null;
    // TODO: Use an actual estimator to estimate before loading.
    NonEstimatingEstimator estimator = new NonEstimatingEstimator(breakerService.getBreaker(CircuitBreaker.FIELDDATA));
    if (terms == null) {
        data = AbstractAtomicGeoPointFieldData.empty(reader.maxDoc());
        estimator.afterLoad(null, data.ramBytesUsed());
        return data;
    }
    return (Version.indexCreated(indexSettings).before(Version.V_2_2_0)) ?
        loadLegacyFieldData(reader, estimator, terms, data) : loadFieldData22(reader, estimator, terms, data);
}
 
Example 3
Source File: StatisHelper.java    From HongsCORE with MIT License
@Override
public LeafCollector getLeafCollector(LeafReaderContext lrc) throws IOException {
    LeafReader reader = lrc.reader();

    for (int i = 0; i < fields.length; i++) {
        if (groups[i][1] == 1) {
            values[i] = reader.getSortedNumericDocValues("%" + fields[i]);
        } else {
            values[i] = reader.getNumericDocValues("#" + fields[i]);
        }
    }

    return this;
}
 
Example 4
Source File: Geo3DPointOutsideDistanceComparator.java    From lucene-solr with Apache License 2.0
@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
  LeafReader reader = context.reader();
  FieldInfo info = reader.getFieldInfos().fieldInfo(field);
  if (info != null) {
    Geo3DDocValuesField.checkCompatible(info);
  }
  currentDocs = DocValues.getSortedNumeric(reader, field);
  return this;
}
 
Example 5
Source File: BBoxValueSource.java    From lucene-solr with Apache License 2.0
@Override
public ShapeValues getValues(LeafReaderContext readerContext) throws IOException {
  LeafReader reader = readerContext.reader();
  final NumericDocValues minX = DocValues.getNumeric(reader, strategy.field_minX);
  final NumericDocValues minY = DocValues.getNumeric(reader, strategy.field_minY);
  final NumericDocValues maxX = DocValues.getNumeric(reader, strategy.field_maxX);
  final NumericDocValues maxY = DocValues.getNumeric(reader, strategy.field_maxY);

  // one Rectangle instance, reused across value() calls
  final Rectangle rect = strategy.getSpatialContext().getShapeFactory().rect(0, 0, 0, 0);

  return new ShapeValues() {

    @Override
    public boolean advanceExact(int doc) throws IOException {
      return minX.advanceExact(doc) && minY.advanceExact(doc) && maxX.advanceExact(doc) && maxY.advanceExact(doc);
    }

    @Override
    public Shape value() throws IOException {
      double minXValue = Double.longBitsToDouble(minX.longValue());
      double minYValue = Double.longBitsToDouble(minY.longValue());
      double maxXValue = Double.longBitsToDouble(maxX.longValue());
      double maxYValue = Double.longBitsToDouble(maxY.longValue());
      rect.reset(minXValue, maxXValue, minYValue, maxYValue);
      return rect;
    }

  };
}
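The Double.longBitsToDouble calls above decode coordinates that were stored as raw bits, since NumericDocValues can only hold longs. The indexing side performs the inverse conversion; a hypothetical one-line sketch (field name and document assumed):

// hypothetical indexing-side counterpart: store a double's raw bits as a long
doc.add(new NumericDocValuesField("minX", Double.doubleToRawLongBits(minXValue)));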
 
Example 6
Source File: IndexSizeEstimator.java    From lucene-solr with Apache License 2.0
private void estimateStoredFields(Map<String, Object> result) throws IOException {
  log.info("- estimating stored fields...");
  Map<String, Map<String, Object>> stats = new HashMap<>();
  for (LeafReaderContext context : reader.leaves()) {
    LeafReader leafReader = context.reader();
    EstimatingVisitor visitor = new EstimatingVisitor(stats, topN, maxLength, samplingStep);
    Bits liveDocs = leafReader.getLiveDocs();
    if (leafReader instanceof CodecReader) {
      CodecReader codecReader = (CodecReader)leafReader;
      StoredFieldsReader storedFieldsReader = codecReader.getFieldsReader();
      // this instance may be faster for a full sequential pass
      StoredFieldsReader mergeInstance = storedFieldsReader.getMergeInstance();
      for (int docId = 0; docId < leafReader.maxDoc(); docId += samplingStep) {
        if (liveDocs != null && !liveDocs.get(docId)) {
          continue;
        }
        mergeInstance.visitDocument(docId, visitor);
      }
      if (mergeInstance != storedFieldsReader) {
        mergeInstance.close();
      }
    } else {
      for (int docId = 0; docId < leafReader.maxDoc(); docId += samplingStep) {
        if (liveDocs != null && !liveDocs.get(docId)) {
          continue;
        }
        leafReader.document(docId, visitor);
      }
    }
  }
  result.put(STORED_FIELDS, stats);
}
 
Example 7
Source File: DirectoryTaxonomyWriter.java    From lucene-solr with Apache License 2.0
/**
 * Takes the categories from the given taxonomy directory, and adds the
 * missing ones to this taxonomy. Additionally, it fills the given
 * {@link OrdinalMap} with a mapping from the original ordinal to the new
 * ordinal.
 */
public void addTaxonomy(Directory taxoDir, OrdinalMap map) throws IOException {
  ensureOpen();
  DirectoryReader r = DirectoryReader.open(taxoDir);
  try {
    final int size = r.numDocs();
    final OrdinalMap ordinalMap = map;
    ordinalMap.setSize(size);
    int base = 0;
    PostingsEnum docs = null;
    for (final LeafReaderContext ctx : r.leaves()) {
      final LeafReader ar = ctx.reader();
      final Terms terms = ar.terms(Consts.FULL);
      // TODO: share per-segment TermsEnum here!
      TermsEnum te = terms.iterator();
      while (te.next() != null) {
        FacetLabel cp = new FacetLabel(FacetsConfig.stringToPath(te.term().utf8ToString()));
        final int ordinal = addCategory(cp);
        docs = te.postings(docs, PostingsEnum.NONE);
        ordinalMap.addMapping(docs.nextDoc() + base, ordinal);
      }
      base += ar.maxDoc(); // no deletions, so we're ok
    }
    ordinalMap.addDone();
  } finally {
    r.close();
  }
}
 
Example 8
Source File: SourceLookup.java    From Elasticsearch with Apache License 2.0
public void setSegmentAndDocument(LeafReaderContext context, int docId) {
    if (this.reader == context.reader() && this.docId == docId) {
        // if we are called with the same document, don't invalidate source
        return;
    }
    this.reader = context.reader();
    this.source = null;
    this.sourceAsBytes = null;
    this.docId = docId;
}
 
Example 9
Source File: CheckJoinIndex.java    From lucene-solr with Apache License 2.0
/**
 * Check that the given index is good to use for block joins.
 * @throws IllegalStateException if the index does not have an appropriate structure
 */
public static void check(IndexReader reader, BitSetProducer parentsFilter) throws IOException {
  for (LeafReaderContext context : reader.leaves()) {
    if (context.reader().maxDoc() == 0) {
      continue;
    }
    final BitSet parents = parentsFilter.getBitSet(context);
    if (parents == null || parents.cardinality() == 0) {
      throw new IllegalStateException("Every segment should have at least one parent, but " + context.reader() + " does not have any");
    }
    if (parents.get(context.reader().maxDoc() - 1) == false) {
      throw new IllegalStateException("The last document of a segment must always be a parent, but " + context.reader() + " has a child as a last doc");
    }
    final Bits liveDocs = context.reader().getLiveDocs();
    if (liveDocs != null) {
      int prevParentDoc = -1;
      DocIdSetIterator it = new BitSetIterator(parents, 0L);
      for (int parentDoc = it.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS; parentDoc = it.nextDoc()) {
        final boolean parentIsLive = liveDocs.get(parentDoc);
        for (int child = prevParentDoc + 1; child != parentDoc; child++) {
          final boolean childIsLive = liveDocs.get(child);
          if (parentIsLive != childIsLive) {
            if (childIsLive) {
              throw new IllegalStateException("Parent doc " + parentDoc + " of segment " + context.reader() + " is live but has a deleted child document " + child);
            } else {
              throw new IllegalStateException("Parent doc " + parentDoc + " of segment " + context.reader() + " is deleted but has a live child document " + child);
            }
          }
        }
        prevParentDoc = parentDoc;
      }
    }
  }
}
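The invariant checked here, that the last document of every block is a parent, follows from how block joins are indexed: the children of a block are added first and the parent last, all in a single IndexWriter#addDocuments call. A minimal sketch, assuming an open IndexWriter writer and a hypothetical "type" field:

List<Document> block = new ArrayList<>();
Document child = new Document();
child.add(new StringField("type", "child", Field.Store.NO));
block.add(child);
Document parent = new Document();
parent.add(new StringField("type", "parent", Field.Store.NO));
block.add(parent); // the parent must be the last document of the block
writer.addDocuments(block);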
 
Example 10
Source File: TestMultipleIndexFields.java    From lucene-solr with Apache License 2.0
private void assertOrdinalsExist(String field, IndexReader ir) throws IOException {
  for (LeafReaderContext context : ir.leaves()) {
    LeafReader r = context.reader();
    if (r.getBinaryDocValues(field) != null) {
      return; // not every segment needs to have these DocValues
    }
  }
  fail("no ordinals found for " + field);
}
 
Example 11
Source File: LatLonPointDistanceComparator.java    From lucene-solr with Apache License 2.0
@Override
public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException {
  LeafReader reader = context.reader();
  FieldInfo info = reader.getFieldInfos().fieldInfo(field);
  if (info != null) {
    LatLonDocValuesField.checkCompatible(info);
  }
  currentDocs = DocValues.getSortedNumeric(reader, field);
  valuesDocID = -1;
  return this;
}
 
Example 12
Source File: TestLucene80DocValuesFormat.java    From lucene-solr with Apache License 2.0
private void assertDVAdvance(Directory dir, int jumpStep) throws IOException {
  DirectoryReader ir = DirectoryReader.open(dir);
  TestUtil.checkReader(ir);
  for (LeafReaderContext context : ir.leaves()) {
    LeafReader r = context.reader();

    for (int jump = jumpStep; jump < r.maxDoc(); jump += jumpStep) {
      // Create a new instance each time to ensure jumps from the beginning
      NumericDocValues docValues = DocValues.getNumeric(r, "dv");
      for (int docID = 0; docID < r.maxDoc(); docID += jump) {
        String base = "document #" + docID + "/" + r.maxDoc() + ", jumping " + jump + " from #" + (docID-jump);
        String storedValue = r.document(docID).get("stored");
        if (storedValue == null) {
          assertFalse("There should be no DocValue for " + base,
              docValues.advanceExact(docID));
        } else {
          assertTrue("There should be a DocValue for " + base,
              docValues.advanceExact(docID));
          assertEquals("The doc value should be correct for " + base,
              Long.parseLong(storedValue), docValues.longValue());
        }
      }
    }
  }
  ir.close();
}
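The "create a new instance each time" comment reflects the doc values iterator contract: advanceExact may only be called with non-decreasing targets, so jumping backwards requires obtaining a fresh iterator. A short sketch of the contract:

NumericDocValues dv = DocValues.getNumeric(r, "dv");
boolean has10 = dv.advanceExact(10); // fine: moving forward
boolean has20 = dv.advanceExact(20); // fine: targets are non-decreasing
// dv.advanceExact(5);               // not allowed: would move backwards; use a new iterator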
 
Example 13
Source File: RptWithGeometrySpatialField.java    From lucene-solr with Apache License 2.0
@Override
public ShapeValues getValues(LeafReaderContext readerContext) throws IOException {
  final ShapeValues targetFuncValues = targetValueSource.getValues(readerContext);
  // The key pairs a leaf reader's core cache key with a docId relative to that reader; the value is the cached Shape.
  @SuppressWarnings({"unchecked"})
  final SolrCache<PerSegCacheKey,Shape> cache =
      SolrRequestInfo.getRequestInfo().getReq().getSearcher().getCache(CACHE_KEY_PREFIX + fieldName);
  if (cache == null) {
    return targetFuncValues; // no caching; no configured cache
  }

  return new ShapeValues() {
    int docId = -1;

    @Override
    public Shape value() throws IOException {
      //lookup in cache
      IndexReader.CacheHelper cacheHelper = readerContext.reader().getCoreCacheHelper();
      if (cacheHelper == null) {
        throw new IllegalStateException("Leaf " + readerContext.reader() + " is not suited for caching");
      }
      PerSegCacheKey key = new PerSegCacheKey(cacheHelper.getKey(), docId);
      Shape shape = cache.computeIfAbsent(key, k -> {
        try {
          return targetFuncValues.value();
        } catch (IOException e) {
          return null;
        }
      });
      if (shape != null) {
        //optimize shape on a cache hit if possible. This must be thread-safe and it is.
        if (shape instanceof JtsGeometry) {
          ((JtsGeometry) shape).index(); // TODO would be nice if some day we didn't have to cast
        }
      }
      return shape;
    }

    @Override
    public boolean advanceExact(int doc) throws IOException {
      this.docId = doc;
      return targetFuncValues.advanceExact(doc);
    }

  };

}
 
Example 14
Source File: BinaryDVIndexFieldData.java    From Elasticsearch with Apache License 2.0
@Override
public BinaryDVAtomicFieldData load(LeafReaderContext context) {
    return new BinaryDVAtomicFieldData(context.reader(), fieldNames.indexName());
}
 
Example 15
Source File: QueriesLoaderCollector.java    From Elasticsearch with Apache License 2.0
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
    reader = context.reader();
    uidValues = uidFieldData.load(context).getBytesValues();
}
 
Example 16
Source File: SegmentsInfoRequestHandler.java    From lucene-solr with Apache License 2.0
private SimpleOrderedMap<Object> getSegmentInfo(
    SegmentCommitInfo segmentCommitInfo, boolean withSizeInfo, boolean withFieldInfos,
    List<LeafReaderContext> leafContexts, IndexSchema schema) throws IOException {
  SimpleOrderedMap<Object> segmentInfoMap = new SimpleOrderedMap<>();

  segmentInfoMap.add(NAME, segmentCommitInfo.info.name);
  segmentInfoMap.add("delCount", segmentCommitInfo.getDelCount());
  segmentInfoMap.add("softDelCount", segmentCommitInfo.getSoftDelCount());
  segmentInfoMap.add("hasFieldUpdates", segmentCommitInfo.hasFieldUpdates());
  segmentInfoMap.add("sizeInBytes", segmentCommitInfo.sizeInBytes());
  segmentInfoMap.add("size", segmentCommitInfo.info.maxDoc());
  long timestamp = Long.parseLong(segmentCommitInfo.info.getDiagnostics()
      .get("timestamp"));
  segmentInfoMap.add("age", new Date(timestamp));
  segmentInfoMap.add("source",
      segmentCommitInfo.info.getDiagnostics().get("source"));
  segmentInfoMap.add("version", segmentCommitInfo.info.getVersion().toString());
  // don't open a new SegmentReader - try to find the right one from the leaf contexts
  SegmentReader seg = null;
  for (LeafReaderContext lrc : leafContexts) {
    LeafReader leafReader = lrc.reader();
    leafReader = FilterLeafReader.unwrap(leafReader);
    if (leafReader instanceof SegmentReader) {
      SegmentReader sr = (SegmentReader)leafReader;
      if (sr.getSegmentInfo().info.equals(segmentCommitInfo.info)) {
        seg = sr;
        break;
      }
    }
  }
  if (seg != null) {
    LeafMetaData metaData = seg.getMetaData();
    if (metaData != null) {
      segmentInfoMap.add("createdVersionMajor", metaData.getCreatedVersionMajor());
      segmentInfoMap.add("minVersion", metaData.getMinVersion().toString());
      if (metaData.getSort() != null) {
        segmentInfoMap.add("sort", metaData.getSort().toString());
      }
    }
  }
  if (!segmentCommitInfo.info.getDiagnostics().isEmpty()) {
    segmentInfoMap.add("diagnostics", segmentCommitInfo.info.getDiagnostics());
  }
  if (!segmentCommitInfo.info.getAttributes().isEmpty()) {
    segmentInfoMap.add("attributes", segmentCommitInfo.info.getAttributes());
  }
  if (withSizeInfo) {
    Directory dir = segmentCommitInfo.info.dir;
    List<Pair<String, Long>> files = segmentCommitInfo.files().stream()
        .map(f -> {
          long size = -1;
          try {
            size = dir.fileLength(f);
          } catch (IOException e) {
            // ignore: report -1 if the file length cannot be read
          }
          return new Pair<String, Long>(f, size);
        }).sorted((p1, p2) -> {
          if (p1.second() > p2.second()) {
            return -1;
          } else if (p1.second() < p2.second()) {
            return 1;
          } else {
            return 0;
          }
        }).collect(Collectors.toList());
    if (!files.isEmpty()) {
      SimpleOrderedMap<Object> topFiles = new SimpleOrderedMap<>();
      for (int i = 0; i < Math.min(files.size(), 5); i++) {
        Pair<String, Long> p = files.get(i);
        topFiles.add(p.first(), RamUsageEstimator.humanReadableUnits(p.second()));
      }
      segmentInfoMap.add("largestFiles", topFiles);
    }
  }
  if (seg != null && withSizeInfo) {
    SimpleOrderedMap<Object> ram = new SimpleOrderedMap<>();
    ram.add("total", seg.ramBytesUsed());
    for (Accountable ac : seg.getChildResources()) {
      accountableToMap(ac, ram::add);
    }
    segmentInfoMap.add("ramBytesUsed", ram);
  }
  if (withFieldInfos) {
    if (seg == null) {
      log.debug("Skipping segment info - not available as a SegmentReader: {}", segmentCommitInfo);
    } else {
      FieldInfos fis = seg.getFieldInfos();
      SimpleOrderedMap<Object> fields = new SimpleOrderedMap<>();
      for (FieldInfo fi : fis) {
        fields.add(fi.name, getFieldInfo(seg, fi, schema));
      }
      segmentInfoMap.add("fields", fields);
    }
  }

  return segmentInfoMap;
}
 
Example 17
Source File: IndexSizeEstimatorTest.java    From lucene-solr with Apache License 2.0
@Test
public void testEstimator() throws Exception {
  JettySolrRunner jetty = cluster.getRandomJetty(random());
  String randomCoreName = jetty.getCoreContainer().getAllCoreNames().iterator().next();
  SolrCore core = jetty.getCoreContainer().getCore(randomCoreName);
  RefCounted<SolrIndexSearcher> searcherRef = core.getSearcher();
  try {
    SolrIndexSearcher searcher = searcherRef.get();
    // limit the max length
    IndexSizeEstimator estimator = new IndexSizeEstimator(searcher.getRawReader(), 20, 50, true, true);
    IndexSizeEstimator.Estimate estimate = estimator.estimate();
    Map<String, Long> fieldsBySize = estimate.getFieldsBySize();
    assertFalse("empty fieldsBySize", fieldsBySize.isEmpty());
    assertEquals(fieldsBySize.toString(), fields.size(), fieldsBySize.size());
    fieldsBySize.forEach((k, v) -> assertTrue("unexpected size of " + k + ": " + v, v > 0));
    Map<String, Long> typesBySize = estimate.getTypesBySize();
    assertFalse("empty typesBySize", typesBySize.isEmpty());
    assertTrue("expected at least 8 types: " + typesBySize.toString(), typesBySize.size() >= 8);
    typesBySize.forEach((k, v) -> assertTrue("unexpected size of " + k + ": " + v, v > 0));
    Map<String, Object> summary = estimate.getSummary();
    assertNotNull("summary", summary);
    assertFalse("empty summary", summary.isEmpty());
    assertEquals(summary.keySet().toString(), fields.size(), summary.keySet().size());
    Map<String, Object> details = estimate.getDetails();
    assertNotNull("details", details);
    assertFalse("empty details", details.isEmpty());
    // by type
    assertEquals(details.keySet().toString(), 6, details.keySet().size());

    // check sampling
    estimator.setSamplingThreshold(searcher.getRawReader().maxDoc() / 2);
    IndexSizeEstimator.Estimate sampledEstimate = estimator.estimate();
    Map<String, Long> sampledFieldsBySize = sampledEstimate.getFieldsBySize();
    assertFalse("empty fieldsBySize", sampledFieldsBySize.isEmpty());
    // verify that the sampled values are within 50% of the original values
    fieldsBySize.forEach((field, size) -> {
      Long sampledSize = sampledFieldsBySize.get(field);
      assertNotNull("sampled size for " + field + " is missing in " + sampledFieldsBySize, sampledSize);
      double delta = (double) size * 0.5;
      assertEquals("sampled size of " + field + " is wildly off", (double)size, (double)sampledSize, delta);
    });
    // verify the reader is still usable - SOLR-13694
    IndexReader reader = searcher.getRawReader();
    for (LeafReaderContext context : reader.leaves()) {
      LeafReader leafReader = context.reader();
      assertTrue("unexpected LeafReader class: " + leafReader.getClass().getName(), leafReader instanceof CodecReader);
      Bits liveDocs = leafReader.getLiveDocs();
      CodecReader codecReader = (CodecReader) leafReader;
      StoredFieldsReader storedFieldsReader = codecReader.getFieldsReader();
      StoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
      assertNotNull(storedFieldsReader);
      for (int docId = 0; docId < leafReader.maxDoc(); docId++) {
        if (liveDocs != null && !liveDocs.get(docId)) {
          continue;
        }
        storedFieldsReader.visitDocument(docId, visitor);
      }
    }
  } finally {
    searcherRef.decref();
    core.close();
  }
}
 
Example 18
Source File: FieldVisitorCollector.java    From Elasticsearch with Apache License 2.0
@Override
protected void doSetNextReader(LeafReaderContext context) throws IOException {
    super.doSetNextReader(context);
    collector.getLeafCollector(context);
    currentReader = context.reader();
}
 
Example 19
Source File: TestLucene80DocValuesFormat.java    From lucene-solr with Apache License 2.0
private void doTestSortedNumericBlocksOfVariousBitsPerValue(LongSupplier counts) throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setMaxBufferedDocs(atLeast(Lucene80DocValuesFormat.NUMERIC_BLOCK_SIZE));
  conf.setRAMBufferSizeMB(-1);
  conf.setMergePolicy(newLogMergePolicy(random().nextBoolean()));
  IndexWriter writer = new IndexWriter(dir, conf);
  
  final int numDocs = atLeast(Lucene80DocValuesFormat.NUMERIC_BLOCK_SIZE*3);
  final LongSupplier values = blocksOfVariousBPV();
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    
    int valueCount = (int) counts.getAsLong();
    long[] valueArray = new long[valueCount];
    for (int j = 0; j < valueCount; j++) {
      long value = values.getAsLong();
      valueArray[j] = value;
      doc.add(new SortedNumericDocValuesField("dv", value));
    }
    Arrays.sort(valueArray);
    for (int j = 0; j < valueCount; j++) {
      doc.add(new StoredField("stored", Long.toString(valueArray[j])));
    }
    writer.addDocument(doc);
    if (random().nextInt(31) == 0) {
      writer.commit();
    }
  }
  writer.forceMerge(1);

  writer.close();
  
  // compare
  DirectoryReader ir = DirectoryReader.open(dir);
  TestUtil.checkReader(ir);
  for (LeafReaderContext context : ir.leaves()) {
    LeafReader r = context.reader();
    SortedNumericDocValues docValues = DocValues.getSortedNumeric(r, "dv");
    for (int i = 0; i < r.maxDoc(); i++) {
      if (i > docValues.docID()) {
        docValues.nextDoc();
      }
      String[] expected = r.document(i).getValues("stored");
      if (i < docValues.docID()) {
        assertEquals(0, expected.length);
      } else {
        String[] actual = new String[docValues.docValueCount()];
        for (int j = 0; j < actual.length; j++) {
          actual[j] = Long.toString(docValues.nextValue());
        }
        assertArrayEquals(expected, actual);
      }
    }
  }
  ir.close();
  dir.close();
}
 
Example 20
Source File: SpanWeight.java    From lucene-solr with Apache License 2.0
/**
 * Return a LeafSimScorer for this context
 * @param context the LeafReaderContext
 * @return a LeafSimScorer, or null if no scoring is required
 * @throws IOException on error
 */
public LeafSimScorer getSimScorer(LeafReaderContext context) throws IOException {
  return simScorer == null ? null : new LeafSimScorer(simScorer, context.reader(), field, true);
}