Java Code Examples for org.apache.lucene.util.TestUtil#nextInt()

The following examples show how to use org.apache.lucene.util.TestUtil#nextInt(). Each example is taken from an open-source project; the originating project and source file are noted above each snippet.
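TestUtil.nextInt(Random, start, end) draws a pseudo-random int from the inclusive range [start, end]; both bounds can be returned, which the examples below rely on. A minimal, self-contained sketch of that behavior (the class name and fixed seed are illustrative assumptions, not taken from any example):

import java.util.Random;

import org.apache.lucene.util.TestUtil;

public class NextIntDemo {
  public static void main(String[] args) {
    Random random = new Random(42);           // fixed seed so the demo is reproducible
    int n = TestUtil.nextInt(random, 1, 10);  // 1 <= n <= 10; inclusive on both ends
    System.out.println(n);
  }
}

Within Lucene's own test framework, the Random instance instead comes from LuceneTestCase.random(), as in every example below.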
Example 1
Source File: TestCheckJoinIndex.java    From lucene-solr with Apache License 2.0
public void testNoParent() throws IOException {
  final Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  final int numDocs = TestUtil.nextInt(random(), 1, 3);
  for (int i = 0; i < numDocs; ++i) {
    w.addDocument(new Document());
  }
  final IndexReader reader = w.getReader();
  w.close();
  BitSetProducer parentsFilter = new QueryBitSetProducer(new MatchNoDocsQuery());
  try {
    expectThrows(IllegalStateException.class, () -> CheckJoinIndex.check(reader, parentsFilter));
  } finally {
    reader.close();
    dir.close();
  }
}
 
Example 2
Source File: TestBKDRadixSort.java    From lucene-solr with Apache License 2.0
public void testRandomLastByteTwoValues() throws IOException {
  int numPoints = TestUtil.nextInt(random(), 1, BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE);
  int indexDimensions = TestUtil.nextInt(random(), 1, 8);
  int dataDimensions = TestUtil.nextInt(random(), indexDimensions, 8);
  int bytesPerDim = TestUtil.nextInt(random(), 2, 30);
  int packedBytesLength = dataDimensions * bytesPerDim;
  HeapPointWriter points = new HeapPointWriter(numPoints, packedBytesLength);
  byte[] value = new byte[packedBytesLength];
  random().nextBytes(value);
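  // all points share one packed value and differ only in doc id (1 or 2), exercising the sorter's tie-breaking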
  for (int i = 0; i < numPoints; i++) {
    if (random().nextBoolean()) {
      points.append(value, 1);
    } else {
      points.append(value, 2);
    }
  }
  verifySort(points, dataDimensions, indexDimensions, 0, numPoints, bytesPerDim);
}
 
Example 3
Source File: NGramTokenizerTest.java    From lucene-solr with Apache License 2.0
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
  int numIters = TEST_NIGHTLY ? 10 : 1;
  for (int i = 0; i < numIters; i++) {
    final int min = TestUtil.nextInt(random(), 2, 10);
    final int max = TestUtil.nextInt(random(), min, 20);
    Analyzer a = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName) {
        Tokenizer tokenizer = new NGramTokenizer(min, max);
        return new TokenStreamComponents(tokenizer, tokenizer);
      }    
    };
    checkRandomData(random(), a, 200 * RANDOM_MULTIPLIER, 20);
    checkRandomData(random(), a, 10*RANDOM_MULTIPLIER, 1027);
    a.close();
  }
}
 
Example 4
Source File: TestIndexedDISI.java    From lucene-solr with Apache License 2.0
private void assertAdvanceExactRandomized(IndexedDISI disi, BitSetIterator disi2, int disi2length, int step)
    throws IOException {
  int index = -1;
  Random random = random();
  for (int target = 0; target < disi2length; ) {
    target += TestUtil.nextInt(random, 0, step);
    int doc = disi2.docID();
    while (doc < target) {
      doc = disi2.nextDoc();
      index++;
    }

    boolean exists = disi.advanceExact(target);
    assertEquals(doc == target, exists);
    if (exists) {
      assertEquals(index, disi.index());
    } else if (random.nextBoolean()) {
      assertEquals(doc, disi.nextDoc());
      // This is a bit strange when doc == NO_MORE_DOCS as the index overcounts in the disi2 while-loop
      assertEquals(index, disi.index());
      target = doc;
    }
  }
}
 
Example 5
Source File: TestBKD.java    From lucene-solr with Apache License 2.0
public void testOneDimTwoValues() throws Exception {
  int numBytesPerDim = TestUtil.nextInt(random(), 2, 30);
  int numDataDims = TestUtil.nextInt(random(), 1, PointValues.MAX_DIMENSIONS);
  int numIndexDims = Math.min(TestUtil.nextInt(random(), 1, numDataDims), PointValues.MAX_INDEX_DIMENSIONS);

  int numDocs = atLeast(1000);
  int theDim = random().nextInt(numDataDims);
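  // values in theDim are limited to two possibilities; every other dimension gets fully random bytes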
  byte[] value1 = new byte[numBytesPerDim];
  random().nextBytes(value1);
  byte[] value2 = new byte[numBytesPerDim];
  random().nextBytes(value2);
  byte[][][] docValues = new byte[numDocs][][];

  for(int docID=0;docID<numDocs;docID++) {
    byte[][] values = new byte[numDataDims][];
    for(int dim=0;dim<numDataDims;dim++) {
      if (dim == theDim) {
        values[dim] = random().nextBoolean() ? value1 : value2;
      } else {
        values[dim] = new byte[numBytesPerDim];
        random().nextBytes(values[dim]);
      }
    }
    docValues[docID] = values;
  }

  verify(docValues, null, numDataDims, numIndexDims, numBytesPerDim);
}
 
Example 6
Source File: TestMissingGroups.java    From lucene-solr with Apache License 2.0
public SpecialField(int numDocs, String field, Object valueX, Object valueY) {
  this.field = field;

  this.valueX = valueX;
  this.valueY = valueY;

  // choose two distinct doc ids in [1, numDocs-1]; docY is drawn strictly above or below docX
  this.docX = TestUtil.nextInt(random(),1,numDocs-1);
  this.docY = (docX < (numDocs / 2))
    ? TestUtil.nextInt(random(),docX+1,numDocs-1)
    : TestUtil.nextInt(random(),1,docX-1);

  special_docids.add(docX);
  special_docids.add(docY);
}
 
Example 7
Source File: LogLtrRankerTests.java    From elasticsearch-learning-to-rank with Apache License 2.0
public void testNewFeatureVector() throws Exception {
    int modelSize = TestUtil.nextInt(random(), 1, 20);

    final float[] expectedScores = new float[modelSize];
    LinearRankerTests.fillRandomWeights(expectedScores);

    final float[] actualScores = new float[modelSize];
    LogLtrRanker ranker = new LogLtrRanker(new NullRanker(modelSize), (i, s) -> actualScores[i] = s);
    LtrRanker.FeatureVector vector = ranker.newFeatureVector(null);
    for (int i = 0; i < expectedScores.length; i++) {
        vector.setFeatureScore(i, expectedScores[i]);
    }
    assertArrayEquals(expectedScores, actualScores, 0F);
}
 
Example 8
Source File: TestHashPartitioner.java    From lucene-solr with Apache License 2.0
/** Make sure CompositeIdRouter can route random IDs without throwing exceptions */
public void testRandomCompositeIds() throws Exception {
  DocRouter router = DocRouter.getDocRouter(CompositeIdRouter.NAME);
  DocCollection coll = createCollection(TestUtil.nextInt(random(), 1, 10), router);
  StringBuilder idBuilder = new StringBuilder();
  for (int i = 0 ; i < 10000 ; ++i) {
    idBuilder.setLength(0);
    int numParts = TestUtil.nextInt(random(), 1, 30);
    for (int part = 0; part < numParts; ++part) {
      switch (random().nextInt(5)) {
        case 0: idBuilder.append('!'); break;
        case 1: idBuilder.append('/'); break;
        case 2: idBuilder.append(TestUtil.nextInt(random(),-100, 1000)); break;
        default: {
          int length = TestUtil.nextInt(random(), 1, 10);
          char[] str = new char[length];
          TestUtil.randomFixedLengthUnicodeString(random(), str, 0, length);
          idBuilder.append(str);
          break;
        } 
      }
    }
    String id = idBuilder.toString();
    try {
      Slice targetSlice = router.getTargetSlice(id, null, null, null, coll);
      assertNotNull(targetSlice);
    } catch (Exception e) {
      throw new Exception("Exception routing id '" + id + "'", e);
    }
  }
}
 
Example 9
Source File: TestPendingDeletes.java    From lucene-solr with Apache License 2.0
public void testDeleteDoc() throws IOException {
  Directory dir = new ByteBuffersDirectory();
  SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "test", 10, false, Codec.getDefault(),
      Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
  SegmentCommitInfo commitInfo = new SegmentCommitInfo(si, 0, 0, -1, -1, -1, StringHelper.randomId());
  PendingDeletes deletes = newPendingDeletes(commitInfo);
  assertNull(deletes.getLiveDocs());
  int docToDelete = TestUtil.nextInt(random(), 0, 7);
  assertTrue(deletes.delete(docToDelete));
  assertNotNull(deletes.getLiveDocs());
  assertEquals(1, deletes.numPendingDeletes());

  Bits liveDocs = deletes.getLiveDocs();
  assertFalse(liveDocs.get(docToDelete));
  assertFalse(deletes.delete(docToDelete)); // delete again

  assertTrue(liveDocs.get(8));
  assertTrue(deletes.delete(8));
  assertTrue(liveDocs.get(8)); // we have a snapshot
  assertEquals(2, deletes.numPendingDeletes());

  assertTrue(liveDocs.get(9));
  assertTrue(deletes.delete(9));
  assertTrue(liveDocs.get(9));

  // now make sure new live docs see the deletions
  liveDocs = deletes.getLiveDocs();
  assertFalse(liveDocs.get(9));
  assertFalse(liveDocs.get(8));
  assertFalse(liveDocs.get(docToDelete));
  assertEquals(3, deletes.numPendingDeletes());
  dir.close();
}
 
Example 10
Source File: TestMultiRangeQueries.java    From lucene-solr with Apache License 2.0
public void testLongRandomMultiRangeQuery() throws IOException {
  final int numDims = TestUtil.nextInt(random(), 1, 3);
  final int numVals = TestUtil.nextInt(random(), 3, 8);
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  long[] value = new long[numDims];
  for (int i = 0; i < numDims; ++i) {
    value[i] = TestUtil.nextLong(random(), 1, 10);
  }
  doc.add(new LongPoint("point", value));
  w.addDocument(doc);
  IndexReader reader = w.getReader();
  IndexSearcher searcher = new IndexSearcher(reader);
  searcher.setQueryCache(null);
  LongPointMultiRangeBuilder builder = new LongPointMultiRangeBuilder("point", numDims);
  for (int j = 0;j < numVals; j++) {
    long[] lowerBound = new long[numDims];
    long[] upperBound = new long[numDims];
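    // random().nextInt(1) always returns 0, so both bounds collapse to the exact point value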
    for (int i = 0; i < numDims; ++i) {
      lowerBound[i] = value[i] - random().nextInt(1);
      upperBound[i] = value[i] + random().nextInt(1);
    }
    builder.add(lowerBound, upperBound);
  }

  Query query = builder.build();
  searcher.search(query, Integer.MAX_VALUE);

  reader.close();
  w.close();
  dir.close();
}
 
Example 11
Source File: TestPointQueries.java    From lucene-solr with Apache License 2.0
private static int randomIntValue(Integer min, Integer max) {
  if (min == null) {
    return random().nextInt();
  } else {
    return TestUtil.nextInt(random(), min, max);
  }
}
 
Example 12
Source File: TestSortedSetDocValuesFacets.java    From lucene-solr with Apache License 2.0
private ExecutorService randomExecutorServiceOrNull() {
  if (random().nextBoolean()) {
    return null;
  } else {
    return new ThreadPoolExecutor(1, TestUtil.nextInt(random(), 2, 6), Long.MAX_VALUE, TimeUnit.MILLISECONDS,
                                  new LinkedBlockingQueue<Runnable>(),
                                  new NamedThreadFactory("TestIndexSearcher"));
  }
}
 
Example 13
Source File: TestExpressionSorts.java    From lucene-solr with Apache License 2.0
void assertQuery(Query query, Sort sort) throws Exception {
  int size = TestUtil.nextInt(random(), 1, searcher.getIndexReader().maxDoc() / 5);
  TopDocs expected = searcher.search(query, size, sort, random().nextBoolean());
  
  // make our actual sort, mutating original by replacing some of the 
  // sortfields with equivalent expressions
  
  SortField original[] = sort.getSort();
  SortField mutated[] = new SortField[original.length];
  for (int i = 0; i < mutated.length; i++) {
    if (random().nextInt(3) > 0) {
      SortField s = original[i];
      Expression expr = JavascriptCompiler.compile(s.getField());
      SimpleBindings simpleBindings = new SimpleBindings();
      simpleBindings.add(s.getField(), fromSortField(s));
      boolean reverse = s.getType() == SortField.Type.SCORE || s.getReverse();
      mutated[i] = expr.getSortField(simpleBindings, reverse);
    } else {
      mutated[i] = original[i];
    }
  }
  
  Sort mutatedSort = new Sort(mutated);
  TopDocs actual = searcher.search(query, size, mutatedSort, random().nextBoolean());
  CheckHits.checkEqual(query, expected.scoreDocs, actual.scoreDocs);
  
  if (size < actual.totalHits.value) {
    expected = searcher.searchAfter(expected.scoreDocs[size-1], query, size, sort);
    actual = searcher.searchAfter(actual.scoreDocs[size-1], query, size, mutatedSort);
    CheckHits.checkEqual(query, expected.scoreDocs, actual.scoreDocs);
  }
}
 
Example 14
Source File: TestCloudJSONFacetSKGEquiv.java    From lucene-solr with Apache License 2.0
/** 
 * recursive helper method for building random facets
 *
 * @param keyCounter used to ensure every generated facet has a unique key name
 * @param maxDepth maximum depth allowed for the recursion; a lower value may be used depending on how many facets are returned at the current level.
 */
private static Map<String,TermFacet> buildRandomFacets(AtomicInteger keyCounter, int maxDepth) {
  final int numFacets = Math.max(1, TestUtil.nextInt(random(), -1, 3)); // 3/5th chance of being '1'
  Map<String,TermFacet> results = new LinkedHashMap<>();
  for (int i = 0; i < numFacets; i++) {
    if (keyCounter.get() < 3) { // a hard limit on the total number of facets (regardless of depth) to reduce OOM risk

      final TermFacet facet = TermFacet.buildRandom();
      
      results.put("facet_" + keyCounter.incrementAndGet(), facet);
      if (0 < maxDepth) {
        // if we're going wide, don't go deep
        final int nextMaxDepth = Math.max(0, maxDepth - numFacets);
        facet.subFacets.putAll(buildRandomFacets(keyCounter, TestUtil.nextInt(random(), 0, nextMaxDepth)));
      }
      
      // we get one implicit RelatednessFacet automatically,
      // randomly add 1 or 2 more ... 3/5th chance of being '0'
      final int numExtraSKGStats = Math.max(0, TestUtil.nextInt(random(), -2, 2)); 
      for (int skgId = 0; skgId < numExtraSKGStats; skgId++) {
      // sometimes we overwrite the trivial default "skg" with this one...
        final String key = (0 == skgId && 0 == TestUtil.nextInt(random(), 0, 5)) ? "skg" : "skg" + skgId;
        facet.subFacets.put(key, RelatednessFacet.buildRandom());
      }

      if (1 == TestUtil.nextInt(random(), 0, 4)) {
        // occasionally add in a non-SKG related stat...
        facet.subFacets.put("sum", SumFacet.buildRandom());
      }
    }
  }
  return results;
}
 
Example 15
Source File: TestStressCloudBlindAtomicUpdates.java    From lucene-solr with Apache License 2.0
public void run() {
  final String origThreadName = Thread.currentThread().getName();
  try {
    Thread.currentThread().setName(origThreadName + "-w" + workerId);
    final int maxDocMultiplier = expected.length-1;
    for (int docIter = 0; docIter < numDocsToUpdate; docIter++) {

      final int docId = DOC_ID_INCR * TestUtil.nextInt(rand, 0, maxDocMultiplier);

      // tweak our thread name to keep track of what we're up to
      Thread.currentThread().setName(origThreadName + "-w" + workerId + "-d" + docId);

      // no matter how random the doc selection may be per thread, ensure
      // every doc that is selected by *a* thread gets at least a couple rapid fire updates
      final int itersPerDoc = atLeast(rand, 2);
      
      for (int updateIter = 0; updateIter < itersPerDoc; updateIter++) {
        if (0 == abortLatch.getCount()) {
          return;
        }
        doRandomAtomicUpdate(docId);
      }
      if (rand.nextBoolean()) { Thread.yield(); }
    }
    
  } catch (Error err) {
    log.error(Thread.currentThread().getName(), err);
    abortLatch.countDown();
    throw err;
  } catch (Exception ex) {
    log.error(Thread.currentThread().getName(), ex);
    abortLatch.countDown();
    throw new RuntimeException(ex.getMessage(), ex);
  } finally {
    Thread.currentThread().setName(origThreadName);
  }
  ok = true;
}
 
Example 16
Source File: TestTieredMergePolicy.java    From lucene-solr with Apache License 2.0
public void testPartialMerge() throws Exception {
  int num = atLeast(10);
  for(int iter=0;iter<num;iter++) {
    if (VERBOSE) {
      System.out.println("TEST: iter=" + iter);
    }
    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
    conf.setMergeScheduler(new SerialMergeScheduler());
    TieredMergePolicy tmp = newTieredMergePolicy();
    conf.setMergePolicy(tmp);
    conf.setMaxBufferedDocs(2);
    tmp.setMaxMergeAtOnce(3);
    tmp.setSegmentsPerTier(6);

    IndexWriter w = new IndexWriter(dir, conf);
    int maxCount = 0;
    final int numDocs = TestUtil.nextInt(random(), 20, 100);
    for(int i=0;i<numDocs;i++) {
      Document doc = new Document();
      doc.add(newTextField("content", "aaa " + (i%4), Field.Store.NO));
      w.addDocument(doc);
      int count = w.getSegmentCount();
      maxCount = Math.max(count, maxCount);
      assertTrue("count=" + count + " maxCount=" + maxCount, count >= maxCount-3);
    }

    w.flush(true, true);

    int segmentCount = w.getSegmentCount();
    int targetCount = TestUtil.nextInt(random(), 1, segmentCount);
    if (VERBOSE) {
      System.out.println("TEST: merge to " + targetCount + " segs (current count=" + segmentCount + ")");
    }
    w.forceMerge(targetCount);

    final double maxSegmentSize = Math.max(tmp.getMaxMergedSegmentMB(), tmp.getFloorSegmentMB());
    final long max125Pct = (long) ((maxSegmentSize * 1024.0 * 1024.0) * 1.25);
    // Other than in the case where the target count is 1 we can't say much except no segment should be > 125% of max seg size.
    if (targetCount == 1) {
      assertEquals("Should have merged down to one segment", targetCount, w.getSegmentCount());
    } else {
      // why can't we say much? Well...
      // 1> the random numbers generated above mean we could have 10 segments and a target max count of, say, 9. we
      //    could get there by combining only 2 segments. So tests like "no pair of segments should total less than
      //    125% max segment size" aren't valid.
      //
      // 2> We could have 10 segments and a target count of 2. In that case there could be 5 segments resulting.
      //    as long as they're all < 125% max seg size, that's valid.
      Iterator<SegmentCommitInfo> iterator = w.cloneSegmentInfos().iterator();
      while (iterator.hasNext()) {
        SegmentCommitInfo info = iterator.next();
        assertTrue("No segment should be more than 125% of max segment size ",
            max125Pct >= info.sizeInBytes());
      }
    }

    w.close();
    dir.close();
  }
}
 
Example 17
Source File: TestCloudPivotFacet.java    From lucene-solr with Apache License 2.0
@BeforeClass
public static void initUseFieldRandomizedFactor() {
  useFieldRandomizedFactor = TestUtil.nextInt(random(), 2, 30);
  log.info("init'ing useFieldRandomizedFactor = {}", useFieldRandomizedFactor);
}
 
Example 18
Source File: TestSearcherManager.java    From lucene-solr with Apache License 2.0
public void testSearcherManager() throws Exception {
  pruner = new SearcherLifetimeManager.PruneByAge(TEST_NIGHTLY ? TestUtil.nextInt(random(), 1, 20) : 1);
  runTest("TestSearcherManager");
}
 
Example 19
Source File: TestDocValuesStatsCollector.java    From lucene-solr with Apache License 2.0
public void testDocsWithMultipleLongValues() throws IOException {
  try (Directory dir = newDirectory();
      IndexWriter indexWriter = new IndexWriter(dir, newIndexWriterConfig())) {
    String field = "numeric";
    int numDocs = TestUtil.nextInt(random(), 1, 100);
    long[][] docValues = new long[numDocs][];
    long nextVal = 1;
    for (int i = 0; i < numDocs; i++) {
      Document doc = new Document();
      if (random().nextBoolean()) { // not all documents have a value
        int numValues = TestUtil.nextInt(random(), 1, 5);
        docValues[i] = new long[numValues];
        for (int j = 0; j < numValues; j++) {
          doc.add(new SortedNumericDocValuesField(field, nextVal));
          docValues[i][j] = nextVal;
          ++nextVal;
        }
        doc.add(new StringField("id", "doc" + i, Store.NO));
      }
      indexWriter.addDocument(doc);
    }

    // 20% of cases delete some docs
    if (random().nextDouble() < 0.2) {
      for (int i = 0; i < numDocs; i++) {
        if (random().nextBoolean()) {
          indexWriter.deleteDocuments(new Term("id", "doc" + i));
          docValues[i] = null;
        }
      }
    }

    try (DirectoryReader reader = DirectoryReader.open(indexWriter)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      SortedLongDocValuesStats stats = new SortedLongDocValuesStats(field);
      searcher.search(new MatchAllDocsQuery(), new DocValuesStatsCollector(stats));

      assertEquals(nonNull(docValues).count(), stats.count());
      int numDocsWithoutField = (int) isNull(docValues).count();
      assertEquals(computeExpMissing(numDocsWithoutField, numDocs, reader), stats.missing());
      if (stats.count() > 0) {
        LongSummaryStatistics sumStats = filterAndFlatValues(docValues, (v) -> v != null).summaryStatistics();
        assertEquals(sumStats.getMax(), stats.max().longValue());
        assertEquals(sumStats.getMin(), stats.min().longValue());
        assertEquals(sumStats.getAverage(), stats.mean(), 0.00001);
        assertEquals(sumStats.getSum(), stats.sum().longValue());
        assertEquals(sumStats.getCount(), stats.valuesCount());
        double variance = computeVariance(filterAndFlatValues(docValues, (v) -> v != null), stats.mean, stats.count());
        assertEquals(variance, stats.variance(), 0.00001);
        assertEquals(Math.sqrt(variance), stats.stdev(), 0.00001);
      }
    }
  }
}
 
Example 20
Source File: TestLegacyFieldCache.java    From lucene-solr with Apache License 2.0
public void testLongFieldCache() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
  cfg.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
  Document doc = new Document();
  LegacyLongField field = new LegacyLongField("f", 0L, Store.YES);
  doc.add(field);
  final long[] values = new long[TestUtil.nextInt(random(), 1, 10)];
  Set<Integer> missing = new HashSet<>();
  for (int i = 0; i < values.length; ++i) {
    final long v;
    switch (random().nextInt(10)) {
      case 0:
        v = Long.MIN_VALUE;
        break;
      case 1:
        v = 0;
        break;
      case 2:
        v = Long.MAX_VALUE;
        break;
      default:
        v = TestUtil.nextLong(random(), -10, 10);
        break;
    }
    values[i] = v;
    if (v == 0 && random().nextBoolean()) {
      // missing
      iw.addDocument(new Document());
      missing.add(i);
    } else {
      field.setLongValue(v);
      iw.addDocument(doc);
    }
  }
  iw.forceMerge(1);
  final DirectoryReader reader = iw.getReader();
  final NumericDocValues longs = FieldCache.DEFAULT.getNumerics(getOnlyLeafReader(reader), "f", FieldCache.LEGACY_LONG_PARSER);
  for (int i = 0; i < values.length; ++i) {
    if (missing.contains(i) == false) {
      assertEquals(i, longs.nextDoc());
      assertEquals(values[i], longs.longValue());
    }
  }
  assertEquals(NO_MORE_DOCS, longs.nextDoc());
  reader.close();
  iw.close();
  dir.close();
}