Java Code Examples for org.apache.lucene.index.DirectoryReader#close()

The following examples show how to use org.apache.lucene.index.DirectoryReader#close(). Each example is taken from an open-source project; the header above it names the original source file, the project it comes from, and its license.
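A DirectoryReader is a point-in-time view of an index and holds open file handles, so every reader that gets opened must eventually be closed, as each example below does. As a minimal sketch of the usual open/search/close cycle (the index path "/tmp/example-index" and the field/term are placeholders, not taken from any example below), the same thing can also be written with try-with-resources, which calls close() automatically:

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DirectoryReaderCloseSketch {
  public static void main(String[] args) throws IOException {
    // "/tmp/example-index" and the field/term below are placeholders for an existing index.
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"));
         DirectoryReader reader = DirectoryReader.open(dir)) {
      // IndexSearcher does not own the reader; the try-with-resources block closes it.
      IndexSearcher searcher = new IndexSearcher(reader);
      TopDocs hits = searcher.search(new TermQuery(new Term("field", "value")), 10);
      for (ScoreDoc sd : hits.scoreDocs) {
        Document doc = searcher.doc(sd.doc);
        System.out.println(doc);
      }
    } // reader.close() and dir.close() are called here, even if the search throws.
  }
}

The examples below close the reader explicitly instead, typically alongside the IndexWriter or Directory that produced it; note that closing the reader does not close the underlying Directory or writer.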
Example 1
Source File: TestRegexCompletionQuery.java    From lucene-solr with Apache License 2.0
@Test
public void testEmptyRegexContextQuery() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
  Document document = new Document();
  document.add(new ContextSuggestField("suggest_field", "suggestion", 1, "type"));
  iw.addDocument(document);

  if (rarely()) {
    iw.commit();
  }

  DirectoryReader reader = iw.getReader();
  SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
  ContextQuery query = new ContextQuery(new RegexCompletionQuery(new Term("suggest_field", "")));
  query.addContext("type", 1);

  TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false);
  assertEquals(0, suggest.scoreDocs.length);

  reader.close();
  iw.close();
}
 
Example 2
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0
public void testPropagatesScorerSupplier() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(NEVER_CACHE);

  LRUQueryCache cache = new LRUQueryCache(1, 1000);
  searcher.setQueryCache(cache);

  AtomicBoolean scorerCreated = new AtomicBoolean(false);
  Query query = new DummyQuery2(scorerCreated);
  Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1);
  ScorerSupplier supplier = weight.scorerSupplier(searcher.getIndexReader().leaves().get(0));
  assertFalse(scorerCreated.get());
  supplier.get(random().nextLong() & 0x7FFFFFFFFFFFFFFFL);
  assertTrue(scorerCreated.get());

  reader.close();
  w.close();
  dir.close();
}
 
Example 3
Source File: AssociationsFacetsExample.java    From lucene-solr with Apache License 2.0
/** User runs a query and aggregates facets by summing their association values. */
private List<FacetResult> sumAssociations() throws IOException {
  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  IndexSearcher searcher = new IndexSearcher(indexReader);
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
  
  FacetsCollector fc = new FacetsCollector();
  
  // MatchAllDocsQuery is for "browsing" (counts facets
  // for all non-deleted docs in the index); normally
  // you'd use a "normal" query:
  FacetsCollector.search(searcher, new MatchAllDocsQuery(), 10, fc);
  
  Facets tags = new TaxonomyFacetSumIntAssociations("$tags", taxoReader, config, fc);
  Facets genre = new TaxonomyFacetSumFloatAssociations("$genre", taxoReader, config, fc);

  // Retrieve results
  List<FacetResult> results = new ArrayList<>();
  results.add(tags.getTopChildren(10, "tags"));
  results.add(genre.getTopChildren(10, "genre"));

  indexReader.close();
  taxoReader.close();
  
  return results;
}
 
Example 4
Source File: TestPrefixCompletionQuery.java    From lucene-solr with Apache License 2.0
@Test
public void testEmptyPrefixQuery() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
  Document document = new Document();
  document.add(new SuggestField("suggest_field", "suggestion1", 1));
  iw.addDocument(document);

  if (rarely()) {
    iw.commit();
  }

  DirectoryReader reader = iw.getReader();
  SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
  PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", ""));

  TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false);
  assertEquals(0, suggest.scoreDocs.length);

  reader.close();
  iw.close();
}
 
Example 5
Source File: FacetStorageTest.java    From lumongo with Apache License 2.0
/** User drills down on 'Publish Year/2010'. */
private FacetResult drillDown() throws IOException {
	DirectoryReader indexReader = DirectoryReader.open(directory);
	IndexSearcher searcher = new IndexSearcher(indexReader);
	SortedSetDocValuesReaderState state = new DefaultSortedSetDocValuesReaderState(indexReader);
	
	// Now user drills down on Publish Year/2010:
	DrillDownQuery q = new DrillDownQuery(config);
	q.add("Publish Year", "2010");
	FacetsCollector fc = new FacetsCollector();
	FacetsCollector.search(searcher, q, 10, fc);
	
	// Retrieve results
	Facets facets = new SortedSetDocValuesFacetCounts(state, fc);
	FacetResult result = facets.getTopChildren(10, "Author");
	indexReader.close();
	
	return result;
}
 
Example 6
Source File: TestSuggestField.java    From lucene-solr with Apache License 2.0
@Test
public void testEmpty() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
  DirectoryReader reader = iw.getReader();
  SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
  PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "ab"));
  TopSuggestDocs lookupDocs = suggestIndexSearcher.suggest(query, 3, false);
  assertThat(lookupDocs.totalHits.value, equalTo(0L));
  reader.close();
  iw.close();
}
 
Example 7
Source File: TestFeatureField.java    From lucene-solr with Apache License 2.0
public void testComputePivotFeatureValue() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig());

  // Make sure that we create a legal pivot on missing features
  DirectoryReader reader = writer.getReader();
  float pivot = FeatureField.computePivotFeatureValue(reader, "features", "pagerank");
  assertTrue(Float.isFinite(pivot));
  assertTrue(pivot > 0);
  reader.close();

  Document doc = new Document();
  FeatureField pagerank = new FeatureField("features", "pagerank", 1);
  doc.add(pagerank);

  pagerank.setFeatureValue(10);
  writer.addDocument(doc);

  pagerank.setFeatureValue(100);
  writer.addDocument(doc);

  writer.addDocument(new Document()); // gap

  pagerank.setFeatureValue(1);
  writer.addDocument(doc);

  pagerank.setFeatureValue(42);
  writer.addDocument(doc);

  reader = writer.getReader();
  writer.close();

  pivot = FeatureField.computePivotFeatureValue(reader, "features", "pagerank");
  double expected = Math.pow(10 * 100 * 1 * 42, 1/4.); // geometric mean
  assertEquals(expected, pivot, 0.1);

  reader.close();
  dir.close();
}
 
Example 8
Source File: TestLegacyFieldCache.java    From lucene-solr with Apache License 2.0
public void testDocValuesIntegration() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig(null);
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
  Document doc = new Document();
  doc.add(new BinaryDocValuesField("binary", new BytesRef("binary value")));
  doc.add(new SortedDocValuesField("sorted", new BytesRef("sorted value")));
  doc.add(new NumericDocValuesField("numeric", 42));
  doc.add(new SortedSetDocValuesField("sortedset", new BytesRef("sortedset value1")));
  doc.add(new SortedSetDocValuesField("sortedset", new BytesRef("sortedset value2")));
  iw.addDocument(doc);
  DirectoryReader ir = iw.getReader();
  iw.close();
  LeafReader ar = getOnlyLeafReader(ir);
  
  // Binary type: can be retrieved via getTerms()
  expectThrows(IllegalStateException.class, () -> {
    FieldCache.DEFAULT.getNumerics(ar, "binary", FieldCache.LEGACY_INT_PARSER);
  });
  
  // Sorted type: can be retrieved via getTerms(), getTermsIndex(), getDocTermOrds()
  expectThrows(IllegalStateException.class, () -> {
    FieldCache.DEFAULT.getNumerics(ar, "sorted", FieldCache.LEGACY_INT_PARSER);
  });
  
  // Numeric type: can be retrieved via getInts() and so on
  NumericDocValues numeric = FieldCache.DEFAULT.getNumerics(ar, "numeric", FieldCache.LEGACY_INT_PARSER);
  assertEquals(0, numeric.nextDoc());
  assertEquals(42, numeric.longValue());
     
  // SortedSet type: can be retrieved via getDocTermOrds() 
  expectThrows(IllegalStateException.class, () -> {
    FieldCache.DEFAULT.getNumerics(ar, "sortedset", FieldCache.LEGACY_INT_PARSER);
  });
  
  ir.close();
  dir.close();
}
 
Example 9
Source File: TestSuggestField.java    From lucene-solr with Apache License 2.0
@Test
public void testNRTDeletedDocFiltering() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  // using IndexWriter instead of RandomIndexWriter
  IndexWriter iw = new IndexWriter(dir, iwcWithSuggestField(analyzer, "suggest_field"));

  int num = Math.min(1000, atLeast(10));

  int numLive = 0;
  List<Entry> expectedEntries = new ArrayList<>();
  for (int i = 0; i < num; i++) {
    Document document = new Document();
    document.add(new SuggestField("suggest_field", "abc_" + i, num - i));
    if (i % 2 == 0) {
      document.add(newStringField("str_field", "delete", Field.Store.YES));
    } else {
      numLive++;
      expectedEntries.add(new Entry("abc_" + i, num - i));
      document.add(newStringField("str_field", "no_delete", Field.Store.YES));
    }
    iw.addDocument(document);

    if (usually()) {
      iw.commit();
    }
  }

  iw.deleteDocuments(new Term("str_field", "delete"));

  DirectoryReader reader = DirectoryReader.open(iw);
  SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);
  PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"));
  TopSuggestDocs suggest = indexSearcher.suggest(query, numLive, false);
  assertSuggestions(suggest, expectedEntries.toArray(new Entry[expectedEntries.size()]));

  reader.close();
  iw.close();
}
 
Example 10
Source File: TestContextQuery.java    From lucene-solr with Apache License 2.0
@Test
public void testContextPrecedenceBoost() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
  Document document = new Document();

  document.add(new ContextSuggestField("suggest_field", "suggestion1", 4, "typetype"));
  document.add(new ContextSuggestField("suggest_field", "suggestion2", 3, "type"));
  iw.addDocument(document);

  if (rarely()) {
    iw.commit();
  }

  DirectoryReader reader = iw.getReader();
  SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
  ContextQuery query = new ContextQuery(new PrefixCompletionQuery(analyzer, new Term("suggest_field", "sugg")));
  query.addContext("type", 1);
  query.addContext("typetype", 2);
  TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false);
  assertSuggestions(suggest,
      new Entry("suggestion1", "typetype", 4 * 2),
      new Entry("suggestion2", "type", 3 * 1)
  );

  reader.close();
  iw.close();
}
 
Example 11
Source File: DataSplitterTest.java    From lucene-solr with Apache License 2.0
public static void assertSplit(LeafReader originalIndex, double testRatio, double crossValidationRatio, String... fieldNames) throws Exception {

    BaseDirectoryWrapper trainingIndex = newDirectory();
    BaseDirectoryWrapper testIndex = newDirectory();
    BaseDirectoryWrapper crossValidationIndex = newDirectory();

    try {
      DatasetSplitter datasetSplitter = new DatasetSplitter(testRatio, crossValidationRatio);
      datasetSplitter.split(originalIndex, trainingIndex, testIndex, crossValidationIndex, new MockAnalyzer(random()), true, classFieldName, fieldNames);

      assertNotNull(trainingIndex);
      assertNotNull(testIndex);
      assertNotNull(crossValidationIndex);

      DirectoryReader trainingReader = DirectoryReader.open(trainingIndex);
      assertEquals((int) (originalIndex.maxDoc() * (1d - testRatio - crossValidationRatio)), trainingReader.maxDoc(), 20);
      DirectoryReader testReader = DirectoryReader.open(testIndex);
      assertEquals((int) (originalIndex.maxDoc() * testRatio), testReader.maxDoc(), 20);
      DirectoryReader cvReader = DirectoryReader.open(crossValidationIndex);
      assertEquals((int) (originalIndex.maxDoc() * crossValidationRatio), cvReader.maxDoc(), 20);

      trainingReader.close();
      testReader.close();
      cvReader.close();
    } finally {
      if (trainingIndex != null) {
        trainingIndex.close();
      }
      if (testIndex != null) {
        testIndex.close();
      }
      if (crossValidationIndex != null) {
        crossValidationIndex.close();
      }
    }
  }
 
Example 12
Source File: MtasSearchTestConsistency.java    From mtas with Apache License 2.0
/**
 * Collect group.
 *
 * @throws IOException Signals that an I/O exception has occurred.
 */
@org.junit.Test
public void collectGroup() throws IOException {
  String cql = "[pos=\"LID\"]";
  DirectoryReader indexReader = DirectoryReader.open(directory);
  try {
    ArrayList<Integer> fullDocSet = docs;
    ComponentField fieldStats = new ComponentField(FIELD_ID);
    MtasSpanQuery q = createQuery(FIELD_CONTENT, cql, null, null, false);
    fieldStats.spanQueryList.add(q);
    fieldStats.statsSpanList.add(new ComponentSpan(new MtasSpanQuery[] { q },
        "total", null, null, "sum", null, null, null));
    fieldStats.groupList.add(new ComponentGroup(q, "articles",
        Integer.MAX_VALUE, 0, "t_lc", null, null, null, null, null, null,
        null, null, null, null, null, null));
    HashMap<String, HashMap<String, Object>> response = doAdvancedSearch(
        fullDocSet, fieldStats);
    ArrayList<HashMap<String, Object>> list = (ArrayList<HashMap<String, Object>>) response
        .get("group").get("articles");
    int subTotal = 0;
    for (HashMap<String, Object> listItem : list) {
      HashMap<String, HashMap<Integer, HashMap<String, String>[]>> group = (HashMap<String, HashMap<Integer, HashMap<String, String>[]>>) listItem
          .get("group");
      HashMap<Integer, HashMap<String, String>[]> hitList = group.get("hit");
      HashMap<String, String> hitListItem = hitList.get(0)[0];
      cql = "[pos=\"LID\" & " + hitListItem.get("prefix") + "=\""
          + hitListItem.get("value") + "\"]";
      QueryResult queryResult = doQuery(indexReader, FIELD_CONTENT, cql, null,
          null, null, false);
      assertEquals(
          "number of hits for articles equals to " + hitListItem.get("value"),
          listItem.get("sum"), Long.valueOf(queryResult.hits));
      subTotal += queryResult.hits;
    }
    HashMap<String, Object> responseTotal = (HashMap<String, Object>) response
        .get("statsSpans").get("total");
    Long total = responseTotal != null ? (Long) responseTotal.get("sum") : 0;
    assertEquals("Total number of articles", total, Long.valueOf(subTotal));
  } catch (ParseException | mtas.parser.function.ParseException e) {
    log.error(e);
  } finally {
    indexReader.close();
  }
}
 
Example 13
Source File: TestLatLonPointDistanceFeatureQuery.java    From lucene-solr with Apache License 2.0
public void testBasics() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig()
      .setMergePolicy(newLogMergePolicy(random().nextBoolean())));
  Document doc = new Document();
  LatLonPoint point = new LatLonPoint("foo", 0.0, 0.0);
  doc.add(point);
  LatLonDocValuesField docValue = new LatLonDocValuesField("foo",0.0, 0.0);
  doc.add(docValue);

  double pivotDistance = 5000; // 5 km

  point.setLocationValue(-7, -7);
  docValue.setLocationValue(-7, -7);
  w.addDocument(doc);

  point.setLocationValue(9, 9);
  docValue.setLocationValue(9, 9);
  w.addDocument(doc);


  point.setLocationValue(8, 8);
  docValue.setLocationValue(8, 8);
  w.addDocument(doc);

  point.setLocationValue(4, 4);
  docValue.setLocationValue(4, 4);
  w.addDocument(doc);

  point.setLocationValue(-1, -1);
  docValue.setLocationValue(-1, -1);
  w.addDocument(doc);

  DirectoryReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);
  
  Query q = LatLonPoint.newDistanceFeatureQuery("foo", 3, 10, 10, pivotDistance);
  TopScoreDocCollector collector = TopScoreDocCollector.create(2, null, 1);
  searcher.search(q, collector);
  TopDocs topHits = collector.topDocs();
  assertEquals(2, topHits.scoreDocs.length);

  double distance1 = SloppyMath.haversinMeters(GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(9)) , GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(9)), 10,10);
  double distance2 = SloppyMath.haversinMeters(GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(8)) , GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(8)), 10,10);

  CheckHits.checkEqual(q,
      new ScoreDoc[] {
          new ScoreDoc(1, (float) (3f * (pivotDistance / (pivotDistance + distance1)))),
          new ScoreDoc(2, (float) (3f * (pivotDistance / (pivotDistance + distance2))))
      },
      topHits.scoreDocs);

  distance1 = SloppyMath.haversinMeters(GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(9)) , GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(9)), 9,9);
  distance2 = SloppyMath.haversinMeters(GeoEncodingUtils.decodeLatitude(GeoEncodingUtils.encodeLatitude(8)) , GeoEncodingUtils.decodeLongitude(GeoEncodingUtils.encodeLongitude(8)), 9,9);

  q = LatLonPoint.newDistanceFeatureQuery("foo", 3, 9, 9,  pivotDistance);
  collector = TopScoreDocCollector.create(2, null, 1);
  searcher.search(q, collector);
  topHits = collector.topDocs();
  assertEquals(2, topHits.scoreDocs.length);
  CheckHits.checkExplanations(q, "", searcher);

  CheckHits.checkEqual(q,
      new ScoreDoc[] {
          new ScoreDoc(1, (float) (3f * (pivotDistance / (pivotDistance + distance1)))),
          new ScoreDoc(2, (float) (3f * (pivotDistance / (pivotDistance + distance2))))
      },
      topHits.scoreDocs);
  
  reader.close();
  w.close();
  dir.close();
}
 
Example 14
Source File: TestDirectoryTaxonomyWriter.java    From lucene-solr with Apache License 2.0
@Test
public void testCommitUserData() throws Exception {
  // Verifies taxonomy commit data
  Directory dir = newDirectory();
  DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(dir, OpenMode.CREATE_OR_APPEND, NO_OP_CACHE);
  assertTrue(taxoWriter.getCache() == NO_OP_CACHE);
  taxoWriter.addCategory(new FacetLabel("a"));
  taxoWriter.addCategory(new FacetLabel("b"));
  Map<String, String> userCommitData = new HashMap<>();
  userCommitData.put("testing", "1 2 3");
  taxoWriter.setLiveCommitData(userCommitData.entrySet());
  taxoWriter.close();
  DirectoryReader r = DirectoryReader.open(dir);
  assertEquals("2 categories plus root should have been committed to the underlying directory", 3, r.numDocs());
  Map <String, String> readUserCommitData = r.getIndexCommit().getUserData();
  assertTrue("wrong value extracted from commit data", 
      "1 2 3".equals(readUserCommitData.get("testing")));
  assertNotNull(DirectoryTaxonomyWriter.INDEX_EPOCH + " not found in commitData", readUserCommitData.get(DirectoryTaxonomyWriter.INDEX_EPOCH));
  r.close();
  
  // open DirTaxoWriter again and commit, INDEX_EPOCH should still exist
  // in the commit data, otherwise DirTaxoReader.refresh() might not detect
  // that the taxonomy index has been recreated.
  taxoWriter = new DirectoryTaxonomyWriter(dir, OpenMode.CREATE_OR_APPEND, NO_OP_CACHE);
  taxoWriter.addCategory(new FacetLabel("c")); // add a category so that commit will happen
  taxoWriter.setLiveCommitData(new HashMap<String, String>(){{
    put("just", "data");
  }}.entrySet());
  taxoWriter.commit();
  
  // verify taxoWriter.getCommitData()
  Map<String,String> data = new HashMap<>();
  Iterable<Map.Entry<String,String>> iter = taxoWriter.getLiveCommitData();
  if (iter != null) {
    for(Map.Entry<String,String> ent : iter) {
      data.put(ent.getKey(), ent.getValue());
    }
  }
  
  assertNotNull(DirectoryTaxonomyWriter.INDEX_EPOCH
      + " not found in taoxWriter.commitData", data.get(DirectoryTaxonomyWriter.INDEX_EPOCH));
  taxoWriter.close();
  
  r = DirectoryReader.open(dir);
  readUserCommitData = r.getIndexCommit().getUserData();
  assertNotNull(DirectoryTaxonomyWriter.INDEX_EPOCH + " not found in commitData", readUserCommitData.get(DirectoryTaxonomyWriter.INDEX_EPOCH));
  r.close();
  
  dir.close();
}
 
Example 15
Source File: TestPrefixCompletionQuery.java    From lucene-solr with Apache License 2.0
public void testMostlyFilteredOutDocuments() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
  int num = Math.min(1000, atLeast(10));
  for (int i = 0; i < num; i++) {
    Document document = new Document();
    document.add(new SuggestField("suggest_field", "abc_" + i, i));
    document.add(new NumericDocValuesField("filter_int_fld", i));
    iw.addDocument(document);

    if (usually()) {
      iw.commit();
    }
  }

  DirectoryReader reader = iw.getReader();
  SuggestIndexSearcher indexSearcher = new SuggestIndexSearcher(reader);

  int topScore = num/2;
  BitsProducer filter = new NumericRangeBitsProducer("filter_int_fld", 0, topScore);
  PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"), filter);
  // if at most half of the top scoring documents have been filtered out
  // the search should be admissible for a single segment
  TopSuggestDocs suggest = indexSearcher.suggest(query, num, false);
  assertTrue(suggest.totalHits.value >= 1);
  assertThat(suggest.scoreLookupDocs()[0].key.toString(), equalTo("abc_" + topScore));
  assertThat(suggest.scoreLookupDocs()[0].score, equalTo((float) topScore));

  filter = new NumericRangeBitsProducer("filter_int_fld", 0, 0);
  query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"), filter);
  // if more than half of the top scoring documents have been filtered out
  // search is not admissible, so # of suggestions requested is num instead of 1
  suggest = indexSearcher.suggest(query, num, false);
  assertSuggestions(suggest, new Entry("abc_0", 0));

  filter = new NumericRangeBitsProducer("filter_int_fld", num - 1, num - 1);
  query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc_"), filter);
  // if only lower scoring documents are filtered out
  // search is admissible
  suggest = indexSearcher.suggest(query, 1, false);
  assertSuggestions(suggest, new Entry("abc_" + (num - 1), num - 1));

  reader.close();
  iw.close();
}
 
Example 16
Source File: TestFieldCache.java    From lucene-solr with Apache License 2.0
public void testIntFieldCache() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
  cfg.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
  Document doc = new Document();
  IntPoint field = new IntPoint("f", 0);
  doc.add(field);
  final int[] values = new int[TestUtil.nextInt(random(), 1, 10)];
  Set<Integer> missing = new HashSet<>();
  for (int i = 0; i < values.length; ++i) {
    final int v;
    switch (random().nextInt(10)) {
      case 0:
        v = Integer.MIN_VALUE;
        break;
      case 1:
        v = 0;
        break;
      case 2:
        v = Integer.MAX_VALUE;
        break;
      default:
        v = TestUtil.nextInt(random(), -10, 10);
        break;
    }
    values[i] = v;
    if (v == 0 && random().nextBoolean()) {
      // missing
      iw.addDocument(new Document());
      missing.add(i);
    } else {
      field.setIntValue(v);
      iw.addDocument(doc);
    }
  }
  iw.forceMerge(1);
  final DirectoryReader reader = iw.getReader();
  final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlyLeafReader(reader), "f", FieldCache.INT_POINT_PARSER);
  for (int i = 0; i < values.length; ++i) {
    if (missing.contains(i) == false) {
      assertEquals(i, ints.nextDoc());
      assertEquals(values[i], ints.longValue());
    }
  }
  assertEquals(NO_MORE_DOCS, ints.nextDoc());
  reader.close();
  iw.close();
  dir.close();
}
 
Example 17
Source File: TestFieldCacheVsDocValues.java    From lucene-solr with Apache License 2.0
private void doTestSortedVsFieldCache(int minLength, int maxLength) throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
  Document doc = new Document();
  Field idField = new StringField("id", "", Field.Store.NO);
  Field indexedField = new StringField("indexed", "", Field.Store.NO);
  Field dvField = new SortedDocValuesField("dv", new BytesRef());
  doc.add(idField);
  doc.add(indexedField);
  doc.add(dvField);
  
  // index some docs
  int numDocs = atLeast(300);
  for (int i = 0; i < numDocs; i++) {
    idField.setStringValue(Integer.toString(i));
    final int length;
    if (minLength == maxLength) {
      length = minLength; // fixed length
    } else {
      length = TestUtil.nextInt(random(), minLength, maxLength);
    }
    String value = TestUtil.randomSimpleString(random(), length);
    indexedField.setStringValue(value);
    dvField.setBytesValue(new BytesRef(value));
    writer.addDocument(doc);
    if (random().nextInt(31) == 0) {
      writer.commit();
    }
  }
  
  // delete some docs
  int numDeletions = random().nextInt(numDocs/10);
  for (int i = 0; i < numDeletions; i++) {
    int id = random().nextInt(numDocs);
    writer.deleteDocuments(new Term("id", Integer.toString(id)));
  }
  writer.close();
  
  // compare
  DirectoryReader ir = DirectoryReader.open(dir);
  for (LeafReaderContext context : ir.leaves()) {
    LeafReader r = context.reader();
    SortedDocValues expected = FieldCache.DEFAULT.getTermsIndex(r, "indexed");
    SortedDocValues actual = r.getSortedDocValues("dv");
    assertEquals(r.maxDoc(), expected, actual);
  }
  ir.close();
  dir.close();
}
 
Example 18
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0
public void testLRUEviction() throws Exception {
  Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);

  Document doc = new Document();
  StringField f = new StringField("color", "blue", Store.NO);
  doc.add(f);
  w.addDocument(doc);
  f.setStringValue("red");
  w.addDocument(doc);
  f.setStringValue("green");
  w.addDocument(doc);
  final DirectoryReader reader = w.getReader();
  final IndexSearcher searcher = newSearcher(reader);
  final LRUQueryCache queryCache = new LRUQueryCache(2, 100000, context -> true, Float.POSITIVE_INFINITY);

  final Query blue = new TermQuery(new Term("color", "blue"));
  final Query red = new TermQuery(new Term("color", "red"));
  final Query green = new TermQuery(new Term("color", "green"));

  assertEquals(Collections.emptyList(), queryCache.cachedQueries());

  searcher.setQueryCache(queryCache);
  // the filter is not cached on any segment: no changes
  searcher.setQueryCachingPolicy(NEVER_CACHE);
  searcher.search(new ConstantScoreQuery(green), 1);
  assertEquals(Collections.emptyList(), queryCache.cachedQueries());

  searcher.setQueryCachingPolicy(ALWAYS_CACHE);
  searcher.search(new ConstantScoreQuery(red), 1);

  if (!(queryCache.cachedQueries().equals(Collections.emptyList()))) {
    assertEquals(Arrays.asList(red), queryCache.cachedQueries());
  } else {
    // Let the cache load be completed
    Thread.sleep(200);
    assertEquals(Arrays.asList(red), queryCache.cachedQueries());
  }

  searcher.search(new ConstantScoreQuery(green), 1);

  if (!(queryCache.cachedQueries().equals(Arrays.asList(red)))) {
    assertEquals(Arrays.asList(red, green), queryCache.cachedQueries());
  } else {
    // Let the cache load be completed
    Thread.sleep(200);
    assertEquals(Arrays.asList(red, green), queryCache.cachedQueries());
  }

  searcher.search(new ConstantScoreQuery(red), 1);
  assertEquals(Arrays.asList(green, red), queryCache.cachedQueries());

  searcher.search(new ConstantScoreQuery(blue), 1);

  if (!(queryCache.cachedQueries().equals(Arrays.asList(green, red)))) {
    assertEquals(Arrays.asList(red, blue), queryCache.cachedQueries());
  } else {
    // Let the cache load be completed
    Thread.sleep(200);
    assertEquals(Arrays.asList(red, blue), queryCache.cachedQueries());
  }

  searcher.search(new ConstantScoreQuery(blue), 1);
  assertEquals(Arrays.asList(red, blue), queryCache.cachedQueries());

  searcher.search(new ConstantScoreQuery(green), 1);

  if (!(queryCache.cachedQueries().equals(Arrays.asList(red, blue)))) {
    assertEquals(Arrays.asList(blue, green), queryCache.cachedQueries());
  } else {
    // Let the cache load be completed
    Thread.sleep(200);
    assertEquals(Arrays.asList(blue, green), queryCache.cachedQueries());
  }

  searcher.setQueryCachingPolicy(NEVER_CACHE);
  searcher.search(new ConstantScoreQuery(red), 1);
  assertEquals(Arrays.asList(blue, green), queryCache.cachedQueries());

  reader.close();
  w.close();
  dir.close();
}
 
Example 19
Source File: SolrIndexSplitterTest.java    From lucene-solr with Apache License 2.0
private void doTestSplitAlternately(SolrIndexSplitter.SplitMethod splitMethod) throws Exception {
  LocalSolrQueryRequest request = null;
  Directory directory = null;
  try {
    // add an even number of docs
    int max = (1 + random().nextInt(10)) * 3;
    log.info("Adding {} number of documents", max);
    for (int i = 0; i < max; i++) {
      assertU(adoc("id", String.valueOf(i)));
    }
    assertU(commit());

    request = lrf.makeRequest("q", "dummy");
    SolrQueryResponse rsp = new SolrQueryResponse();
    SplitIndexCommand command = new SplitIndexCommand(request, rsp,
        Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath(), indexDir3.getAbsolutePath()),
        null, null, new PlainIdRouter(), null, null, splitMethod);
    doSplit(command);

    directory = h.getCore().getDirectoryFactory().get(indexDir1.getAbsolutePath(),
        DirectoryFactory.DirContext.DEFAULT, h.getCore().getSolrConfig().indexConfig.lockType);
    DirectoryReader reader = DirectoryReader.open(directory);
    int numDocs1 = reader.numDocs();
    reader.close();
    h.getCore().getDirectoryFactory().release(directory);
    directory = h.getCore().getDirectoryFactory().get(indexDir2.getAbsolutePath(),
        DirectoryFactory.DirContext.DEFAULT, h.getCore().getSolrConfig().indexConfig.lockType);
    reader = DirectoryReader.open(directory);
    int numDocs2 = reader.numDocs();
    reader.close();
    h.getCore().getDirectoryFactory().release(directory);
    directory = h.getCore().getDirectoryFactory().get(indexDir3.getAbsolutePath(),
        DirectoryFactory.DirContext.DEFAULT, h.getCore().getSolrConfig().indexConfig.lockType);
    reader = DirectoryReader.open(directory);
    int numDocs3 = reader.numDocs();
    reader.close();
    h.getCore().getDirectoryFactory().release(directory);
    directory = null;
    assertEquals("split indexes lost some documents!", max, numDocs1 + numDocs2 + numDocs3);
    assertEquals("split index1 has wrong number of documents", max / 3, numDocs1);
    assertEquals("split index2 has wrong number of documents", max / 3, numDocs2);
    assertEquals("split index3 has wrong number of documents", max / 3, numDocs3);
  } finally {
    if (request != null) request.close(); // decrefs the searcher
    if (directory != null)  {
      // perhaps an assert failed, release the directory
      h.getCore().getDirectoryFactory().release(directory);
    }
  }
}
 
Example 20
Source File: TestFieldCache.java    From lucene-solr with Apache License 2.0
public void testNonIndexedFields() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(new StoredField("bogusbytes", "bogus"));
  doc.add(new StoredField("bogusshorts", "bogus"));
  doc.add(new StoredField("bogusints", "bogus"));
  doc.add(new StoredField("boguslongs", "bogus"));
  doc.add(new StoredField("bogusfloats", "bogus"));
  doc.add(new StoredField("bogusdoubles", "bogus"));
  doc.add(new StoredField("bogusterms", "bogus"));
  doc.add(new StoredField("bogustermsindex", "bogus"));
  doc.add(new StoredField("bogusmultivalued", "bogus"));
  doc.add(new StoredField("bogusbits", "bogus"));
  iw.addDocument(doc);
  DirectoryReader ir = iw.getReader();
  iw.close();
  
  LeafReader ar = getOnlyLeafReader(ir);
  
  final FieldCache cache = FieldCache.DEFAULT;
  cache.purgeAllCaches();
  assertEquals(0, cache.getCacheEntries().length);
  
  NumericDocValues ints = cache.getNumerics(ar, "bogusints", FieldCache.INT_POINT_PARSER);
  assertEquals(NO_MORE_DOCS, ints.nextDoc());
  
  NumericDocValues longs = cache.getNumerics(ar, "boguslongs", FieldCache.LONG_POINT_PARSER);
  assertEquals(NO_MORE_DOCS, longs.nextDoc());
  
  NumericDocValues floats = cache.getNumerics(ar, "bogusfloats", FieldCache.FLOAT_POINT_PARSER);
  assertEquals(NO_MORE_DOCS, floats.nextDoc());
  
  NumericDocValues doubles = cache.getNumerics(ar, "bogusdoubles", FieldCache.DOUBLE_POINT_PARSER);
  assertEquals(NO_MORE_DOCS, doubles.nextDoc());
  
  BinaryDocValues binaries = cache.getTerms(ar, "bogusterms");
  assertEquals(NO_MORE_DOCS, binaries.nextDoc());
  
  SortedDocValues sorted = cache.getTermsIndex(ar, "bogustermsindex");
  assertEquals(NO_MORE_DOCS, sorted.nextDoc());
  
  SortedSetDocValues sortedSet = cache.getDocTermOrds(ar, "bogusmultivalued", null);
  assertEquals(NO_MORE_DOCS, sortedSet.nextDoc());
  
  Bits bits = cache.getDocsWithField(ar, "bogusbits", null);
  assertFalse(bits.get(0));
  
  // check that we cached nothing
  assertEquals(0, cache.getCacheEntries().length);
  ir.close();
  dir.close();
}