org.apache.lucene.index.NoMergePolicy Java Examples

The following examples show how to use org.apache.lucene.index.NoMergePolicy. You can vote up the examples you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example #1
Source File: Lucene.java    From crate with Apache License 2.0 6 votes vote down vote up
/**
 * Removes all Lucene index files from the given directory. The segments_N commit
 * files are deleted first (under the write lock) so that a broken commit or a
 * corrupted index can never be opened again; if any segments file cannot be
 * deleted this operation fails. Afterwards an IndexWriter is opened in CREATE
 * mode purely to trigger deletion of the remaining, now-orphaned files.
 */
public static void cleanLuceneIndex(Directory directory) throws IOException {
    // Phase 1: drop every commit point while holding the write lock.
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (final String file : directory.listAll()) {
            boolean isCommitFile = file.startsWith(IndexFileNames.SEGMENTS)
                    || file.equals(IndexFileNames.OLD_SEGMENTS_GEN);
            if (isCommitFile) {
                directory.deleteFile(file); // remove all segments_N files
            }
        }
    }
    // Phase 2: opening and immediately closing a writer kicks off IndexFileDeleter,
    // which removes every remaining pending file.
    IndexWriterConfig config = new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
            .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
            .setMergePolicy(NoMergePolicy.INSTANCE) // no merges
            .setCommitOnClose(false) // no commits
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE); // force creation - don't append...
    try (IndexWriter writer = new IndexWriter(directory, config)) {
        // intentionally empty: the writer's open/close cycle does the cleanup
    }
}
 
Example #2
Source File: TestTopDocsCollector.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
public void testSharedCountCollectorManager() throws Exception {
  Query query = new MatchAllDocsQuery();
  Directory directory = newDirectory();
  IndexWriter writer = new IndexWriter(directory, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  Document document = new Document();
  // Two explicit flushes with NoMergePolicy -> exactly two segments.
  writer.addDocuments(Arrays.asList(document, document, document, document));
  writer.flush();
  writer.addDocuments(Arrays.asList(document, document));
  writer.flush();
  IndexReader reader = DirectoryReader.open(writer);
  assertEquals(2, reader.leaves().size());
  writer.close();

  // Sequential and concurrent searches with the same threshold must agree.
  TopDocsCollector<ScoreDoc> sequentialCollector = doSearchWithThreshold(5, 10, query, reader);
  TopDocs concurrentTop = doConcurrentSearchWithThreshold(5, 10, query, reader);
  TopDocs sequentialTop = sequentialCollector.topDocs();

  CheckHits.checkEqual(query, concurrentTop.scoreDocs, sequentialTop.scoreDocs);

  reader.close();
  directory.close();
}
 
Example #3
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
public void testPropagatesScorerSupplier() throws IOException {
  Directory directory = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, config);
  writer.addDocument(new Document());
  DirectoryReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(NEVER_CACHE);

  LRUQueryCache queryCache = new LRUQueryCache(1, 1000);
  searcher.setQueryCache(queryCache);

  // DummyQuery2 flips this flag the moment a Scorer is actually built.
  AtomicBoolean scorerCreated = new AtomicBoolean(false);
  Query query = new DummyQuery2(scorerCreated);
  Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1);
  ScorerSupplier supplier = weight.scorerSupplier(searcher.getIndexReader().leaves().get(0));
  // Obtaining the supplier alone must not create the scorer yet...
  assertFalse(scorerCreated.get());
  // ...but requesting it (with a non-negative lead cost) must.
  supplier.get(random().nextLong() & 0x7FFFFFFFFFFFFFFFL);
  assertTrue(scorerCreated.get());

  reader.close();
  writer.close();
  directory.close();
}
 
Example #4
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
public void testReaderNotSuitedForCaching() throws IOException {
  Directory directory = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, config);
  writer.addDocument(new Document());
  // The wrapper hides the cache helpers of the underlying reader.
  DirectoryReader reader = new DummyDirectoryReader(writer.getReader());
  IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);

  // don't cache if the reader does not expose a cache helper
  assertNull(reader.leaves().get(0).reader().getCoreCacheHelper());
  LRUQueryCache queryCache = new LRUQueryCache(2, 10000, context -> true, Float.POSITIVE_INFINITY);
  searcher.setQueryCache(queryCache);
  assertEquals(0, searcher.count(new DummyQuery()));
  assertEquals(0, queryCache.getCacheCount());
  reader.close();
  writer.close();
  directory.close();
}
 
Example #5
Source File: TestQueryBitSetProducer.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
public void testReaderNotSuitedForCaching() throws IOException {
  Directory directory = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, config);
  writer.addDocument(new Document());
  // Wrapping the reader removes its cache helpers, so nothing may be cached below.
  DirectoryReader reader = new DummyDirectoryReader(writer.getReader());

  // MatchNoDocsQuery: no bit set at all, and the result must not be cached.
  QueryBitSetProducer producer = new QueryBitSetProducer(new MatchNoDocsQuery());
  assertNull(producer.getBitSet(reader.leaves().get(0)));
  assertEquals(0, producer.cache.size());

  // MatchAllDocsQuery: the single doc's bit is set, but again nothing is cached.
  producer = new QueryBitSetProducer(new MatchAllDocsQuery());
  BitSet bits = producer.getBitSet(reader.leaves().get(0));
  assertEquals(1, bits.length());
  assertTrue(bits.get(0));
  assertEquals(0, producer.cache.size());

  IOUtils.close(reader, writer, directory);
}
 
Example #6
Source File: TestQueryBitSetProducer.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
public void testSimple() throws Exception {
  Directory directory = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, config);
  writer.addDocument(new Document());
  DirectoryReader reader = writer.getReader();

  // A query matching nothing yields a null bit set, yet the entry is cached.
  QueryBitSetProducer producer = new QueryBitSetProducer(new MatchNoDocsQuery());
  assertNull(producer.getBitSet(reader.leaves().get(0)));
  assertEquals(1, producer.cache.size());

  // A query matching everything yields a one-bit set, cached as well.
  producer = new QueryBitSetProducer(new MatchAllDocsQuery());
  BitSet bits = producer.getBitSet(reader.leaves().get(0));
  assertEquals(1, bits.length());
  assertTrue(bits.get(0));
  assertEquals(1, producer.cache.size());

  IOUtils.close(reader, writer, directory);
}
 
Example #7
Source File: TestPayloadScoreQuery.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
@BeforeClass
public static void beforeClass() throws Exception {
  directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new PayloadAnalyzer())
          .setMergePolicy(NoMergePolicy.INSTANCE));
  // Index 300 docs: "field" holds the English form of i, "field2" the forms
  // of both i and i+1.
  for (int i = 0; i < 300; i++) {
    String current = English.intToEnglish(i);
    String pair = current + ' ' + English.intToEnglish(i + 1);
    Document doc = new Document();
    doc.add(newTextField("field", current, Field.Store.YES));
    doc.add(newTextField("field2", pair, Field.Store.YES));
    writer.addDocument(doc);
  }
  reader = writer.getReader();
  writer.close();

  searcher = newSearcher(reader);
  searcher.setSimilarity(new JustScorePayloadSimilarity());
}
 
Example #8
Source File: TestMergePolicyConfig.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
public void testNoMergePolicyFactoryConfig() throws Exception {
  // Boot a core whose solrconfig selects NoMergePolicyFactory and verify the
  // resulting IndexWriterConfig really carries a NoMergePolicy.
  initCore("solrconfig-nomergepolicyfactory.xml","schema-minimal.xml");
  IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore());
  NoMergePolicy mergePolicy = assertAndCast(NoMergePolicy.class,
      iwc.getMergePolicy());

  assertCommitSomeNewDocs();

  // Two commits -> two segments, and optimize() must NOT merge them down.
  assertCommitSomeNewDocs();
  assertNumSegments(h.getCore(), 2);

  assertU(optimize());
  assertNumSegments(h.getCore(), 2);
  // Re-initialize the core and make sure the same policy comes back.
  deleteCore();
  initCore("solrconfig-nomergepolicyfactory.xml","schema-minimal.xml");
  iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore());
  assertEquals(mergePolicy, iwc.getMergePolicy());

  // An explicit optimize commit with maxOptimizeSegments = -1 must be rejected.
  UpdateHandler updater = h.getCore().getUpdateHandler();
  SolrQueryRequest req = req();
  CommitUpdateCommand cmtCmd = new CommitUpdateCommand(req, true);
  cmtCmd.maxOptimizeSegments = -1;
  expectThrows(IllegalArgumentException.class, () -> {
    updater.commit(cmtCmd);
  });

}
 
Example #9
Source File: TestTopFieldCollector.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
public void testRelationVsTopDocsCount() throws Exception {
  Sort sort = new Sort(SortField.FIELD_SCORE, SortField.FIELD_DOC);
  try (Directory dir = newDirectory();
      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE))) {
    // Two flushes of 5 identical docs each -> 10 hits for "foo" in 2 segments.
    Document doc = new Document();
    doc.add(new TextField("f", "foo bar", Store.NO));
    writer.addDocuments(Arrays.asList(doc, doc, doc, doc, doc));
    writer.flush();
    writer.addDocuments(Arrays.asList(doc, doc, doc, doc, doc));
    writer.flush();

    try (IndexReader reader = DirectoryReader.open(writer)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      TermQuery fooQuery = new TermQuery(new Term("f", "foo"));

      // Threshold (10) covers every hit: the count is exact.
      TopFieldCollector collector = TopFieldCollector.create(sort, 2, 10);
      searcher.search(fooQuery, collector);
      assertEquals(10, collector.totalHits);
      assertEquals(TotalHits.Relation.EQUAL_TO, collector.totalHitsRelation);

      // Threshold (2) below the hit count: only a lower bound is guaranteed.
      collector = TopFieldCollector.create(sort, 2, 2);
      searcher.search(fooQuery, collector);
      assertTrue(10 >= collector.totalHits);
      assertEquals(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO, collector.totalHitsRelation);

      // numHits (10) above the threshold forces visiting all hits: exact again.
      collector = TopFieldCollector.create(sort, 10, 2);
      searcher.search(fooQuery, collector);
      assertEquals(10, collector.totalHits);
      assertEquals(TotalHits.Relation.EQUAL_TO, collector.totalHitsRelation);
    }
  }
}
 
Example #10
Source File: SolrSnapshotManager.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * This method deletes index files of the {@linkplain IndexCommit} for the specified generation number.
 *
 * @param core The Solr core
 * @param dir The index directory storing the snapshot.
 * @param delPolicy The deletion policy whose {@code onInit} callback performs the actual file cleanup.
 * @throws IOException in case of I/O errors.
 */

@SuppressWarnings({"try", "unused"})
private static void deleteSnapshotIndexFiles(SolrCore core, Directory dir, IndexDeletionPolicy delPolicy) throws IOException {
  IndexWriterConfig conf = core.getSolrConfig().indexConfig.toIndexWriterConfig(core);
  conf.setOpenMode(OpenMode.APPEND); // APPEND so the existing commits are visible to the policy
  conf.setMergePolicy(NoMergePolicy.INSTANCE);//Don't want to merge any commits here!
  conf.setIndexDeletionPolicy(delPolicy);
  conf.setCodec(core.getCodec());
  try (SolrIndexWriter iw = new SolrIndexWriter("SolrSnapshotCleaner", dir, conf)) {
    // Do nothing. The only purpose of opening index writer is to invoke the Lucene IndexDeletionPolicy#onInit
    // method so that we can cleanup the files associated with specified index commit.
    // Note the index writer creates a new commit during the close() operation (which is harmless).
  }
}
 
Example #11
Source File: TestTopDocsCollector.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
public void testRelationVsTopDocsCount() throws Exception {
  try (Directory dir = newDirectory();
      IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE))) {
    // 10 identical "foo bar" docs spread over two flushed segments.
    Document doc = new Document();
    doc.add(new TextField("f", "foo bar", Store.NO));
    writer.addDocuments(Arrays.asList(doc, doc, doc, doc, doc));
    writer.flush();
    writer.addDocuments(Arrays.asList(doc, doc, doc, doc, doc));
    writer.flush();

    try (IndexReader reader = DirectoryReader.open(writer)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      TermQuery fooQuery = new TermQuery(new Term("f", "foo"));

      // Threshold (10) covers every hit: exact count.
      TopScoreDocCollector collector = TopScoreDocCollector.create(2, null, 10);
      searcher.search(fooQuery, collector);
      assertEquals(10, collector.totalHits);
      assertEquals(TotalHits.Relation.EQUAL_TO, collector.totalHitsRelation);

      // Threshold (2) below the hit count: only a lower bound is guaranteed.
      collector = TopScoreDocCollector.create(2, null, 2);
      searcher.search(fooQuery, collector);
      assertTrue(10 >= collector.totalHits);
      assertEquals(TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO, collector.totalHitsRelation);

      // numHits (10) above the threshold forces visiting all hits: exact again.
      collector = TopScoreDocCollector.create(10, null, 2);
      searcher.search(fooQuery, collector);
      assertEquals(10, collector.totalHits);
      assertEquals(TotalHits.Relation.EQUAL_TO, collector.totalHitsRelation);
    }
  }
}
 
Example #12
Source File: TestInPlaceUpdatesDistrib.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
@BeforeClass
public static void beforeSuperClass() throws Exception {
  // Configure the test harness before the base class boots the core.
  schemaString = "schema-inplace-updates.xml";
  configString = "solrconfig-tlog.xml";

  // we need consistent segments that aren't re-ordered on merge because we're
  // asserting inplace updates happen by checking the internal [docid]
  systemSetPropertySolrTestsMergePolicyFactory(NoMergePolicyFactory.class.getName());

  randomizeUpdateLogImpl();

  initCore(configString, schemaString);
  
  // sanity check that autocommits are disabled
  assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoCommmitMaxTime);
  assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoSoftCommmitMaxTime);
  assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoCommmitMaxDocs);
  assertEquals(-1, h.getCore().getSolrConfig().getUpdateHandlerInfo().autoSoftCommmitMaxDocs);
  
  // assert that NoMergePolicy was chosen
  RefCounted<IndexWriter> iw = h.getCore().getSolrCoreState().getIndexWriter(h.getCore());
  try {
    IndexWriter writer = iw.get();
    assertTrue("Actual merge policy is: " + writer.getConfig().getMergePolicy(),
        writer.getConfig().getMergePolicy() instanceof NoMergePolicy); 
  } finally {
    // always release the ref-counted writer, even if the assertion above fails
    iw.decref();
  }
}
 
Example #13
Source File: OverviewTestBase.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
// Builds a small 3-doc FS index in a temp directory and returns its path.
private Path createIndex() throws IOException {
  Path indexDir = createTempDir();

  Directory dir = newFSDirectory(indexDir);
  IndexWriterConfig config = new IndexWriterConfig(new MockAnalyzer(random()));
  config.setMergePolicy(NoMergePolicy.INSTANCE);  // see LUCENE-8998
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);

  Document doc1 = new Document();
  doc1.add(newStringField("f1", "1", Field.Store.NO));
  doc1.add(newTextField("f2", "a b c d e", Field.Store.NO));
  writer.addDocument(doc1);

  Document doc2 = new Document();
  doc2.add(newStringField("f1", "2", Field.Store.NO));
  // NOTE(review): doc2 uses a plain TextField while doc1/doc3 use the randomized
  // newTextField helper — looks deliberate, but worth confirming.
  doc2.add(new TextField("f2", "a c", Field.Store.NO));
  writer.addDocument(doc2);

  Document doc3 = new Document();
  doc3.add(newStringField("f1", "3", Field.Store.NO));
  doc3.add(newTextField("f2", "a f", Field.Store.NO));
  writer.addDocument(doc3);

  // Attach user data to the commit so tests can read it back from the index.
  Map<String, String> userData = new HashMap<>();
  userData.put("data", "val");
  writer.w.setLiveCommitData(userData.entrySet());

  writer.commit();

  writer.close();
  dir.close();

  return indexDir;
}
 
Example #14
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
public void testQueryNotSuitedForCaching() throws IOException {
  Directory directory = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, config);
  writer.addDocument(new Document());
  DirectoryReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);

  LRUQueryCache queryCache = new LRUQueryCache(2, 10000, context -> true, Float.POSITIVE_INFINITY);
  searcher.setQueryCache(queryCache);

  // A query that opts out of caching must never enter the cache...
  assertEquals(0, searcher.count(new NoCacheQuery()));
  assertEquals(0, queryCache.getCacheCount());

  // BooleanQuery wrapping an uncacheable query should also not be cached
  BooleanQuery wrapped = new BooleanQuery.Builder()
      .add(new NoCacheQuery(), Occur.MUST)
      .add(new TermQuery(new Term("field", "term")), Occur.MUST)
      .build();
  assertEquals(0, searcher.count(wrapped));
  assertEquals(0, queryCache.getCacheCount());

  reader.close();
  writer.close();
  directory.close();
}
 
Example #15
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
public void testMinSegmentSizePredicate() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);

  // One 1-doc segment: requiring at least 2 docs keeps it out of the cache.
  LRUQueryCache cache = new LRUQueryCache(2, 10000, new LRUQueryCache.MinSegmentSizePredicate(2, 0f), Float.POSITIVE_INFINITY);
  searcher.setQueryCache(cache);
  searcher.count(new DummyQuery());
  assertEquals(0, cache.getCacheCount());

  // Requiring only 1 doc admits the segment.
  cache = new LRUQueryCache(2, 10000, new LRUQueryCache.MinSegmentSizePredicate(1, 0f), Float.POSITIVE_INFINITY);
  searcher.setQueryCache(cache);
  searcher.count(new DummyQuery());
  assertEquals(1, cache.getCacheCount());

  // Size-ratio variant: the single segment is the whole index, above the 60% bar.
  cache = new LRUQueryCache(2, 10000, new LRUQueryCache.MinSegmentSizePredicate(0, .6f), Float.POSITIVE_INFINITY);
  searcher.setQueryCache(cache);
  searcher.count(new DummyQuery());
  assertEquals(1, cache.getCacheCount());

  // After a second doc/segment appears, no single segment reaches the 60% ratio,
  // so nothing is cached any more.
  w.addDocument(new Document());
  reader.close();
  reader = w.getReader();
  searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);
  cache = new LRUQueryCache(2, 10000, new LRUQueryCache.MinSegmentSizePredicate(0, .6f), Float.POSITIVE_INFINITY);
  searcher.setQueryCache(cache);
  searcher.count(new DummyQuery());
  assertEquals(0, cache.getCacheCount());

  reader.close();
  w.close();
  dir.close();
}
 
Example #16
Source File: TestPhraseWildcardQuery.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();
  // NoMergePolicy plus disabled random force-merge: the two segments created by
  // addSegments() must stay separate for these tests.
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  writer.setDoRandomForceMerge(false);
  addSegments(writer);
  reader = writer.getReader();
  writer.close();
  searcher = newSearcher(reader);
  assertEquals("test test relies on 2 segments", 2, searcher.getIndexReader().leaves().size());
}
 
Example #17
Source File: TestCheckJoinIndex.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
public void testInconsistentDeletes() throws IOException {
  // CheckJoinIndex must reject an index in which a doc block (children followed
  // by their parent) has been partially deleted.
  final Directory dir = newDirectory();
  final IndexWriterConfig iwc = newIndexWriterConfig();
  iwc.setMergePolicy(NoMergePolicy.INSTANCE); // so that deletions don't trigger merges
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);

  // Build one block: 1-3 child docs, then the parent doc last.
  List<Document> block = new ArrayList<>();
  final int numChildren = TestUtil.nextInt(random(), 1, 3);
  for (int i = 0; i < numChildren; ++i) {
    Document doc = new Document();
    doc.add(new StringField("child", Integer.toString(i), Store.NO));
    block.add(doc);
  }
  Document parent = new Document();
  parent.add(new StringField("parent", "true", Store.NO));
  block.add(parent);
  w.addDocuments(block);

  // Randomly delete either the parent or one child — either way the block is broken.
  if (random().nextBoolean()) {
    w.deleteDocuments(new Term("parent", "true"));
  } else {
    // delete any of the children
    w.deleteDocuments(new Term("child", Integer.toString(random().nextInt(numChildren))));
  }

  final IndexReader reader = w.getReader();
  w.close();

  BitSetProducer parentsFilter = new QueryBitSetProducer(new TermQuery(new Term("parent", "true")));
  try {
    expectThrows(IllegalStateException.class, () -> CheckJoinIndex.check(reader, parentsFilter));
  } finally {
    reader.close();
    dir.close();
  }
}
 
Example #18
Source File: TestTaxonomyFacetCounts.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
public void testSegmentsWithoutCategoriesOrResults() throws Exception {
  // tests the accumulator when there are segments with no results
  Directory indexDir = newDirectory();
  Directory taxoDir = newDirectory();
  
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc.setMergePolicy(NoMergePolicy.INSTANCE); // prevent merges
  IndexWriter indexWriter = new IndexWriter(indexDir, iwc);

  TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
  FacetsConfig config = new FacetsConfig();
  // Each call below produces one segment (merges are disabled); the per-call
  // comments describe its (content, categories) combination.
  indexTwoDocs(taxoWriter, indexWriter, config, false); // 1st segment, no content, with categories
  indexTwoDocs(taxoWriter, indexWriter, null, true);         // 2nd segment, with content, no categories
  indexTwoDocs(taxoWriter, indexWriter, config, true);  // 3rd segment ok
  indexTwoDocs(taxoWriter, indexWriter, null, false);        // 4th segment, no content, or categories
  indexTwoDocs(taxoWriter, indexWriter, null, true);         // 5th segment, with content, no categories
  indexTwoDocs(taxoWriter, indexWriter, config, true);  // 6th segment, with content, with categories
  indexTwoDocs(taxoWriter, indexWriter, null, true);         // 7th segment, with content, no categories
  indexWriter.close();
  IOUtils.close(taxoWriter);

  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
  IndexSearcher indexSearcher = newSearcher(indexReader);
  
  // search for "f:a", only segments 1 and 3 should match results
  Query q = new TermQuery(new Term("f", "a"));
  FacetsCollector sfc = new FacetsCollector();
  indexSearcher.search(q, sfc);
  Facets facets = getTaxonomyFacetCounts(taxoReader, config, sfc);
  FacetResult result = facets.getTopChildren(10, "A");
  assertEquals("wrong number of children", 2, result.labelValues.length);
  for (LabelAndValue labelValue : result.labelValues) {
    assertEquals("wrong weight for child " + labelValue.label, 2, labelValue.value.intValue());
  }

  IOUtils.close(indexReader, taxoReader, indexDir, taxoDir);
}
 
Example #19
Source File: TestTaxonomyFacetCounts2.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
@BeforeClass
public static void beforeClassCountingFacetsAggregatorTest() throws Exception {
  // Populates the static indexDir/taxoDir and the expected-count maps once for
  // the whole test class.
  indexDir = newDirectory();
  taxoDir = newDirectory();
  
  // create an index which has:
  // 1. Segment with no categories, but matching results
  // 2. Segment w/ categories, but no results
  // 3. Segment w/ categories and results
  // 4. Segment w/ categories, but only some results
  
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setMergePolicy(NoMergePolicy.INSTANCE); // prevent merges, so we can control the index segments
  IndexWriter indexWriter = new IndexWriter(indexDir, conf);
  TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);

  // The expected-count maps are handed to the index* helpers below, which fill
  // them as they add documents.
  allExpectedCounts = newCounts();
  termExpectedCounts = newCounts();
  
  // segment w/ no categories
  indexDocsNoFacets(indexWriter);

  // segment w/ categories, no content
  indexDocsWithFacetsNoTerms(indexWriter, taxoWriter, allExpectedCounts);

  // segment w/ categories and content
  indexDocsWithFacetsAndTerms(indexWriter, taxoWriter, allExpectedCounts);
  
  // segment w/ categories and some content
  indexDocsWithFacetsAndSomeTerms(indexWriter, taxoWriter, allExpectedCounts);

  indexWriter.close();
  IOUtils.close(taxoWriter);
}
 
Example #20
Source File: MergePolicyConfig.java    From crate with Apache License 2.0 4 votes vote down vote up
MergePolicy getMergePolicy() {
    // When merging is disabled, hand back the no-op policy singleton instead of
    // the configured one.
    if (mergesEnabled == false) {
        return NoMergePolicy.INSTANCE;
    }
    return mergePolicy;
}
 
Example #21
Source File: WrapperMergePolicyFactoryTest.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
@Override
protected MergePolicy getDefaultWrappedMergePolicy() {
  // Tests wrap NoMergePolicy by default so background merges never interfere
  // with the wrapped-policy behavior under test.
  return NoMergePolicy.INSTANCE;
}
 
Example #22
Source File: WrapperMergePolicyFactoryTest.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
public void testReturnsDefaultMergePolicyIfNoneSpecified() {
  // With no wrapped-policy argument supplied, the factory must hand back the
  // NoMergePolicy singleton itself (same instance, not merely an equal one).
  final MergePolicyFactoryArgs emptyArgs = new MergePolicyFactoryArgs();
  final MergePolicyFactory factory = new DefaultingWrapperMergePolicyFactory(resourceLoader, emptyArgs, null);
  assertSame(factory.getMergePolicy(), NoMergePolicy.INSTANCE);
}
 
Example #23
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
public void testBulkScorerLocking() throws Exception {
  // Two readers (the plain reader and its DummyDirectoryReader wrapper) run the
  // same cached query; closing everything from a second thread exercises the
  // cache's bulk-scorer locking.
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig()
      .setMergePolicy(NoMergePolicy.INSTANCE)
      // the test framework sometimes sets crazy low values, prevent this since we are indexing many docs
      .setMaxBufferedDocs(-1);
  IndexWriter w = new IndexWriter(dir, iwc);

  final int numDocs = atLeast(10);
  Document emptyDoc = new Document();
  for (int d = 0; d < numDocs; ++d) {
    // random run of empty filler docs before each real doc
    for (int i = random().nextInt(5000); i >= 0; --i) {
      w.addDocument(emptyDoc);
    }
    Document doc = new Document();
    for (String value : Arrays.asList("foo", "bar", "baz")) {
      if (random().nextBoolean()) {
        doc.add(new StringField("field", value, Store.NO));
      }
    }
    // FIX: the document was built but never indexed — without this add, no doc
    // in the index could ever contain the "field" terms queried below.
    w.addDocument(doc);
  }
  // trailing filler docs
  for (int i = TestUtil.nextInt(random(), 3000, 5000); i >= 0; --i) {
    w.addDocument(emptyDoc);
  }
  if (random().nextBoolean()) {
    w.forceMerge(1);
  }

  DirectoryReader reader = DirectoryReader.open(w);
  DirectoryReader noCacheReader = new DummyDirectoryReader(reader);

  LRUQueryCache cache = new LRUQueryCache(1, 100000, context -> true, Float.POSITIVE_INFINITY);
  IndexSearcher searcher = new AssertingIndexSearcher(random(), reader);
  searcher.setQueryCache(cache);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);

  Query query = new ConstantScoreQuery(new BooleanQuery.Builder()
      .add(new BoostQuery(new TermQuery(new Term("field", "foo")), 3), Occur.SHOULD)
      .add(new BoostQuery(new TermQuery(new Term("field", "bar")), 3), Occur.SHOULD)
      .add(new BoostQuery(new TermQuery(new Term("field", "baz")), 3), Occur.SHOULD)
      .build());

  // First search populates the cache through the cache-friendly reader...
  searcher.search(query, 1);

  // ...then search again through the wrapper that exposes no cache helper.
  IndexSearcher noCacheHelperSearcher = new AssertingIndexSearcher(random(), noCacheReader);
  noCacheHelperSearcher.setQueryCache(cache);
  noCacheHelperSearcher.setQueryCachingPolicy(ALWAYS_CACHE);
  noCacheHelperSearcher.search(query, 1);

  // Close everything from another thread to surface lock-ordering issues.
  // NOTE(review): this presumes closing noCacheReader also closes the wrapped
  // reader — confirm DummyDirectoryReader delegates doClose().
  Thread t = new Thread(() -> {
    try {
      noCacheReader.close();
      w.close();
      dir.close();
    }
    catch (Exception e) {
      throw new RuntimeException(e);
    }
  });
  t.start();
  t.join();
}
 
Example #24
Source File: NoMergePolicyFactory.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
@Override
protected MergePolicy getMergePolicyInstance() {
  // This factory always produces the shared no-op merge policy singleton.
  return NoMergePolicy.INSTANCE;
}
 
Example #25
Source File: TestControlledRealTimeReopenThread.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
public void testThreadStarvationNoDeleteNRTReader() throws IOException, InterruptedException {
  // Regression test: waitForGeneration() must not starve while another thread
  // blocks an update and triggers extra reopens in between.
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  Directory d = newDirectory();
  // latch: holds the writer inside updateDocument; signal: fired once it waits.
  final CountDownLatch latch = new CountDownLatch(1);
  final CountDownLatch signal = new CountDownLatch(1);

  LatchedIndexWriter writer = new LatchedIndexWriter(d, conf, latch, signal);
  final SearcherManager manager = new SearcherManager(writer, false, false, null);
  Document doc = new Document();
  doc.add(newTextField("test", "test", Field.Store.YES));
  writer.addDocument(doc);
  manager.maybeRefresh();
  // Background thread: once the writer is blocked, refresh, delete a
  // non-matching term, and refresh again before unblocking the update.
  Thread t = new Thread() {
    @Override
    public void run() {
      try {
        signal.await();
        manager.maybeRefresh();
        writer.deleteDocuments(new TermQuery(new Term("foo", "barista")));
        manager.maybeRefresh(); // kick off another reopen so we inc. the internal gen
      } catch (Exception e) {
        e.printStackTrace();
      } finally {
        latch.countDown(); // let the add below finish
      }
    }
  };
  t.start();
  writer.waitAfterUpdate = true; // wait in addDocument to let some reopens go through

  final long lastGen = writer.updateDocument(new Term("foo", "bar"), doc); // once this returns the doc is already reflected in the last reopen

  // We now eagerly resolve deletes so the manager should see it after update:
  assertTrue(manager.isSearcherCurrent());
  
  IndexSearcher searcher = manager.acquire();
  try {
    assertEquals(2, searcher.getIndexReader().numDocs());
  } finally {
    manager.release(searcher);
  }
  final ControlledRealTimeReopenThread<IndexSearcher> thread = new ControlledRealTimeReopenThread<>(writer, manager, 0.01, 0.01);
  thread.start(); // start reopening
  if (VERBOSE) {
    System.out.println("waiting now for generation " + lastGen);
  }
  
  final AtomicBoolean finished = new AtomicBoolean(false);
  // The waiter must unblock once the reopen thread reaches lastGen; the bounded
  // join below guards against a deadlock.
  Thread waiter = new Thread() {
    @Override
    public void run() {
      try {
        thread.waitForGeneration(lastGen);
      } catch (InterruptedException ie) {
        // re-interrupt and surface the failure instead of swallowing it
        Thread.currentThread().interrupt();
        throw new RuntimeException(ie);
      }
      finished.set(true);
    }
  };
  waiter.start();
  manager.maybeRefresh();
  waiter.join(1000);
  if (!finished.get()) {
    waiter.interrupt();
    fail("thread deadlocked on waitForGeneration");
  }
  thread.close();
  thread.join();
  writer.close();
  IOUtils.close(manager, d);
}
 
Example #26
Source File: TestTopFieldCollector.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
public void testTotalHitsWithScore() throws Exception {
  // Builds a 10-doc / 2-segment index, then hand-feeds 4 hits to a
  // TopFieldCollector for every totalHitsThreshold in [0, 20) and checks the
  // reported TotalHits relation.
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  Document doc = new Document();
  w.addDocuments(Arrays.asList(doc, doc, doc, doc));
  w.flush();
  w.addDocuments(Arrays.asList(doc, doc, doc, doc, doc, doc));
  w.flush();
  IndexReader reader = DirectoryReader.open(w);
  assertEquals(2, reader.leaves().size());
  w.close();

  for (int totalHitsThreshold = 0; totalHitsThreshold < 20; ++ totalHitsThreshold) {
    Sort sort = new Sort(FIELD_SCORE, new SortField("foo", SortField.Type.LONG));
    TopFieldCollector collector = TopFieldCollector.create(sort, 2, null, totalHitsThreshold);
    ScoreAndDoc scorer = new ScoreAndDoc();

    // Two hits collected on the first segment...
    LeafCollector leafCollector = collector.getLeafCollector(reader.leaves().get(0));
    leafCollector.setScorer(scorer);

    scorer.doc = 0;
    scorer.score = 3;
    leafCollector.collect(0);

    scorer.doc = 1;
    scorer.score = 3;
    leafCollector.collect(1);

    // ...and two on the second — 4 collected hits in total.
    leafCollector = collector.getLeafCollector(reader.leaves().get(1));
    leafCollector.setScorer(scorer);

    scorer.doc = 1;
    scorer.score = 3;
    leafCollector.collect(1);

    scorer.doc = 5;
    scorer.score = 4;
    leafCollector.collect(1);

    // Below the threshold the count is a lower bound (and the collector sets a
    // min competitive score on the scorer); at or above 4 hits it is exact.
    TopDocs topDocs = collector.topDocs();
    assertEquals(totalHitsThreshold < 4, scorer.minCompetitiveScore != null);
    assertEquals(new TotalHits(4, totalHitsThreshold < 4 ? TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO : TotalHits.Relation.EQUAL_TO), topDocs.totalHits);
  }

  reader.close();
  dir.close();
}
 
Example #27
Source File: TestTopFieldCollector.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Verifies that {@link TopFieldCollector} (sorting by score first) propagates the
 * minimum competitive score to the scorer: nothing is set until the queue of 2 is
 * full, updates happen only for competitive hits, and the current minimum carries
 * over to leaf collectors of subsequent segments.
 */
public void testSetMinCompetitiveScore() throws Exception {
  Directory dir = newDirectory();
  // Merging disabled so that the two flushes remain two distinct segments.
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  Document doc = new Document();
  writer.addDocuments(Arrays.asList(doc, doc, doc, doc));
  writer.flush();
  writer.addDocuments(Arrays.asList(doc, doc));
  writer.flush();
  IndexReader reader = DirectoryReader.open(writer);
  assertEquals(2, reader.leaves().size());
  writer.close();

  Sort sort = new Sort(FIELD_SCORE, new SortField("foo", SortField.Type.LONG));
  TopFieldCollector collector = TopFieldCollector.create(sort, 2, null, 2);
  ScoreAndDoc fakeScorer = new ScoreAndDoc();

  LeafCollector leaf = collector.getLeafCollector(reader.leaves().get(0));
  leaf.setScorer(fakeScorer);
  // Queue not yet full: no minimum competitive score may be advertised.
  assertNull(fakeScorer.minCompetitiveScore);

  fakeScorer.doc = 0;
  fakeScorer.score = 1;
  leaf.collect(0);
  assertNull(fakeScorer.minCompetitiveScore);

  fakeScorer.doc = 1;
  fakeScorer.score = 2;
  leaf.collect(1);
  assertNull(fakeScorer.minCompetitiveScore);

  // Third hit fills the queue of size 2; the weakest entry (score 2) becomes the floor.
  fakeScorer.doc = 2;
  fakeScorer.score = 3;
  leaf.collect(2);
  assertEquals(2f, fakeScorer.minCompetitiveScore, 0f);

  // A non-competitive hit must not trigger setMinCompetitiveScore at all.
  fakeScorer.doc = 3;
  fakeScorer.score = 0.5f;
  fakeScorer.minCompetitiveScore = Float.NaN;
  leaf.collect(3);
  assertTrue(Float.isNaN(fakeScorer.minCompetitiveScore));

  fakeScorer.doc = 4;
  fakeScorer.score = 4;
  leaf.collect(4);
  assertEquals(3f, fakeScorer.minCompetitiveScore, 0f);

  // The accumulated minimum must be pushed to scorers on newly opened segments.
  fakeScorer = new ScoreAndDoc();
  leaf = collector.getLeafCollector(reader.leaves().get(1));
  leaf.setScorer(fakeScorer);
  assertEquals(3f, fakeScorer.minCompetitiveScore, 0f);

  fakeScorer.doc = 0;
  fakeScorer.score = 1;
  leaf.collect(0);
  assertEquals(3f, fakeScorer.minCompetitiveScore, 0f);

  fakeScorer.doc = 1;
  fakeScorer.score = 4;
  leaf.collect(1);
  assertEquals(4f, fakeScorer.minCompetitiveScore, 0f);

  reader.close();
  dir.close();
}
 
Example #28
Source File: TestTopDocsCollector.java — from lucene-solr (Apache License 2.0)
/**
 * Verifies total-hit accounting in {@link TopScoreDocCollector}: below
 * {@code totalHitsThreshold} the count is exact; at or above it the collector
 * may shortcut and report a lower bound ({@code GREATER_THAN_OR_EQUAL_TO}).
 */
public void testTotalHits() throws Exception {
  Directory dir = newDirectory();
  // NoMergePolicy pins the two flushed segments apart so the reader has two leaves.
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  Document doc = new Document();
  writer.addDocuments(Arrays.asList(doc, doc, doc, doc));
  writer.flush();
  writer.addDocuments(Arrays.asList(doc, doc, doc, doc, doc, doc));
  writer.flush();
  IndexReader reader = DirectoryReader.open(writer);
  assertEquals(2, reader.leaves().size());
  writer.close();

  for (int threshold = 0; threshold < 20; ++threshold) {
    TopScoreDocCollector collector = TopScoreDocCollector.create(2, null, threshold);
    ScoreAndDoc fakeScorer = new ScoreAndDoc();

    // First segment: two hits, both scored 3.
    LeafCollector leaf = collector.getLeafCollector(reader.leaves().get(0));
    leaf.setScorer(fakeScorer);

    fakeScorer.doc = 0;
    fakeScorer.score = 3;
    leaf.collect(0);

    fakeScorer.doc = 1;
    fakeScorer.score = 3;
    leaf.collect(1);

    // Second segment: two more hits.
    leaf = collector.getLeafCollector(reader.leaves().get(1));
    leaf.setScorer(fakeScorer);

    fakeScorer.doc = 1;
    fakeScorer.score = 3;
    leaf.collect(1);

    fakeScorer.doc = 5;
    fakeScorer.score = 4;
    // NOTE(review): doc is set to 5 but collect(1) is invoked — mirrors the upstream
    // test verbatim; confirm against lucene-solr whether collect(5) was intended.
    leaf.collect(1);

    TopDocs topDocs = collector.topDocs();
    assertEquals(4, topDocs.totalHits.value);
    // minCompetitiveScore is only propagated while the collector still counts exactly.
    assertEquals(threshold < 4, fakeScorer.minCompetitiveScore != null);
    assertEquals(new TotalHits(4, threshold < 4 ? TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO : TotalHits.Relation.EQUAL_TO), topDocs.totalHits);
  }

  reader.close();
  dir.close();
}
 
Example #29
Source File: TestTopDocsCollector.java — from lucene-solr (Apache License 2.0)
/**
 * Verifies that {@link TopScoreDocCollector} propagates the minimum competitive
 * score: nothing is set until the queue of 2 is full, the advertised floor is
 * {@code Math.nextUp} of the weakest kept score (a strictly better score is
 * required to enter the queue), non-competitive hits trigger no update, and the
 * current floor carries over to leaf collectors of later segments.
 */
public void testSetMinCompetitiveScore() throws Exception {
  Directory dir = newDirectory();
  // Merging disabled so that the two flushes remain two distinct segments.
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE));
  Document doc = new Document();
  writer.addDocuments(Arrays.asList(doc, doc, doc, doc));
  writer.flush();
  writer.addDocuments(Arrays.asList(doc, doc));
  writer.flush();
  IndexReader reader = DirectoryReader.open(writer);
  assertEquals(2, reader.leaves().size());
  writer.close();

  TopScoreDocCollector collector = TopScoreDocCollector.create(2, null, 2);
  ScoreAndDoc fakeScorer = new ScoreAndDoc();

  LeafCollector leaf = collector.getLeafCollector(reader.leaves().get(0));
  leaf.setScorer(fakeScorer);
  // Queue not yet full: no minimum competitive score may be advertised.
  assertNull(fakeScorer.minCompetitiveScore);

  fakeScorer.doc = 0;
  fakeScorer.score = 1;
  leaf.collect(0);
  assertNull(fakeScorer.minCompetitiveScore);

  fakeScorer.doc = 1;
  fakeScorer.score = 2;
  leaf.collect(1);
  assertNull(fakeScorer.minCompetitiveScore);

  // Third hit fills the queue of size 2; floor becomes nextUp(weakest kept score).
  fakeScorer.doc = 2;
  fakeScorer.score = 3;
  leaf.collect(2);
  assertEquals(Math.nextUp(2f), fakeScorer.minCompetitiveScore, 0f);

  // A non-competitive hit must not trigger setMinCompetitiveScore at all.
  fakeScorer.doc = 3;
  fakeScorer.score = 0.5f;
  fakeScorer.minCompetitiveScore = Float.NaN;
  leaf.collect(3);
  assertTrue(Float.isNaN(fakeScorer.minCompetitiveScore));

  fakeScorer.doc = 4;
  fakeScorer.score = 4;
  leaf.collect(4);
  assertEquals(Math.nextUp(3f), fakeScorer.minCompetitiveScore, 0f);

  // The accumulated floor must be pushed to scorers on newly opened segments.
  fakeScorer = new ScoreAndDoc();
  leaf = collector.getLeafCollector(reader.leaves().get(1));
  leaf.setScorer(fakeScorer);
  assertEquals(Math.nextUp(3f), fakeScorer.minCompetitiveScore, 0f);

  fakeScorer.doc = 0;
  fakeScorer.score = 1;
  leaf.collect(0);
  assertEquals(Math.nextUp(3f), fakeScorer.minCompetitiveScore, 0f);

  fakeScorer.doc = 1;
  fakeScorer.score = 4;
  leaf.collect(1);
  assertEquals(Math.nextUp(4f), fakeScorer.minCompetitiveScore, 0f);

  reader.close();
  dir.close();
}
 
Example #30
Source File: MergePolicyConfig.java — from Elasticsearch (Apache License 2.0)
/**
 * Returns the merge policy to use for this index: the configured policy when
 * merging is enabled, otherwise {@link NoMergePolicy#INSTANCE} so no merges run.
 */
public MergePolicy getMergePolicy() {
    if (!mergesEnabled) {
        return NoMergePolicy.INSTANCE;
    }
    return mergePolicy;
}