org.apache.lucene.index.RandomIndexWriter Java Examples

The following examples show how to use org.apache.lucene.index.RandomIndexWriter. Each snippet comes from the lucene-solr project; the source file and license are noted above it.
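
Before the individual examples, here is a minimal sketch of the pattern most of them share. It is not taken from lucene-solr: the method name is made up, imports are omitted as in the examples below, and it assumes the enclosing class extends LuceneTestCase, which supplies the random(), newDirectory(), newTextField() and newSearcher() helpers used throughout.

public void testRandomIndexWriterBasicsSketch() throws Exception {
  Directory dir = newDirectory();
  // RandomIndexWriter wraps a regular IndexWriter and randomly varies
  // indexing settings and flush/commit behavior to broaden test coverage.
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  Document doc = new Document();
  doc.add(newTextField("field", "some indexed text", Field.Store.NO));
  writer.addDocument(doc);

  // getReader() returns a near-real-time reader over everything added so far.
  IndexReader reader = writer.getReader();
  writer.close();

  IndexSearcher searcher = newSearcher(reader);
  assertEquals(1, searcher.count(new MatchAllDocsQuery()));

  reader.close();
  dir.close();
}
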
Example #1
Source File: TestPayloadScoreQuery.java    From lucene-solr with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new PayloadAnalyzer())
          .setMergePolicy(NoMergePolicy.INSTANCE));
  //writer.infoStream = System.out;
  for (int i = 0; i < 300; i++) {
    Document doc = new Document();
    doc.add(newTextField("field", English.intToEnglish(i), Field.Store.YES));
    String txt = English.intToEnglish(i) + ' ' + English.intToEnglish(i + 1);
    doc.add(newTextField("field2", txt, Field.Store.YES));
    writer.addDocument(doc);
  }
  reader = writer.getReader();
  writer.close();

  searcher = newSearcher(reader);
  searcher.setSimilarity(new JustScorePayloadSimilarity());
}
 
Example #2
Source File: TestIDVersionPostingsFormat.java    From lucene-solr with Apache License 2.0
public void testMissingPayload() throws Exception {
  Directory dir = newDirectory();

  // MockAnalyzer without maybePayload, otherwise it sometimes stuffs in an 8-byte payload!
  Analyzer a = new Analyzer() {
      @Override
      public TokenStreamComponents createComponents(String fieldName) {
        MockTokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, true, 100);
        tokenizer.setEnableChecks(true);
        MockTokenFilter filt = new MockTokenFilter(tokenizer, MockTokenFilter.EMPTY_STOPSET);
        return new TokenStreamComponents(tokenizer, filt);
      }
    };
  IndexWriterConfig iwc = newIndexWriterConfig(a);
  iwc.setCodec(TestUtil.alwaysPostingsFormat(new IDVersionPostingsFormat()));
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc, false);
  Document doc = new Document();
  doc.add(newTextField("id", "id", Field.Store.NO));
  expectThrows(IllegalArgumentException.class, () -> {
    w.addDocument(doc);
    w.commit(false);
  });
           
  w.close();
  dir.close();
}
 
Example #3
Source File: TestIndexSortSortedNumericDocValuesRangeQuery.java    From lucene-solr with Apache License 2.0
public void testRewriteFallbackQuery() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.addDocument(new Document());
  IndexReader reader = writer.getReader();

  // Create an (unrealistic) fallback query that is sure to be rewritten.
  Query fallbackQuery = new BooleanQuery.Builder().build();
  Query query = new IndexSortSortedNumericDocValuesRangeQuery("field", 1, 42, fallbackQuery);

  Query rewrittenQuery = query.rewrite(reader);
  assertNotEquals(query, rewrittenQuery);
  assertThat(rewrittenQuery, instanceOf(IndexSortSortedNumericDocValuesRangeQuery.class));

  IndexSortSortedNumericDocValuesRangeQuery rangeQuery = (IndexSortSortedNumericDocValuesRangeQuery) rewrittenQuery;
  assertEquals(new MatchNoDocsQuery(), rangeQuery.getFallbackQuery());

  writer.close();
  reader.close();
  dir.close();
}
 
Example #4
Source File: TestTermAutomatonQuery.java    From lucene-solr with Apache License 2.0
public void testTermDoesNotExist() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newTextField("field", "x y z", Field.Store.NO));
  w.addDocument(doc);

  IndexReader r = w.getReader();
  IndexSearcher s = newSearcher(r);

  TokenStream ts = new CannedTokenStream(new Token[] {
      token("a", 1, 1),
    });

  TermAutomatonQuery q = new TokenStreamToTermAutomatonQuery().toQuery("field", ts);
  // System.out.println("DOT: " + q.toDot());
  assertEquals(0, s.search(q, 1).totalHits.value);

  w.close();
  r.close();
  dir.close();
}
 
Example #5
Source File: BaseXYPointTestCase.java    From lucene-solr with Apache License 2.0
/** test we can search for a point */
public void testDistanceBasics() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a doc with a location
  Document document = new Document();
  addPointToDoc("field", document, 18.313694f, -65.227444f);
  writer.addDocument(document);
  
  // search within 50km and verify we found our doc
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  assertEquals(1, searcher.count(newDistanceQuery("field", 18, -65, 20)));

  reader.close();
  writer.close();
  dir.close();
}
 
Example #6
Source File: TestLongValuesSource.java    From lucene-solr with Apache License 2.0
@Override
public void setUp() throws Exception {
  super.setUp();
  dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  int numDocs = TestUtil.nextInt(random(), 2049, 4000);
  int leastValue = 45;
  for (int i = 0; i < numDocs; i++) {
    Document document = new Document();
    document.add(newTextField("english", English.intToEnglish(i), Field.Store.NO));
    document.add(newTextField("oddeven", (i % 2 == 0) ? "even" : "odd", Field.Store.NO));
    document.add(new NumericDocValuesField("int", random().nextInt()));
    document.add(new NumericDocValuesField("long", random().nextLong()));
    if (i == 545) {
      document.add(new NumericDocValuesField("onefield", LEAST_LONG_VALUE));
    }
    iw.addDocument(document);
  }
  reader = iw.getReader();
  iw.close();
  searcher = newSearcher(reader);
}
 
Example #7
Source File: TestBooleanSimilarity.java    From lucene-solr with Apache License 2.0
public void testPhraseScoreIsEqualToBoost() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir,
      newIndexWriterConfig().setSimilarity(new BooleanSimilarity()));
  Document doc = new Document();
  doc.add(new TextField("foo", "bar baz quux", Store.NO));
  w.addDocument(doc);

  DirectoryReader reader = w.getReader();
  w.close();
  IndexSearcher searcher = newSearcher(reader);
  searcher.setSimilarity(new BooleanSimilarity());

  PhraseQuery query = new PhraseQuery(2, "foo", "bar", "quux");

  TopDocs topDocs = searcher.search(query, 2);
  assertEquals(1, topDocs.totalHits.value);
  assertEquals(1f, topDocs.scoreDocs[0].score, 0f);

  topDocs = searcher.search(new BoostQuery(query, 7), 2);
  assertEquals(1, topDocs.totalHits.value);
  assertEquals(7f, topDocs.scoreDocs[0].score, 0f);

  reader.close();
  dir.close();
}
 
Example #8
Source File: TestUnifiedHighlighterMTQ.java    From lucene-solr with Apache License 2.0
public void testRussianPrefixQuery() throws IOException {
  Analyzer analyzer = new StandardAnalyzer();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, analyzer);
  String field = "title";
  Document doc = new Document();
  doc.add(new Field(field, "я", fieldType)); // Russian char; uses 2 UTF8 bytes
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  Query query = new PrefixQuery(new Term(field, "я"));
  TopDocs topDocs = searcher.search(query, 1);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, analyzer);
  String[] snippets = highlighter.highlight(field, query, topDocs);
  assertEquals("[<b>я</b>]", Arrays.toString(snippets));
  ir.close();
}
 
Example #9
Source File: TestUnifiedHighlighterTermIntervals.java    From lucene-solr with Apache License 2.0
public void testBasics() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
  iw.addDocument(doc);
  body.setStringValue("Highlighting the first term. Hope it works.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  Query query = new IntervalQuery("body", Intervals.term("highlighting"));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(2, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs);
  assertEquals(2, snippets.length);
  assertEquals("Just a test <b>highlighting</b> from postings. ", snippets[0]);
  assertEquals("<b>Highlighting</b> the first term. ", snippets[1]);
  ir.close();
}
 
Example #10
Source File: TestPayloadSpanUtil.java    From lucene-solr with Apache License 2.0
public void testPayloadSpanUtil() throws Exception {
  Directory directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new PayloadAnalyzer()).setSimilarity(new ClassicSimilarity()));

  Document doc = new Document();
  doc.add(newTextField(FIELD, "xx rr yy mm  pp", Field.Store.YES));
  writer.addDocument(doc);

  IndexReader reader = writer.getReader();
  writer.close();
  IndexSearcher searcher = newSearcher(reader);

  PayloadSpanUtil psu = new PayloadSpanUtil(searcher.getTopReaderContext());

  Collection<byte[]> payloads = psu.getPayloadsForQuery(new TermQuery(new Term(FIELD, "rr")));
  if (VERBOSE) {
    System.out.println("Num payloads:" + payloads.size());
    for (final byte[] bytes : payloads) {
      System.out.println(new String(bytes, StandardCharsets.UTF_8));
    }
  }
  reader.close();
  directory.close();
}
 
Example #11
Source File: TestIndexSortSortedNumericDocValuesRangeQuery.java    From lucene-solr with Apache License 2.0
private void testIndexSortDocValuesWithSingleValue(boolean reverse) throws IOException{
  Directory dir = newDirectory();

  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  Sort indexSort = new Sort(new SortedNumericSortField("field", SortField.Type.LONG, reverse));
  iwc.setIndexSort(indexSort);
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);

  writer.addDocument(createDocument("field", 42));

  DirectoryReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);

  assertEquals(1, searcher.count(createQuery("field", 42, 43)));
  assertEquals(1, searcher.count(createQuery("field", 42, 42)));
  assertEquals(0, searcher.count(createQuery("field", 41, 41)));
  assertEquals(0, searcher.count(createQuery("field", 43, 43)));

  writer.close();
  reader.close();
  dir.close();
}
 
Example #12
Source File: TestDocument.java    From lucene-solr with Apache License 2.0
public void testPositionIncrementMultiFields() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  writer.addDocument(makeDocumentWithFields());
  IndexReader reader = writer.getReader();
  
  IndexSearcher searcher = newSearcher(reader);
  PhraseQuery query = new PhraseQuery("indexed_not_tokenized", "test1", "test2");
  
  ScoreDoc[] hits = searcher.search(query, 1000).scoreDocs;
  assertEquals(1, hits.length);
  
  doAssert(searcher.doc(hits[0].doc), true);
  writer.close();
  reader.close();
  dir.close();    
}
 
Example #13
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0
public void testPropagatesScorerSupplier() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCachingPolicy(NEVER_CACHE);

  LRUQueryCache cache = new LRUQueryCache(1, 1000);
  searcher.setQueryCache(cache);

  AtomicBoolean scorerCreated = new AtomicBoolean(false);
  Query query = new DummyQuery2(scorerCreated);
  Weight weight = searcher.createWeight(searcher.rewrite(query), ScoreMode.COMPLETE_NO_SCORES, 1);
  ScorerSupplier supplier = weight.scorerSupplier(searcher.getIndexReader().leaves().get(0));
  assertFalse(scorerCreated.get());
  supplier.get(random().nextLong() & 0x7FFFFFFFFFFFFFFFL);
  assertTrue(scorerCreated.get());

  reader.close();
  w.close();
  dir.close();
}
 
Example #14
Source File: TestSimilarityProvider.java    From lucene-solr with Apache License 2.0
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();
  PerFieldSimilarityWrapper sim = new ExampleSimilarityProvider();
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random())).setSimilarity(sim);
  RandomIndexWriter iw = new RandomIndexWriter(random(), directory, iwc);
  Document doc = new Document();
  Field field = newTextField("foo", "", Field.Store.NO);
  doc.add(field);
  Field field2 = newTextField("bar", "", Field.Store.NO);
  doc.add(field2);

  field.setStringValue("quick brown fox");
  field2.setStringValue("quick brown fox");
  iw.addDocument(doc);
  field.setStringValue("jumps over lazy brown dog");
  field2.setStringValue("jumps over lazy brown dog");
  iw.addDocument(doc);
  reader = iw.getReader();
  iw.close();
  searcher = newSearcher(reader);
  searcher.setSimilarity(sim);
}
 
Example #15
Source File: TestOrdinalMappingLeafReader.java    From lucene-solr with Apache License 2.0
private void buildIndexWithFacets(Directory indexDir, Directory taxoDir, boolean asc) throws IOException {
  IndexWriterConfig config = newIndexWriterConfig(null);
  RandomIndexWriter writer = new RandomIndexWriter(random(), indexDir, config);
  
  DirectoryTaxonomyWriter taxonomyWriter = new DirectoryTaxonomyWriter(taxoDir);
  for (int i = 1; i <= NUM_DOCS; i++) {
    Document doc = new Document();
    for (int j = i; j <= NUM_DOCS; j++) {
      int facetValue = asc ? j : NUM_DOCS - j;
      doc.add(new FacetField("tag", Integer.toString(facetValue)));
    }
    // add a facet under default dim config
    doc.add(new FacetField("id", Integer.toString(i)));
    
    // make sure OrdinalMappingLeafReader ignores non-facet BinaryDocValues fields
    doc.add(new BinaryDocValuesField("bdv", new BytesRef(Integer.toString(i))));
    doc.add(new BinaryDocValuesField("cbdv", new BytesRef(Integer.toString(i*2))));
    writer.addDocument(facetConfig.build(taxonomyWriter, doc));
  }
  taxonomyWriter.commit();
  taxonomyWriter.close();
  writer.commit();
  writer.close();
}
 
Example #16
Source File: TestPrefixCompletionQuery.java    From lucene-solr with Apache License 2.0
@Test
public void testEmptyPrefixQuery() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
  Document document = new Document();
  document.add(new SuggestField("suggest_field", "suggestion1", 1));
  iw.addDocument(document);

  if (rarely()) {
    iw.commit();
  }

  DirectoryReader reader = iw.getReader();
  SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
  PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", ""));

  TopSuggestDocs suggest = suggestIndexSearcher.suggest(query, 5, false);
  assertEquals(0, suggest.scoreDocs.length);

  reader.close();
  iw.close();
}
 
Example #17
Source File: TestLegacyTerms.java    From lucene-solr with Apache License 2.0
public void testDoubleFieldMinMax() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  int numDocs = atLeast(100);
  double minValue = Double.POSITIVE_INFINITY;
  double maxValue = Double.NEGATIVE_INFINITY;
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    double num = random().nextDouble();
    minValue = Math.min(num, minValue);
    maxValue = Math.max(num, maxValue);
    doc.add(new LegacyDoubleField("field", num, Field.Store.NO));
    w.addDocument(doc);
  }
  
  IndexReader r = w.getReader();

  Terms terms = MultiTerms.getTerms(r, "field");

  assertEquals(minValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMinLong(terms)), 0.0);
  assertEquals(maxValue, NumericUtils.sortableLongToDouble(LegacyNumericUtils.getMaxLong(terms)), 0.0);

  r.close();
  w.close();
  dir.close();
}
 
Example #18
Source File: TestMultiPhraseQuery.java    From lucene-solr with Apache License 2.0
public void testPhrasePrefixWithBooleanQuery() throws IOException {
  Directory indexStore = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), indexStore);
  add("This is a test", "object", writer);
  add("a note", "note", writer);
  
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  
  // This query will be equivalent to +type:note +body:"a t*"
  BooleanQuery.Builder q = new BooleanQuery.Builder();
  q.add(new TermQuery(new Term("type", "note")), BooleanClause.Occur.MUST);
  
  MultiPhraseQuery.Builder troubleBuilder = new MultiPhraseQuery.Builder();
  troubleBuilder.add(new Term("body", "a"));
  troubleBuilder
      .add(new Term[] {new Term("body", "test"), new Term("body", "this")});
  q.add(troubleBuilder.build(), BooleanClause.Occur.MUST);
  
  // exception will be thrown here without fix for #35626:
  ScoreDoc[] hits = searcher.search(q.build(), 1000).scoreDocs;
  assertEquals("Wrong number of hits", 0, hits.length);
  writer.close();
  reader.close();
  indexStore.close();
}
 
Example #19
Source File: TestIntervals.java    From lucene-solr with Apache License 2.0
@BeforeClass
public static void setupIndex() throws IOException {
  directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(analyzer).setMergePolicy(newLogMergePolicy()));
  for (int i = 0; i < field1_docs.length; i++) {
    Document doc = new Document();
    doc.add(new Field("field1", field1_docs[i], FIELD_TYPE));
    doc.add(new Field("field2", field2_docs[i], FIELD_TYPE));
    doc.add(new StringField("id", Integer.toString(i), Field.Store.NO));
    doc.add(new NumericDocValuesField("id", i));
    writer.addDocument(doc);
  }
  writer.close();
  searcher = new IndexSearcher(DirectoryReader.open(directory));
}
 
Example #20
Source File: TestTaxonomyFacetAssociations.java    From lucene-solr with Apache License 2.0
public void testNoHierarchy() throws Exception {
  Directory dir = newDirectory();
  Directory taxoDir = newDirectory();
  
  TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
  FacetsConfig config = new FacetsConfig();
  config.setHierarchical("a", true);
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  Document doc = new Document();
  doc.add(new IntAssociationFacetField(14, "a", "x"));
  expectThrows(IllegalArgumentException.class, () -> {
    writer.addDocument(config.build(taxoWriter, doc));
  });

  writer.close();
  IOUtils.close(taxoWriter, dir, taxoDir);
}
 
Example #21
Source File: DocumentValueSourceDictionaryTest.java    From lucene-solr with Apache License 2.0
@Test
public void testValueSourceEmptyReader() throws IOException {
  Directory dir = newDirectory();
  Analyzer analyzer = new MockAnalyzer(random());
  IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
  iwc.setMergePolicy(newLogMergePolicy());
  // Make sure the index is created?
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, iwc);
  writer.commit();
  writer.close();
  IndexReader ir = DirectoryReader.open(dir);
  Dictionary dictionary = new DocumentValueSourceDictionary(ir, FIELD_NAME, LongValuesSource.constant(10), PAYLOAD_FIELD_NAME);
  InputIterator inputIterator = dictionary.getEntryIterator();

  assertNull(inputIterator.next());
  assertEquals(inputIterator.weight(), 0);
  assertNull(inputIterator.payload());

  IOUtils.close(ir, analyzer, dir);
}
 
Example #22
Source File: TestUnifiedHighlighter.java    From lucene-solr with Apache License 2.0
/**
 * Make sure we can customize how an empty
 * highlight is returned.
 */
public void testCustomEmptyHighlights() throws Exception {
  indexAnalyzer.setPositionIncrementGap(10);
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Document doc = new Document();

  Field body = new Field("body", "test this is.  another sentence this test has.  far away is that planet.", fieldType);
  doc.add(body);
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  highlighter.setMaxNoHighlightPassages(0);// don't want any default summary
  Query query = new TermQuery(new Term("body", "highlighting"));
  int[] docIDs = new int[]{0};
  String snippets[] = highlighter.highlightFields(new String[]{"body"}, query, docIDs, new int[]{2}).get("body");
  assertEquals(1, snippets.length);
  assertNull(snippets[0]);

  ir.close();
}
 
Example #23
Source File: TestQueryBitSetProducer.java    From lucene-solr with Apache License 2.0
public void testSimple() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE);
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, iwc);
  w.addDocument(new Document());
  DirectoryReader reader = w.getReader();

  QueryBitSetProducer producer = new QueryBitSetProducer(new MatchNoDocsQuery());
  assertNull(producer.getBitSet(reader.leaves().get(0)));
  assertEquals(1, producer.cache.size());

  producer = new QueryBitSetProducer(new MatchAllDocsQuery());
  BitSet bitSet = producer.getBitSet(reader.leaves().get(0));
  assertEquals(1, bitSet.length());
  assertEquals(true, bitSet.get(0));
  assertEquals(1, producer.cache.size());

  IOUtils.close(reader, w, dir);
}
 
Example #24
Source File: TestDoubleValuesSource.java    From lucene-solr with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  final int numDocs;
  if (TEST_NIGHTLY) {
    numDocs = TestUtil.nextInt(random(), 2049, 4000);
  } else {
    numDocs = atLeast(546);
  }
  for (int i = 0; i < numDocs; i++) {
    Document document = new Document();
    document.add(newTextField("english", English.intToEnglish(i), Field.Store.NO));
    document.add(newTextField("oddeven", (i % 2 == 0) ? "even" : "odd", Field.Store.NO));
    document.add(new NumericDocValuesField("int", random().nextInt()));
    document.add(new NumericDocValuesField("long", random().nextLong()));
    document.add(new FloatDocValuesField("float", random().nextFloat()));
    document.add(new DoubleDocValuesField("double", random().nextDouble()));
    if (i == 545) {
      document.add(new DoubleDocValuesField("onefield", LEAST_DOUBLE_VALUE));
    }
    iw.addDocument(document);
  }
  reader = iw.getReader();
  iw.close();
  searcher = newSearcher(reader);
}
 
Example #25
Source File: TestInetAddressPoint.java    From lucene-solr with Apache License 2.0
/** Add a single address and search for it */
public void testBasics() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  // add a doc with an address
  Document document = new Document();
  InetAddress address = InetAddress.getByName("1.2.3.4");
  document.add(new InetAddressPoint("field", address));
  writer.addDocument(document);
  
  // search and verify we found our doc
  IndexReader reader = writer.getReader();
  IndexSearcher searcher = newSearcher(reader);
  assertEquals(1, searcher.count(InetAddressPoint.newExactQuery("field", address)));
  assertEquals(1, searcher.count(InetAddressPoint.newPrefixQuery("field", address, 24)));
  assertEquals(1, searcher.count(InetAddressPoint.newRangeQuery("field", InetAddress.getByName("1.2.3.3"), InetAddress.getByName("1.2.3.5"))));
  assertEquals(1, searcher.count(InetAddressPoint.newSetQuery("field", InetAddress.getByName("1.2.3.4"))));
  assertEquals(1, searcher.count(InetAddressPoint.newSetQuery("field", InetAddress.getByName("1.2.3.4"), InetAddress.getByName("1.2.3.5"))));
  assertEquals(0, searcher.count(InetAddressPoint.newSetQuery("field", InetAddress.getByName("1.2.3.3"))));
  assertEquals(0, searcher.count(InetAddressPoint.newSetQuery("field")));

  reader.close();
  writer.close();
  dir.close();
}
 
Example #26
Source File: TestPerFieldPostingsFormat2.java    From lucene-solr with Apache License 2.0
private void doTestMixedPostings(Codec codec) throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc.setCodec(codec);
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
  Document doc = new Document();
  FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
  // turn on vectors for the checkindex cross-check
  ft.setStoreTermVectors(true);
  ft.setStoreTermVectorOffsets(true);
  ft.setStoreTermVectorPositions(true);
  Field idField = new Field("id", "", ft);
  Field dateField = new Field("date", "", ft);
  doc.add(idField);
  doc.add(dateField);
  for (int i = 0; i < 100; i++) {
    idField.setStringValue(Integer.toString(random().nextInt(50)));
    dateField.setStringValue(Integer.toString(random().nextInt(100)));
    iw.addDocument(doc);
  }
  iw.close();
  dir.close(); // checkindex
}
 
Example #27
Source File: TestConstantScoreQuery.java    From lucene-solr with Apache License 2.0
public void testPropagatesApproximations() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  Field f = newTextField("field", "a b", Field.Store.NO);
  doc.add(f);
  w.addDocument(doc);
  w.commit();

  DirectoryReader reader = w.getReader();
  final IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCache(null); // to still have approximations

  PhraseQuery pq = new PhraseQuery("field", "a", "b");

  Query q = searcher.rewrite(new ConstantScoreQuery(pq));

  final Weight weight = searcher.createWeight(q, ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
  assertNotNull(scorer.twoPhaseIterator());

  reader.close();
  w.close();
  dir.close();
}
 
Example #28
Source File: TestFieldCacheSort.java    From lucene-solr with Apache License 2.0
/** Tests sorting a single document */
public void testSortOneDocument() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newStringField("value", "foo", Field.Store.YES));
  writer.addDocument(doc);
  IndexReader ir = UninvertingReader.wrap(writer.getReader(),
                   Collections.singletonMap("value", Type.SORTED));
  writer.close();
  
  IndexSearcher searcher = newSearcher(ir);
  Sort sort = new Sort(new SortField("value", SortField.Type.STRING));

  TopDocs td = searcher.search(new MatchAllDocsQuery(), 10, sort);
  assertEquals(1, td.totalHits.value);
  assertEquals("foo", searcher.doc(td.scoreDocs[0].doc).get("value"));
  TestUtil.checkReader(ir);
  ir.close();
  dir.close();
}
 
Example #29
Source File: TestFloatPointNearestNeighbor.java    From lucene-solr with Apache License 2.0
public void testNearestNeighborWithAllDeletedDocs() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir, getIndexWriterConfig());
  Document doc = new Document();
  doc.add(new FloatPoint("point", 40.0f, 50.0f));
  doc.add(new StringField("id", "0", Field.Store.YES));
  w.addDocument(doc);
  doc = new Document();
  doc.add(new FloatPoint("point", 45.0f, 55.0f));
  doc.add(new StringField("id", "1", Field.Store.YES));
  w.addDocument(doc);

  DirectoryReader r = w.getReader();
  // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl:
  IndexSearcher s = newSearcher(r, false);
  FieldDoc hit = (FieldDoc)FloatPointNearestNeighbor.nearest(s, "point", 1, 40.0f, 50.0f).scoreDocs[0];
  assertEquals("0", r.document(hit.doc).getField("id").stringValue());
  r.close();

  w.deleteDocuments(new Term("id", "0"));
  w.deleteDocuments(new Term("id", "1"));
  r = w.getReader();
  // can't wrap because we require Lucene60PointsFormat directly but e.g. ParallelReader wraps with its own points impl:
  s = newSearcher(r, false);
  assertEquals(0, FloatPointNearestNeighbor.nearest(s, "point", 1, 40.0f, 50.0f).scoreDocs.length);
  r.close();
  w.close();
  dir.close();
}
 
Example #30
Source File: BaseSimilarityTestCase.java    From lucene-solr with Apache License 2.0
@BeforeClass
public static void beforeClass() throws Exception {
  // with norms
  DIR = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), DIR);
  Document doc = new Document();
  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
  fieldType.setOmitNorms(true);
  doc.add(newField("field", "value", fieldType));
  writer.addDocument(doc);
  READER = getOnlyLeafReader(writer.getReader());
  writer.close();
}