Java Code Examples for org.apache.lucene.index.RandomIndexWriter#forceMerge()

The following examples show how to use org.apache.lucene.index.RandomIndexWriter#forceMerge(). You can vote up the examples you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: TestSpanCollection.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();
  // Log merge policy + forceMerge(1) below guarantee a single-segment index,
  // so the span assertions can run against one leaf reader.
  IndexWriterConfig config =
      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy());
  RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config);
  for (String content : docFields) {
    Document document = new Document();
    document.add(newField(FIELD, content, OFFSETS));
    indexWriter.addDocument(document);
  }
  indexWriter.forceMerge(1);
  reader = indexWriter.getReader();
  indexWriter.close();
  searcher = newSearcher(getOnlyLeafReader(reader));
}
 
Example 2
Source File: TestConstantScoreScorer.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
TestConstantScoreScorerIndex() throws IOException {
  directory = newDirectory();

  writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random().nextBoolean())));

  for (String VALUE : VALUES) {
    Document doc = new Document();
    doc.add(newTextField(FIELD, VALUE, Field.Store.YES));
    writer.addDocument(doc);
  }
  writer.forceMerge(1);

  reader = writer.getReader();
  writer.close();
}
 
Example 3
Source File: TestTermScorer.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();

  // Use ClassicSimilarity on both writer and searcher so term scores are
  // deterministic for the assertions in this test class.
  IndexWriterConfig config = newIndexWriterConfig(new MockAnalyzer(random()))
      .setMergePolicy(newLogMergePolicy())
      .setSimilarity(new ClassicSimilarity());
  RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config);
  for (String value : values) {
    Document document = new Document();
    document.add(newTextField(FIELD, value, Field.Store.YES));
    indexWriter.addDocument(document);
  }
  indexWriter.forceMerge(1);
  indexReader = getOnlyLeafReader(indexWriter.getReader());
  indexWriter.close();
  indexSearcher = newSearcher(indexReader, false);
  indexSearcher.setSimilarity(new ClassicSimilarity());
}
 
Example 4
Source File: TestFeatureDoubleValues.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
public void testFeature() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random().nextBoolean()));
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
  // Index three docs carrying feature values 30, 1 and 4 (in that doc order).
  for (float featureValue : new float[] {30F, 1F, 4F}) {
    Document doc = new Document();
    doc.add(new FeatureField("field", "name", featureValue));
    writer.addDocument(doc);
  }
  writer.forceMerge(1);
  IndexReader ir = writer.getReader();
  writer.close();

  // Single segment expected after the force merge.
  assertEquals(1, ir.leaves().size());
  LeafReaderContext context = ir.leaves().get(0);
  DoubleValuesSource valuesSource = FeatureField.newDoubleValues("field", "name");
  DoubleValues values = valuesSource.getValues(context, null);

  // Each doc advances and exposes its indexed feature value; doc 3 is absent.
  assertTrue(values.advanceExact(0));
  assertEquals(30, values.doubleValue(), 0f);
  assertTrue(values.advanceExact(1));
  assertEquals(1, values.doubleValue(), 0f);
  assertTrue(values.advanceExact(2));
  assertEquals(4, values.doubleValue(), 0f);
  assertFalse(values.advanceExact(3));

  ir.close();
  dir.close();
}
 
Example 5
Source File: TestBlockJoin.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
public void testToChildInitialAdvanceParentButNoKids() throws Exception {

    final Directory dir = newDirectory();
    final RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

    // degenerate case: the first parent document has no children at all
    writer.addDocument(makeResume("first", "nokids"));
    writer.addDocuments(Arrays.asList(makeJob("job", 42), makeResume("second", "haskid")));

    // collapse to a single segment so leaves().get(0) covers the whole index
    writer.forceMerge(1);

    final IndexReader reader = writer.getReader();
    final IndexSearcher searcher = newSearcher(reader, false);
    writer.close();

    final BitSetProducer parentFilter =
        new QueryBitSetProducer(new TermQuery(new Term("docType", "resume")));
    final Query parentQuery = new TermQuery(new Term("docType", "resume"));
    final ToChildBlockJoinQuery joinQuery = new ToChildBlockJoinQuery(parentQuery, parentFilter);

    final Weight weight = searcher.createWeight(searcher.rewrite(joinQuery),
        RandomPicks.randomFrom(random(), org.apache.lucene.search.ScoreMode.values()), 1);
    final Scorer advancingScorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
    final Scorer nextDocScorer = weight.scorer(searcher.getIndexReader().leaves().get(0));

    // nextDoc() and advance(0) must agree on the first child hit
    final int firstKid = nextDocScorer.iterator().nextDoc();
    assertTrue("firstKid not found", DocIdSetIterator.NO_MORE_DOCS != firstKid);
    assertEquals(firstKid, advancingScorer.iterator().advance(0));

    reader.close();
    dir.close();
  }
 
Example 6
Source File: TestLatLonShape.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
public void testLUCENE9055() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);

  // test polygons:
  //[5, 5], [10, 6], [10, 10], [5, 10], [5, 5] ]
  Polygon indexPoly1 = new Polygon(
      new double[] {5d, 6d, 10d, 10d, 5d},
      new double[] {5d, 10d, 10d, 5d, 5d}
  );

  // [ [6, 6], [9, 6], [9, 9], [6, 9], [6, 6] ]
  Polygon indexPoly2 = new Polygon(
      new double[] {6d, 6d, 9d, 9d, 6d},
      new double[] {6d, 9d, 9d, 6d, 6d}
  );

  // index polygons:
  Document doc;
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly1);
  w.addDocument(doc);
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly2);
  w.addDocument(doc);
  // single segment so the query runs against one leaf
  w.forceMerge(1);

  ///// search //////
  IndexReader reader = w.getReader();
  w.close();
  IndexSearcher searcher = newSearcher(reader);

  // [ [0, 0], [5, 5], [7, 7] ]
  Line searchLine = new Line(new double[] {0, 5, 7}, new double[] {0, 5, 7});

  // the line crosses both indexed polygons, so both docs must match
  Query q = LatLonShape.newLineQuery(FIELDNAME, QueryRelation.INTERSECTS, searchLine);
  assertEquals(2, searcher.count(q));

  // the writer was already closed above; only reader and dir remain open
  IOUtils.close(reader, dir);
}
 
Example 7
Source File: TestMultiTermQueryRewrites.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
@BeforeClass
public static void beforeClass() throws Exception {
  dir = newDirectory();
  sdir1 = newDirectory();
  sdir2 = newDirectory();
  final RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new MockAnalyzer(random()));
  final RandomIndexWriter swriter1 = new RandomIndexWriter(random(), sdir1, new MockAnalyzer(random()));
  final RandomIndexWriter swriter2 = new RandomIndexWriter(random(), sdir2, new MockAnalyzer(random()));

  // every doc goes to the main index; even ids go to sub-index 1, odd ids to sub-index 2
  for (int i = 0; i < 10; i++) {
    Document doc = new Document();
    doc.add(newStringField("data", Integer.toString(i), Field.Store.NO));
    writer.addDocument(doc);
    RandomIndexWriter subWriter = (i % 2 == 0) ? swriter1 : swriter2;
    subWriter.addDocument(doc);
  }
  writer.forceMerge(1);
  swriter1.forceMerge(1);
  swriter2.forceMerge(1);
  writer.close();
  swriter1.close();
  swriter2.close();

  reader = DirectoryReader.open(dir);
  searcher = newSearcher(reader);

  // composite view over the two disjoint sub-indexes
  multiReader = new MultiReader(new IndexReader[] {
    DirectoryReader.open(sdir1), DirectoryReader.open(sdir2)
  }, true);
  multiSearcher = newSearcher(multiReader);

  // composite view containing duplicates: sdir1's docs also exist in dir
  multiReaderDupls = new MultiReader(new IndexReader[] {
    DirectoryReader.open(sdir1), DirectoryReader.open(dir)
  }, true);
  multiSearcherDupls = newSearcher(multiReaderDupls);
}
 
Example 8
Source File: TestMinShouldMatch2.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
@BeforeClass
public static void beforeClass() throws Exception {
  dir = newDirectory();
  RandomIndexWriter indexWriter = new RandomIndexWriter(random(), dir);
  final int numDocs = atLeast(300);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();

    // every doc gets the always-present terms; the rest are added with
    // decreasing density: ~90%, ~50% and ~10% of docs respectively
    addSome(doc, alwaysTerms);
    if (random().nextInt(100) < 90) {
      addSome(doc, commonTerms);
    }
    if (random().nextInt(100) < 50) {
      addSome(doc, mediumTerms);
    }
    if (random().nextInt(100) < 10) {
      addSome(doc, rareTerms);
    }
    indexWriter.addDocument(doc);
  }
  indexWriter.forceMerge(1);
  indexWriter.close();
  r = DirectoryReader.open(dir);
  reader = getOnlyLeafReader(r);
  searcher = new IndexSearcher(reader);
  searcher.setSimilarity(new ClassicSimilarity());
}
 
Example 9
Source File: FunctionTestSetup.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Builds the shared test index with N_DOCS documents, optionally spread over
 * multiple segments.
 *
 * @param doMultiSegment when true, a small random maxBufferedDocs forces
 *                       several segments and the full merge is skipped
 */
protected static void createIndex(boolean doMultiSegment) throws Exception {
  if (VERBOSE) {
    System.out.println("TEST: setUp");
  }
  // prepare a small index with just a few documents.
  dir = newDirectory();
  anlzr = new MockAnalyzer(random());
  IndexWriterConfig iwc = newIndexWriterConfig(anlzr).setMergePolicy(newLogMergePolicy());
  if (doMultiSegment) {
    iwc.setMaxBufferedDocs(TestUtil.nextInt(random(), 2, 7));
  }
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwc);
  // add docs not exactly in natural ID order, to verify we do check the order of docs by scores
  int remaining = N_DOCS;
  boolean[] done = new boolean[N_DOCS];
  int i = 0;
  while (remaining > 0) {
    if (done[i]) {
      // stepping by 4 modulo N_DOCS visits every doc exactly once only when
      // N_DOCS is coprime with 4, hence the "prime and greater than 2" demand
      throw new Exception("to set this test correctly N_DOCS=" + N_DOCS + " must be prime and greater than 2!");
    }
    addDoc(iw, i);
    done[i] = true;
    i = (i + 4) % N_DOCS;
    remaining--;
  }
  if (!doMultiSegment) {
    if (VERBOSE) {
      System.out.println("TEST: setUp full merge");
    }
    iw.forceMerge(1);
  }
  iw.close();
  if (VERBOSE) {
    System.out.println("TEST: setUp done close");
  }
}
 
Example 10
Source File: TestNearSpansOrdered.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();
  // Force-merged single-segment index so span ordering is tested on one leaf.
  IndexWriterConfig config =
      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy());
  RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config);
  for (String content : docFields) {
    Document document = new Document();
    document.add(newTextField(FIELD, content, Field.Store.NO));
    indexWriter.addDocument(document);
  }
  indexWriter.forceMerge(1);
  reader = indexWriter.getReader();
  indexWriter.close();
  searcher = newSearcher(getOnlyLeafReader(reader));
}
 
Example 11
Source File: TestSpanContainQuery.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
@Override
public void setUp() throws Exception {
  super.setUp();
  directory = newDirectory();
  // One doc per entry of docFields, merged into a single segment.
  IndexWriterConfig config =
      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy());
  RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory, config);
  for (String content : docFields) {
    Document document = new Document();
    document.add(newTextField(field, content, Field.Store.YES));
    indexWriter.addDocument(document);
  }
  indexWriter.forceMerge(1);
  reader = indexWriter.getReader();
  indexWriter.close();
  searcher = newSearcher(getOnlyLeafReader(reader));
}
 
Example 12
Source File: LtrQueryTests.java    From elasticsearch-learning-to-rank with Apache License 2.0 5 votes vote down vote up
@Before
public void setupIndex() throws IOException {
    dirUnderTest = newDirectory();
    // Pick one similarity at random per run so the suite exercises many
    // scoring models over time.
    List<Similarity> candidates = Arrays.asList(
            new ClassicSimilarity(),
            new SweetSpotSimilarity(), // extends Classic
            new BM25Similarity(),
            new LMDirichletSimilarity(),
            new BooleanSimilarity(),
            new LMJelinekMercerSimilarity(0.2F),
            new AxiomaticF3LOG(0.5F, 10),
            new DFISimilarity(new IndependenceChiSquared()),
            new DFRSimilarity(new BasicModelG(), new AfterEffectB(), new NormalizationH1()),
            new IBSimilarity(new DistributionLL(), new LambdaDF(), new NormalizationH3())
        );
    similarity = candidates.get(random().nextInt(candidates.size()));

    indexWriterUnderTest = new RandomIndexWriter(random(), dirUnderTest,
            newIndexWriterConfig().setSimilarity(similarity));
    // One doc per entry of docs[]; the array index doubles as the stored id.
    for (int docId = 0; docId < docs.length; docId++) {
        Document document = new Document();
        document.add(newStringField("id", "" + docId, Field.Store.YES));
        document.add(newField("field", docs[docId], Store.YES));
        indexWriterUnderTest.addDocument(document);
    }
    indexWriterUnderTest.commit();
    indexWriterUnderTest.forceMerge(1);
    indexWriterUnderTest.flush();

    indexReaderUnderTest = indexWriterUnderTest.getReader();
    searcherUnderTest = newSearcher(indexReaderUnderTest);
    searcherUnderTest.setSimilarity(similarity);
}
 
Example 13
Source File: TestPointQueries.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
// Verifies that a range query over points degrades to a match-all iterator
// (DocIdSetIterator.all) when every indexed document matches the range, and
// that the optimization is disabled once a doc without a point value exists.
public void testRangeOptimizesIfAllPointsMatch() throws IOException {
  final int numDims = TestUtil.nextInt(random(), 1, 3);
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  int[] value = new int[numDims];
  for (int i = 0; i < numDims; ++i) {
    value[i] = TestUtil.nextInt(random(), 1, 10);
  }
  doc.add(new IntPoint("point", value));
  w.addDocument(doc);
  IndexReader reader = w.getReader();
  IndexSearcher searcher = new IndexSearcher(reader);
  // Disable caching so weight.scorer() reflects the query's own iterator.
  searcher.setQueryCache(null);
  int[] lowerBound = new int[numDims];
  int[] upperBound = new int[numDims];
  for (int i = 0; i < numDims; ++i) {
    // NOTE(review): nextInt(1) always returns 0, so the bounds equal the
    // indexed value exactly — presumably intentional (a point query), but
    // looks like nextInt(2) may have been meant; confirm against upstream.
    lowerBound[i] = value[i] - random().nextInt(1);
    upperBound[i] = value[i] + random().nextInt(1);
  }
  Query query = IntPoint.newRangeQuery("point", lowerBound, upperBound);
  Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1);
  Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
  // All docs match => the scorer's iterator is the match-all implementation.
  assertEquals(DocIdSetIterator.all(1).getClass(), scorer.iterator().getClass());

  // When not all documents in the query have a value, the optimization is not applicable
  reader.close();
  w.addDocument(new Document());
  w.forceMerge(1);
  reader = w.getReader();
  searcher = new IndexSearcher(reader);
  searcher.setQueryCache(null);
  weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1);
  scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
  // With a doc missing the point field, the match-all shortcut must not fire.
  assertFalse(DocIdSetIterator.all(1).getClass().equals(scorer.iterator().getClass()));

  reader.close();
  w.close();
  dir.close();
}
 
Example 14
Source File: TestFeatureDoubleValues.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
public void testFeatureMultipleMissing() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig config = newIndexWriterConfig().setMergePolicy(newLogMergePolicy(random().nextBoolean()));
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, config);
  // Docs 0-4 carry no feature field at all.
  for (int emptyDoc = 0; emptyDoc < 5; emptyDoc++) {
    writer.addDocument(new Document());
  }
  // Docs 5 and 6 carry feature values 1 and 4.
  Document doc = new Document();
  doc.add(new FeatureField("field", "name", 1F));
  writer.addDocument(doc);
  doc = new Document();
  doc.add(new FeatureField("field", "name", 4F));
  writer.addDocument(doc);
  writer.forceMerge(1);
  IndexReader ir = writer.getReader();
  writer.close();

  assertEquals(1, ir.leaves().size());
  LeafReaderContext context = ir.leaves().get(0);
  DoubleValuesSource valuesSource = FeatureField.newDoubleValues("field", "name");
  DoubleValues values = valuesSource.getValues(context, null);

  // advanceExact must fail for all docs without the field (0-4 and 7)
  // and expose the indexed value for docs 5 and 6.
  for (int docId = 0; docId < 5; docId++) {
    assertFalse(values.advanceExact(docId));
  }
  assertTrue(values.advanceExact(5));
  assertEquals(1, values.doubleValue(), 0f);
  assertTrue(values.advanceExact(6));
  assertEquals(4, values.doubleValue(), 0f);
  assertFalse(values.advanceExact(7));

  ir.close();
  dir.close();
}
 
Example 15
Source File: TestLegacyFieldCache.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
// Checks that FieldCache exposes legacy int field values as NumericDocValues,
// skipping docs that were indexed without the field.
public void testIntFieldCache() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
  cfg.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
  // A single Document/field instance is deliberately reused across
  // addDocument calls; only the field's int value changes between docs.
  Document doc = new Document();
  LegacyIntField field = new LegacyIntField("f", 0, Store.YES);
  doc.add(field);
  final int[] values = new int[TestUtil.nextInt(random(), 1, 10)];
  Set<Integer> missing = new HashSet<>();
  for (int i = 0; i < values.length; ++i) {
    final int v;
    // Bias the value distribution toward interesting boundaries.
    switch (random().nextInt(10)) {
      case 0:
        v = Integer.MIN_VALUE;
        break;
      case 1:
        v = 0;
        break;
      case 2:
        v = Integer.MAX_VALUE;
        break;
      default:
        v = TestUtil.nextInt(random(), -10, 10);
        break;
    }
    values[i] = v;
    if (v == 0 && random().nextBoolean()) {
      // missing: index an empty doc instead, and remember its position
      iw.addDocument(new Document());
      missing.add(i);
    } else {
      field.setIntValue(v);
      iw.addDocument(doc);
    }
  }
  // Single segment so docIDs line up with insertion order below.
  iw.forceMerge(1);
  final DirectoryReader reader = iw.getReader();
  final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlyLeafReader(reader), "f", FieldCache.LEGACY_INT_PARSER);
  for (int i = 0; i < values.length; ++i) {
    if (missing.contains(i) == false) {
      // the iterator advances only over docs that actually have the field
      assertEquals(i, ints.nextDoc());
      assertEquals(values[i], ints.longValue());
    }
  }
  assertEquals(NO_MORE_DOCS, ints.nextDoc());
  reader.close();
  iw.close();
  dir.close();
}
 
Example 16
Source File: TestFieldCache.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
// Same scenario as the legacy variant above, but indexing IntPoint values
// and reading them back through FieldCache's INT_POINT_PARSER.
public void testIntFieldCache() throws IOException {
  Directory dir = newDirectory();
  IndexWriterConfig cfg = newIndexWriterConfig(new MockAnalyzer(random()));
  cfg.setMergePolicy(newLogMergePolicy());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, cfg);
  // A single Document/IntPoint instance is deliberately reused; only the
  // point's int value is mutated between addDocument calls.
  Document doc = new Document();
  IntPoint field = new IntPoint("f", 0);
  doc.add(field);
  final int[] values = new int[TestUtil.nextInt(random(), 1, 10)];
  Set<Integer> missing = new HashSet<>();
  for (int i = 0; i < values.length; ++i) {
    final int v;
    // Bias the value distribution toward interesting boundaries.
    switch (random().nextInt(10)) {
      case 0:
        v = Integer.MIN_VALUE;
        break;
      case 1:
        v = 0;
        break;
      case 2:
        v = Integer.MAX_VALUE;
        break;
      default:
        v = TestUtil.nextInt(random(), -10, 10);
        break;
    }
    values[i] = v;
    if (v == 0 && random().nextBoolean()) {
      // missing: index an empty doc instead, and remember its position
      iw.addDocument(new Document());
      missing.add(i);
    } else {
      field.setIntValue(v);
      iw.addDocument(doc);
    }
  }
  // Single segment so docIDs line up with insertion order below.
  iw.forceMerge(1);
  final DirectoryReader reader = iw.getReader();
  final NumericDocValues ints = FieldCache.DEFAULT.getNumerics(getOnlyLeafReader(reader), "f", FieldCache.INT_POINT_PARSER);
  for (int i = 0; i < values.length; ++i) {
    if (missing.contains(i) == false) {
      // the iterator advances only over docs that actually have the field
      assertEquals(i, ints.nextDoc());
      assertEquals(values[i], ints.longValue());
    }
  }
  assertEquals(NO_MORE_DOCS, ints.nextDoc());
  reader.close();
  iw.close();
  dir.close();
}
 
Example 17
Source File: TestBooleanScorer.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
public void testSparseClauseOptimization() throws IOException {
  // When some windows have only one scorer that can match, the scorer will
  // directly call the collector in this window
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document emptyDoc = new Document();
  final int numDocs = atLeast(10);
  int numEmptyDocs = atLeast(200);
  for (int d = 0; d < numDocs; ++d) {
    // pad with empty docs so matching docs are sparse across BS1 windows
    for (int i = numEmptyDocs; i >= 0; --i) {
      w.addDocument(emptyDoc);
    }
    Document doc = new Document();
    for (String value : Arrays.asList("foo", "bar", "baz")) {
      if (random().nextBoolean()) {
        doc.add(new StringField("field", value, Store.NO));
      }
    }
    // BUGFIX: the candidate doc was built but never indexed, leaving the
    // index with only empty docs and making the duel below vacuous.
    w.addDocument(doc);
  }
  numEmptyDocs = atLeast(200);
  for (int i = numEmptyDocs; i >= 0; --i) {
    w.addDocument(emptyDoc);
  }
  if (random().nextBoolean()) {
    w.forceMerge(1);
  }
  IndexReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);

  Query query = new BooleanQuery.Builder()
    .add(new BoostQuery(new TermQuery(new Term("field", "foo")), 3), Occur.SHOULD)
    .add(new BoostQuery(new TermQuery(new Term("field", "bar")), 3), Occur.SHOULD)
    .add(new BoostQuery(new TermQuery(new Term("field", "baz")), 3), Occur.SHOULD)
    .build();

  // duel BS1 vs. BS2
  QueryUtils.check(random(), query, searcher);

  reader.close();
  w.close();
  dir.close();
}
 
Example 18
Source File: TestDisjunctionMaxQuery.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
@Override
public void setUp() throws Exception {
  super.setUp();

  index = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), index,
      newIndexWriterConfig(new MockAnalyzer(random()))
          .setSimilarity(sim)
          .setMergePolicy(newLogMergePolicy()));

  // "hed" is the most important field, "dek" is secondary

  // d1 is an "ok" match for: albino elephant
  {
    Document d1 = new Document();
    d1.add(newField("id", "d1", nonAnalyzedType));
    d1.add(newTextField("hed", "elephant", Field.Store.YES));
    d1.add(newTextField("dek", "elephant", Field.Store.YES));
    writer.addDocument(d1);
  }

  // d2 is a "good" match for: albino elephant
  {
    Document d2 = new Document();
    d2.add(newField("id", "d2", nonAnalyzedType));
    d2.add(newTextField("hed", "elephant", Field.Store.YES));
    d2.add(newTextField("dek", "albino", Field.Store.YES));
    d2.add(newTextField("dek", "elephant", Field.Store.YES));
    writer.addDocument(d2);
  }

  // d3 is a "better" match for: albino elephant
  {
    Document d3 = new Document();
    d3.add(newField("id", "d3", nonAnalyzedType));
    d3.add(newTextField("hed", "albino", Field.Store.YES));
    d3.add(newTextField("hed", "elephant", Field.Store.YES));
    writer.addDocument(d3);
  }

  // d4 is the "best" match for: albino elephant
  {
    Document d4 = new Document();
    d4.add(newField("id", "d4", nonAnalyzedType));
    d4.add(newTextField("hed", "albino", Field.Store.YES));
    d4.add(newField("hed", "elephant", nonAnalyzedType));
    d4.add(newTextField("dek", "albino", Field.Store.YES));
    writer.addDocument(d4);
  }

  writer.forceMerge(1);
  r = getOnlyLeafReader(writer.getReader());
  writer.close();
  s = new IndexSearcher(r);
  s.setSimilarity(sim);
}
 
Example 19
Source File: TestFieldMaskingSpanQuery.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
@BeforeClass
public static void beforeClass() throws Exception {
  directory = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory,
      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));

  // Each doc holds an id plus repeating (gender, first, last) triples,
  // one triple per person mentioned in the doc.

  // doc 0: one person
  writer.addDocument(doc(new Field[] {
      field("id", "0"),
      field("gender", "male"), field("first", "james"), field("last", "jones")}));

  // doc 1: two people
  writer.addDocument(doc(new Field[] {
      field("id", "1"),
      field("gender", "male"), field("first", "james"), field("last", "smith"),
      field("gender", "female"), field("first", "sally"), field("last", "jones")}));

  // doc 2: three people
  writer.addDocument(doc(new Field[] {
      field("id", "2"),
      field("gender", "female"), field("first", "greta"), field("last", "jones"),
      field("gender", "female"), field("first", "sally"), field("last", "smith"),
      field("gender", "male"), field("first", "james"), field("last", "jones")}));

  // doc 3: two people
  writer.addDocument(doc(new Field[] {
      field("id", "3"),
      field("gender", "female"), field("first", "lisa"), field("last", "jones"),
      field("gender", "male"), field("first", "bob"), field("last", "costas")}));

  // doc 4: three people
  writer.addDocument(doc(new Field[] {
      field("id", "4"),
      field("gender", "female"), field("first", "sally"), field("last", "smith"),
      field("gender", "female"), field("first", "linda"), field("last", "dixit"),
      field("gender", "male"), field("first", "bubba"), field("last", "jones")}));

  writer.forceMerge(1);
  reader = writer.getReader();
  writer.close();
  searcher = new IndexSearcher(getOnlyLeafReader(reader));
}
 
Example 20
Source File: TestLatLonShape.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
public void testLUCENE8736() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);

  // test polygons:
  Polygon indexPoly1 = new Polygon(
      new double[] {4d, 4d, 3d, 3d, 4d},
      new double[] {3d, 4d, 4d, 3d, 3d}
  );

  Polygon indexPoly2 = new Polygon(
      new double[] {2d, 2d, 1d, 1d, 2d},
      new double[] {6d, 7d, 7d, 6d, 6d}
  );

  Polygon indexPoly3 = new Polygon(
      new double[] {1d, 1d, 0d, 0d, 1d},
      new double[] {3d, 4d, 4d, 3d, 3d}
  );

  Polygon indexPoly4 = new Polygon(
      new double[] {2d, 2d, 1d, 1d, 2d},
      new double[] {0d, 1d, 1d, 0d, 0d}
  );

  // index polygons:
  Document doc;
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly1);
  w.addDocument(doc);
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly2);
  w.addDocument(doc);
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly3);
  w.addDocument(doc);
  addPolygonsToDoc(FIELDNAME, doc = new Document(), indexPoly4);
  w.addDocument(doc);
  // single segment so the query runs against one leaf
  w.forceMerge(1);

  ///// search //////
  IndexReader reader = w.getReader();
  w.close();
  IndexSearcher searcher = newSearcher(reader);

  Polygon[] searchPoly = new Polygon[] {
      new Polygon(new double[] {4d, 4d, 0d, 0d, 4d},
          new double[] {0d, 7d, 7d, 0d, 0d})
  };

  // all four indexed polygons lie within the search polygon
  Query q = LatLonShape.newPolygonQuery(FIELDNAME, QueryRelation.WITHIN, searchPoly);
  assertEquals(4, searcher.count(q));

  // the writer was already closed above; only reader and dir remain open
  IOUtils.close(reader, dir);
}