org.apache.lucene.index.Term Java Examples

The following examples show how to use org.apache.lucene.index.Term. Each example is drawn from an open-source project; the originating source file and its license are noted above each listing.
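Before the project-specific examples, here is a minimal, self-contained sketch of the class itself. The class name TermBasics and the field/value strings are illustrative only; the Term and TermQuery calls are standard Lucene API:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;

public class TermBasics {
  public static void main(String[] args) {
    // A Term is an immutable (field, text) pair; it is the unit that most
    // Lucene query and index-maintenance APIs operate on.
    Term term = new Term("title", "lucene");
    System.out.println(term.field());   // "title"
    System.out.println(term.text());    // "lucene"

    // Wrapping the Term in a TermQuery gives an exact match on that field.
    TermQuery query = new TermQuery(term);
    System.out.println(query);          // prints "title:lucene"
  }
}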
Example #1
Source File: TestRedisQParser.java    From solr-redis with Apache License 2.0
@Test
public void shouldTurnAnalysisOn() throws SyntaxError, IOException {
  when(localParamsMock.get("command")).thenReturn("smembers");
  when(localParamsMock.get("key")).thenReturn("simpleKey");
  when(localParamsMock.getBool("useAnalyzer", false)).thenReturn(true);
  when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
  when(requestMock.getSchema()).thenReturn(schema);
  when(schema.getQueryAnalyzer()).thenReturn(new WhitespaceAnalyzer());
  when(jedisMock.smembers(anyString())).thenReturn(new HashSet<>(Arrays.asList("123 124", "321")));
  redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
  final Query query = redisQParser.parse();
  verify(jedisMock).smembers("simpleKey");
  IndexSearcher searcher = new IndexSearcher(new MultiReader());
  final Set<Term> terms = extractTerms(searcher, query);
  Assert.assertEquals(3, terms.size());
}
 
Example #2
Source File: DocumentAndOp.java    From RDFS with Apache License 2.0
/**
 * Constructor for an insert, a delete or an update operation.
 * @param op the operation type: INSERT, DELETE or UPDATE
 * @param doc the document to add (required for INSERT and UPDATE, must be null for DELETE)
 * @param term the term identifying the existing document (required for DELETE and UPDATE, must be null for INSERT)
 */
public DocumentAndOp(Op op, Document doc, Term term) {
  if (op == Op.INSERT) {
    assert (doc != null);
    assert (term == null);
  } else if (op == Op.DELETE) {
    assert (doc == null);
    assert (term != null);
  } else {
    assert (op == Op.UPDATE);
    assert (doc != null);
    assert (term != null);
  }
  this.op = op;
  this.doc = doc;
  this.term = term;
}
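A hedged usage sketch of this constructor, assuming Op is the nested operation type whose INSERT, DELETE and UPDATE constants appear in the assertions above (the field name and id value are illustrative):

Document newDoc = new Document();            // the Lucene document to add, or the replacement document
Term idTerm = new Term("id", "42");          // identifies the existing document to delete or replace

DocumentAndOp insert = new DocumentAndOp(DocumentAndOp.Op.INSERT, newDoc, null);   // document only
DocumentAndOp delete = new DocumentAndOp(DocumentAndOp.Op.DELETE, null, idTerm);   // term only
DocumentAndOp update = new DocumentAndOp(DocumentAndOp.Op.UPDATE, newDoc, idTerm); // both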
 
Example #3
Source File: PhraseCountQueryBuilder.java    From pyramid with Apache License 2.0
protected Query doToQuery(QueryShardContext context) throws IOException {
    // Analyzer analyzer = context.getMapperService().searchAnalyzer();
    Analyzer analyzer = new WhitespaceAnalyzer();
    try (TokenStream source = analyzer.tokenStream(fieldName, value.toString())) {
        CachingTokenFilter stream = new CachingTokenFilter(new LowerCaseFilter(source));
        TermToBytesRefAttribute termAtt = stream.getAttribute(TermToBytesRefAttribute.class);
        if (termAtt == null) {
            return null;
        }
        List<CustomSpanTermQuery> clauses = new ArrayList<>();
        stream.reset();
        while (stream.incrementToken()) {
            Term term = new Term(fieldName, termAtt.getBytesRef());
            clauses.add(new CustomSpanTermQuery(term));
        }
        return new PhraseCountQuery(clauses.toArray(new CustomSpanTermQuery[clauses.size()]), slop, inOrder, weightedCount);
    } catch (IOException e) {
        throw new RuntimeException("Error analyzing query text", e);
    }
}
 
Example #4
Source File: TestRedisQParser.java    From solr-redis with Apache License 2.0
@Test
public void shouldAddTermsFromSortOrderDesc() throws SyntaxError, IOException {
  when(localParamsMock.get("command")).thenReturn("sort");
  when(localParamsMock.get("key")).thenReturn("simpleKey");
  when(localParamsMock.get("order")).thenReturn("desc");
  when(localParamsMock.get(QueryParsing.V)).thenReturn("string_field");
  when(jedisMock.sort(anyString(), any(SortingParams.class))).thenReturn(Arrays.asList("123", "321"));
  when(requestMock.getSchema()).thenReturn(schema);
  when(schema.getQueryAnalyzer()).thenReturn(new StandardAnalyzer());
  redisQParser = new RedisQParser("string_field", localParamsMock, paramsMock, requestMock, commandHandler);
  final Query query = redisQParser.parse();
  final ArgumentCaptor<SortingParams> argument = ArgumentCaptor.forClass(SortingParams.class);
  verify(jedisMock).sort(eq("simpleKey"), argument.capture());
  Assert.assertEquals(getSortingParamString(new SortingParams().desc()), getSortingParamString(argument.getValue()));
  IndexSearcher searcher = new IndexSearcher(new MultiReader());
  final Set<Term> terms = extractTerms(searcher, query);
  Assert.assertEquals(2, terms.size());
}
 
Example #5
Source File: TestPhraseWildcardQuery.java    From lucene-solr with Apache License 2.0
protected Term[] expandMultiTerm(String field, String term, int maxExpansions) throws IOException {
  if (maxExpansions == 0) {
    return new Term[0];
  }
  Set<Term> expansions = new HashSet<>();
  WildcardQuery wq = new WildcardQuery(new Term(field, term));
  expansion:
  for (final LeafReaderContext ctx : reader.leaves()) {
    Terms terms = ctx.reader().terms(field);
    if (terms != null) {
      TermsEnum termsEnum = wq.getTermsEnum(terms);
      while (termsEnum.next() != null) {
        expansions.add(new Term(field, termsEnum.term()));
        if (expansions.size() >= maxExpansions) {
          break expansion;
        }
      }
    }
  }
  return expansions.toArray(new Term[0]);
}
 
Example #6
Source File: TestSpanSearchEquivalence.java    From lucene-solr with Apache License 2.0
/** SpanNearQuery([A B C], N, false) ⊆ SpanNearQuery([A B C], N+1, false) */
public void testSpanNearIncreasingSloppiness3() throws Exception {
  Term t1 = randomTerm();
  Term t2 = randomTerm();
  Term t3 = randomTerm();
  SpanQuery subquery[] = new SpanQuery[] { 
                           spanQuery(new SpanTermQuery(t1)), 
                           spanQuery(new SpanTermQuery(t2)), 
                           spanQuery(new SpanTermQuery(t3)) 
                         };
  for (int i = 0; i < 10; i++) {
    SpanQuery q1 = spanQuery(new SpanNearQuery(subquery, i, false));
    SpanQuery q2 = spanQuery(new SpanNearQuery(subquery, i+1, false));
    assertSubsetOf(q1, q2);
  }
}
 
Example #7
Source File: LuceneCorpusAdapter.java    From Palmetto with GNU Affero General Public License v3.0
@Override
public void getDocumentsWithWordAsSet(String word, IntOpenHashSet documents) {
    DocsEnum docs = null;
    Term term = new Term(fieldName, word);
    try {
        int baseDocId;
        for (int i = 0; i < reader.length; i++) {
            docs = reader[i].termDocsEnum(term);
            baseDocId = contexts[i].docBase;
            if (docs != null) {
                while (docs.nextDoc() != DocsEnum.NO_MORE_DOCS) {
                    documents.add(baseDocId + docs.docID());
                }
            }
        }
    } catch (IOException e) {
        LOGGER.error("Error while requesting documents for word \"" + word + "\".", e);
    }
}
 
Example #8
Source File: TestSimilarity2.java    From lucene-solr with Apache License 2.0
/** Similar to the above; however, the field exists, but we also query with a term that doesn't exist. */
public void testEmptyTerm() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newTextField("foo", "bar", Field.Store.NO));
  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();
  IndexSearcher is = newSearcher(ir);
  
  for (Similarity sim : sims) {
    is.setSimilarity(sim);
    BooleanQuery.Builder query = new BooleanQuery.Builder();
    query.add(new TermQuery(new Term("foo", "bar")), BooleanClause.Occur.SHOULD);
    query.add(new TermQuery(new Term("foo", "baz")), BooleanClause.Occur.SHOULD);
    assertEquals(1, is.search(query.build(), 10).totalHits.value);
  }
  ir.close();
  dir.close();
}
 
Example #9
Source File: TestSynonymQuery.java    From lucene-solr with Apache License 2.0
public void testEquals() {
  QueryUtils.checkEqual(new SynonymQuery.Builder("foo").build(), new SynonymQuery.Builder("foo").build());
  QueryUtils.checkEqual(new SynonymQuery.Builder("foo").addTerm(new Term("foo", "bar")).build(),
                        new SynonymQuery.Builder("foo").addTerm(new Term("foo", "bar")).build());

  QueryUtils.checkEqual(new SynonymQuery.Builder("a").addTerm(new Term("a", "a")).addTerm(new Term("a", "b")).build(),
                        new SynonymQuery.Builder("a").addTerm(new Term("a", "b")).addTerm(new Term("a", "a")).build());

  QueryUtils.checkEqual(
      new SynonymQuery.Builder("field")
          .addTerm(new Term("field", "b"), 0.4f)
          .addTerm(new Term("field", "c"), 0.2f)
          .addTerm(new Term("field", "d")).build(),
      new SynonymQuery.Builder("field")
          .addTerm(new Term("field", "b"), 0.4f)
          .addTerm(new Term("field", "c"), 0.2f)
          .addTerm(new Term("field", "d")).build());

}
 
Example #10
Source File: IndexManager.java    From spacewalk with GNU General Public License v2.0
private void debugExplainResults(String indexName, Hits hits, IndexSearcher searcher,
        Query q, Set<Term> queryTerms)
    throws IOException {
    log.debug("Parsed Query is " + q.toString());
    log.debug("Looking at index:  " + indexName);
    for (int i = 0; i < hits.length(); i++) {
        if (i < 10) {
            Document doc = hits.doc(i);
            Float score = hits.score(i);
            Explanation ex = searcher.explain(q, hits.id(i));
            log.debug("Looking at hit<" + i + ", " + hits.id(i) + ", " + score +
                    ">: " + doc);
            log.debug("Explanation: " + ex);
            MatchingField match = new MatchingField(q.toString(), doc, queryTerms);
            String fieldName = match.getFieldName();
            String fieldValue = match.getFieldValue();
            log.debug("Guessing that matched fieldName is " + fieldName + " = " +
                    fieldValue);
        }
    }
}
 
Example #11
Source File: SubsetQueryPlugin.java    From ranger with Apache License 2.0
@Override
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
    return new QParser(qstr, localParams, params, req) {

        @Override
        public Query parse() throws SyntaxError {
            String fieldName = Preconditions.checkNotNull(localParams.get(SETVAL_FIELD_NAME));
            String countFieldName = Preconditions.checkNotNull(localParams.get(COUNT_FIELD_NAME));
            boolean allowMissingValues = Boolean.parseBoolean(Preconditions.checkNotNull(localParams.get(MISSING_VAL_ALLOWED)));
            String wildcardToken = localParams.get(WILDCARD_CHAR);

            LongValuesSource minimumNumberMatch = LongValuesSource.fromIntField(countFieldName);
            Collection<Query> queries = new ArrayList<>();

            String fieldVals = Preconditions.checkNotNull(localParams.get(SETVAL_PARAM_NAME));
            for (String v : fieldVals.split(",")) {
                queries.add(new TermQuery(new Term(fieldName, v)));
            }
            if (wildcardToken != null && !wildcardToken.equals("")) {
                queries.add(new TermQuery(new Term(fieldName, wildcardToken)));
            }
            if (allowMissingValues) {
                // To construct this query we need a little trick to build a test for an empty field, as follows:
                // (*:* AND -fieldName:*) ==> parses as: (+*:* -fieldName:*)
                // Lucene does not allow pure negative queries (although Solr allows them as a top-level construct),
                // so we need to AND with *:*.
                // We can then pass this BooleanQuery to the CoveringQuery as one of its allowed matches.
                BooleanQuery.Builder builder = new BooleanQuery.Builder();
                builder.add(new BooleanClause(new MatchAllDocsQuery(), BooleanClause.Occur.SHOULD));
                builder.add(new BooleanClause(new WildcardQuery(new Term(fieldName, "*")), BooleanClause.Occur.MUST_NOT));

                queries.add(builder.build());
            }
            return new CoveringQuery(queries, minimumNumberMatch);
        }
    };
}
 
Example #12
Source File: FuzzyLikeThisQueryTest.java    From lucene-solr with Apache License 2.0
public void testNoMatchFirstWordBug() throws Throwable {
  FuzzyLikeThisQuery flt = new FuzzyLikeThisQuery(10, analyzer);
  flt.addTerms("fernando smith", "name", 2, 1);
  Query q = flt.rewrite(searcher.getIndexReader());
  HashSet<Term> queryTerms = new HashSet<>();
  q.visit(QueryVisitor.termCollector(queryTerms));
  assertTrue("Should have variant smith", queryTerms.contains(new Term("name", "smith")));
  TopDocs topDocs = searcher.search(flt, 1);
  ScoreDoc[] sd = topDocs.scoreDocs;
  assertTrue("score docs must match 1 doc", (sd != null) && (sd.length > 0));
  Document doc = searcher.doc(sd[0].doc);
  assertEquals("Should match most similar when using 2 words", "2", doc.get("id"));
}
 
Example #13
Source File: TestTermAutomatonQuery.java    From lucene-solr with Apache License 2.0
public void testRewriteMultiPhraseWithAny() throws Exception {
  TermAutomatonQuery q = new TermAutomatonQuery("field");
  int initState = q.createState();
  int s1 = q.createState();
  int s2 = q.createState();
  int s3 = q.createState();
  q.addTransition(initState, s1, "foo");
  q.addTransition(initState, s1, "bar");
  q.addAnyTransition(s1, s2);
  q.addTransition(s2, s3, "baz");
  q.setAccept(s3, true);
  q.finish();
  
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newTextField("field", "x y z", Field.Store.NO));
  w.addDocument(doc);

  IndexReader r = w.getReader();
  Query rewrite = q.rewrite(r);
  assertTrue(rewrite instanceof MultiPhraseQuery);
  Term[][] terms = ((MultiPhraseQuery) rewrite).getTermArrays();
  assertEquals(2, terms.length);
  assertEquals(2, terms[0].length);
  assertEquals(new Term("field", "foo"), terms[0][0]);
  assertEquals(new Term("field", "bar"), terms[0][1]);
  assertEquals(1, terms[1].length);
  assertEquals(new Term("field", "baz"), terms[1][0]);

  int[] positions = ((MultiPhraseQuery) rewrite).getPositions();
  assertEquals(2, positions.length);
  assertEquals(0, positions[0]);
  assertEquals(2, positions[1]);
  
  IOUtils.close(w, r, dir);
}
 
Example #14
Source File: TestUnifiedHighlighterReanalysis.java    From lucene-solr with Apache License 2.0
@Test(expected = IllegalStateException.class)
public void testIndexSearcherNullness() throws IOException {
  String text = "This is a test. Just a test highlighting without a searcher. Feel free to ignore.";
  Query query = new TermQuery(new Term("body", "highlighting"));

  try (Directory directory = newDirectory();
       RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory);
       IndexReader indexReader = indexWriter.getReader()) {
    IndexSearcher searcher = newSearcher(indexReader);
    UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, indexAnalyzer);
    highlighter.highlightWithoutSearcher("body", query, text, 1);//should throw
  }
}
 
Example #15
Source File: SolrQueryParserBase.java    From lucene-solr with Apache License 2.0
/**
 * Builds a new WildcardQuery instance
 * @param t wildcard term
 * @return new WildcardQuery instance
 */
protected Query newWildcardQuery(Term t) {
  WildcardQuery query = new WildcardQuery(t);
  SchemaField sf = schema.getField(t.field());
  query.setRewriteMethod(sf.getType().getRewriteMethod(parser, sf));
  return query;
}
 
Example #16
Source File: TestSuggestField.java    From lucene-solr with Apache License 2.0
@Test @Slow
public void testDupSuggestFieldValues() throws Exception {
  Analyzer analyzer = new MockAnalyzer(random());
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwcWithSuggestField(analyzer, "suggest_field"));
  final int num = Math.min(1000, atLeast(100));
  int[] weights = new int[num];
  for(int i = 0; i < num; i++) {
    Document document = new Document();
    weights[i] = random().nextInt(Integer.MAX_VALUE);
    document.add(new SuggestField("suggest_field", "abc", weights[i]));
    iw.addDocument(document);

    if (usually()) {
      iw.commit();
    }
  }

  DirectoryReader reader = iw.getReader();
  Entry[] expectedEntries = new Entry[num];
  Arrays.sort(weights);
  for (int i = 1; i <= num; i++) {
    expectedEntries[i - 1] = new Entry("abc", weights[num - i]);
  }

  SuggestIndexSearcher suggestIndexSearcher = new SuggestIndexSearcher(reader);
  PrefixCompletionQuery query = new PrefixCompletionQuery(analyzer, new Term("suggest_field", "abc"));
  TopSuggestDocs lookupDocs = suggestIndexSearcher.suggest(query, num, false);
  assertSuggestions(lookupDocs, expectedEntries);

  reader.close();
  iw.close();
}
 
Example #17
Source File: CustomSpanQuery.java    From pyramid with Apache License 2.0
/**
 * Build a map of terms to termcontexts, for use in constructing SpanWeights
 * @lucene.internal
 */
public static Map<Term, TermContext> getTermContexts(Collection<CustomSpanWeight> weights) {
  Map<Term, TermContext> terms = new TreeMap<>();
  for (CustomSpanWeight w : weights) {
    w.extractTermContexts(terms);
  }
  return terms;
}
 
Example #18
Source File: DistributedAlfrescoSolrTrackerStateIT.java    From SearchServices with GNU Lesser General Public License v3.0
private static Acl createAndIndexSomeAclData()
{
    try {

        AclChangeSet aclChangeSet = getAclChangeSet(1);

        Acl acl = getAcl(aclChangeSet);
        Acl acl2 = getAcl(aclChangeSet);

        AclReaders aclReaders = getAclReaders(aclChangeSet, acl, singletonList("joel"), singletonList("phil"), null);
        AclReaders aclReaders2 = getAclReaders(aclChangeSet, acl2, singletonList("jim"), singletonList("phil"), null);

        indexAclChangeSet(aclChangeSet, asList(acl, acl2), asList(aclReaders, aclReaders2));

        BooleanQuery.Builder builder = new BooleanQuery.Builder();
        builder.add(new BooleanClause(new TermQuery(new Term(QueryConstants.FIELD_SOLR4_ID, "TRACKER!STATE!ACLTX")), BooleanClause.Occur.MUST));
        builder.add(new BooleanClause(LegacyNumericRangeQuery.newLongRange(QueryConstants.FIELD_S_ACLTXID, aclChangeSet.getId(), aclChangeSet.getId() + 1, true, false), BooleanClause.Occur.MUST));
        BooleanQuery waitForQuery = builder.build();
        waitForDocCountAllCores(waitForQuery, 1, MAX_WAIT_TIME);

        return acl;
    }
    catch (Exception exception)
    {
        throw new RuntimeException(exception);
    }
}
 
Example #19
Source File: TestUnifiedHighlighter.java    From lucene-solr with Apache License 2.0
public void testNotReanalyzed() throws Exception {
  if (fieldType == UHTestHelper.reanalysisType) {
    return; // we're testing the *other* cases
  }

  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);

  Field body = new Field("body", "", fieldType);
  Document doc = new Document();
  doc.add(body);

  body.setStringValue("This is a test. Just a test highlighting from postings. Feel free to ignore.");
  iw.addDocument(doc);

  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      throw new AssertionError("shouldn't be called");
    }
  });
  Query query = new TermQuery(new Term("body", "highlighting"));
  TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER);
  assertEquals(1, topDocs.totalHits.value);
  String snippets[] = highlighter.highlight("body", query, topDocs);
  assertEquals(1, snippets.length);
  assertEquals("Just a test <b>highlighting</b> from postings. ", snippets[0]);

  ir.close();
}
 
Example #20
Source File: MtasSpanSequenceQuery.java    From mtas with Apache License 2.0
@Override
public void extractTermContexts(Map<Term, TermContext> contexts) {
  for (MtasSpanSequenceQueryWeight w : subWeights) {
    w.spanWeight.extractTermContexts(contexts);
  }
  if (ignoreWeight != null) {
    ignoreWeight.extractTermContexts(contexts);
  }
}
 
Example #21
Source File: BaseGroupSelectorTestCase.java    From lucene-solr with Apache License 2.0
public void testSortByRelevance() throws IOException {

    Shard shard = new Shard();
    indexRandomDocs(shard.writer);

    String[] query = new String[]{ "foo", "bar", "baz" };
    Query topLevel = new TermQuery(new Term("text", query[random().nextInt(query.length)]));

    IndexSearcher searcher = shard.getIndexSearcher();
    GroupingSearch grouper = new GroupingSearch(getGroupSelector());
    grouper.setGroupDocsLimit(10);
    TopGroups<T> topGroups = grouper.search(searcher, topLevel, 0, 5);
    TopDocs topDoc = searcher.search(topLevel, 1);
    for (int i = 0; i < topGroups.groups.length; i++) {
      // Each group should have a result set equal to that returned by the top-level query,
      // filtered by the group value.
      Query filtered = new BooleanQuery.Builder()
          .add(topLevel, BooleanClause.Occur.MUST)
          .add(filterQuery(topGroups.groups[i].groupValue), BooleanClause.Occur.FILTER)
          .build();
      TopDocs td = searcher.search(filtered, 10);
      assertScoreDocsEquals(topGroups.groups[i].scoreDocs, td.scoreDocs);
      if (i == 0) {
        assertEquals(td.scoreDocs[0].doc, topDoc.scoreDocs[0].doc);
        assertEquals(td.scoreDocs[0].score, topDoc.scoreDocs[0].score, 0);
      }
    }

    shard.close();
}
 
Example #22
Source File: LuceneIndex.java    From cjs_ssms with GNU General Public License v2.0
/**
 * Updates the index document for the given user.
 *
 * @param user the user whose index entry is rewritten
 * @throws Exception if the index writer cannot be obtained or closed
 */
public void updateIndex(UUser user) throws Exception {
  IndexWriter writer = getWriter();
  Document doc = new Document();
  doc.add(new StringField("userid", String.valueOf(user.getId()), Field.Store.YES));
  doc.add(new TextField("username", user.getUsername(), Field.Store.YES));
  doc.add(new TextField("description", user.getDescription(), Field.Store.YES));

  writer.updateDocument(new Term("userid", String.valueOf(user.getId())), doc);
  writer.close();
}
 
Example #23
Source File: ExtendedCommonTermsQuery.java    From Elasticsearch with Apache License 2.0
@Override
protected Query newTermQuery(Term term, TermContext context) {
    if (fieldType == null) {
        return super.newTermQuery(term, context);
    }
    final Query query = fieldType.queryStringTermQuery(term);
    if (query == null) {
        return super.newTermQuery(term, context);
    } else {
        return query;
    }
}
 
Example #24
Source File: TestFuzzyQuery.java    From lucene-solr with Apache License 2.0
public void testDistanceAsEditsSearching() throws Exception {
  Directory index = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), index);
  addDoc("foobar", w);
  addDoc("test", w);
  addDoc("working", w);
  IndexReader reader = w.getReader();
  IndexSearcher searcher = newSearcher(reader);
  w.close();
  
  FuzzyQuery q = new FuzzyQuery(new Term("field", "fouba"), 2);
  ScoreDoc[] hits = searcher.search(q, 10).scoreDocs;
  assertEquals(1, hits.length);
  assertEquals("foobar", searcher.doc(hits[0].doc).get("field"));
  
  q = new FuzzyQuery(new Term("field", "foubara"), 2);
  hits = searcher.search(q, 10).scoreDocs;
  assertEquals(1, hits.length);
  assertEquals("foobar", searcher.doc(hits[0].doc).get("field"));
  
  expectThrows(IllegalArgumentException.class, () -> {
    new FuzzyQuery(new Term("field", "t"), 3);
  });

  reader.close();
  index.close();
}
 
Example #25
Source File: TestUnifiedHighlighterMTQ.java    From lucene-solr with Apache License 2.0
public void testPositionSensitiveWithWildcardDoesNotHighlight() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);
  Document doc = new Document();
  doc.add(new Field("body", "iterate insect ipswitch illinois indirect", fieldType));
  doc.add(newTextField("id", "id", Field.Store.YES));

  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = randomUnifiedHighlighter(searcher, indexAnalyzer);
  int docID = searcher.search(new TermQuery(new Term("id", "id")), 1).scoreDocs[0].doc;

  PhraseQuery pq = new PhraseQuery.Builder()
      .add(new Term("body", "consent"))
      .add(new Term("body", "order"))
      .build();

  BooleanQuery query = new BooleanQuery.Builder()
      .add(new WildcardQuery(new Term("body", "enforc*")), BooleanClause.Occur.MUST)
      .add(pq, BooleanClause.Occur.MUST)
      .build();

  int[] docIds = new int[]{docID};

  String snippets[] = highlighter.highlightFields(new String[]{"body"}, query, docIds, new int[]{2}).get("body");
  assertEquals(1, snippets.length);
  assertEquals("iterate insect ipswitch illinois indirect", snippets[0]);
  ir.close();
}
 
Example #26
Source File: TestComplexExplanations.java    From lucene-solr with Apache License 2.0
public void testFQ5() throws Exception {
  TermQuery query = new TermQuery(new Term(FIELD, "xx"));
  Query filtered = new BooleanQuery.Builder()
      .add(new BoostQuery(query, 0), Occur.MUST)
      .add(matchTheseItems(new int[] {1,3}), Occur.FILTER)
      .build();
  bqtest(filtered, new int[] {3});
}
 
Example #27
Source File: BlendedTermQuery.java    From Elasticsearch with Apache License 2.0
public BlendedTermQuery(Term[] terms, float[] boosts) {
    if (terms == null || terms.length == 0) {
        throw new IllegalArgumentException("terms must not be null or empty");
    }
    if (boosts != null && boosts.length != terms.length) {
        throw new IllegalArgumentException("boosts must have the same size as terms");
    }
    this.terms = terms;
    this.boosts = boosts;
}
 
Example #28
Source File: CompositeTermRecognitionProcessor.java    From jate with GNU Lesser General Public License v3.0
@Override
public Boolean candidateExtraction(SolrCore core, String jatePropertyFile)
        throws IOException, JATEException {
    SolrIndexSearcher indexSearcher = core.getSearcher().get();
    IndexWriter writerIn = null;
    try {
        writerIn = core.getSolrCoreState().getIndexWriter(core).get();
        Map<String, List<CopyField>> copyFields = core.getLatestSchema().getCopyFieldsMap();

        for (int i = 0; i < indexSearcher.maxDoc(); i++) {
            Document doc = indexSearcher.doc(i);

            SolrUtil.copyFields(copyFields, DEFAULT_BOOST_VALUE, doc);

            writerIn.updateDocument(new Term("id", doc.get("id")), doc);
        }
        writerIn.commit();

        return true;
    } finally {
        indexSearcher.close();
        if (writerIn != null) {
            writerIn.close();
        }
    }
}
 
Example #29
Source File: TestUnifiedHighlighterMTQ.java    From lucene-solr with Apache License 2.0
public void testCustomSpanQueryHighlighting() throws Exception {
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, indexAnalyzer);
  Document doc = new Document();
  doc.add(new Field("body", "alpha bravo charlie delta echo foxtrot golf hotel india juliet", fieldType));
  doc.add(newTextField("id", "id", Field.Store.YES));

  iw.addDocument(doc);
  IndexReader ir = iw.getReader();
  iw.close();

  IndexSearcher searcher = newSearcher(ir);
  UnifiedHighlighter highlighter = new UnifiedHighlighter(searcher, indexAnalyzer);

  int docId = searcher.search(new TermQuery(new Term("id", "id")), 1).scoreDocs[0].doc;

  WildcardQuery wildcardQuery = new WildcardQuery(new Term("body", "foxtr*"));
  SpanMultiTermQueryWrapper<WildcardQuery> wildcardQueryWrapper = new SpanMultiTermQueryWrapper<>(wildcardQuery);

  SpanQuery wrappedQuery = new MyWrapperSpanQuery(wildcardQueryWrapper);

  BooleanQuery query = new BooleanQuery.Builder()
      .add(wrappedQuery, BooleanClause.Occur.SHOULD)
      .build();

  int[] docIds = new int[]{docId};

  String snippets[] = highlighter.highlightFields(new String[]{"body"}, query, docIds, new int[]{2}).get("body");
  assertEquals(1, snippets.length);
  assertEquals("alpha bravo charlie delta echo <b>foxtrot</b> golf hotel india juliet", snippets[0]);
  ir.close();
}
 
Example #30
Source File: AbstractLTRQueryTestCase.java    From ltr4l with Apache License 2.0
protected FieldFeatureExtractorFactory getIDF(String featureName, String fieldName, IndexReaderContext context, Term... terms){
  FieldFeatureExtractorFactory factory = new FieldFeatureIDFExtractorFactory(featureName, fieldName);
  if(context != null){
    factory.init(context, terms);
  }
  return factory;
}