Java Code Examples for org.apache.lucene.analysis.Token

The following examples show how to use org.apache.lucene.analysis.Token. These examples are extracted from open source projects.
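
Before the examples, here is a minimal, self-contained sketch of the Token API they all build on. It is illustrative, not taken from any of the projects below, and it assumes lucene-core plus lucene-test-framework (which provides CannedTokenStream) on the classpath. A Token carries a term's text and character offsets, plus an optional leading position increment, where 0 stacks a token on the previous position, as synonyms do:

import java.io.IOException;
import org.apache.lucene.analysis.CannedTokenStream;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

public class TokenPrimer {
  public static void main(String[] args) throws IOException {
    Token hello = new Token("hello", 0, 5);  // Token(text, startOffset, endOffset)
    Token hi    = new Token("hi", 0, 0, 5);  // Token(text, posInc, start, end); posInc 0 stacks "hi" on "hello"
    Token world = new Token("world", 1, 6, 11);

    // CannedTokenStream (test framework) replays pre-built tokens as a TokenStream.
    try (TokenStream ts = new CannedTokenStream(hello, hi, world)) {
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      OffsetAttribute offsets = ts.addAttribute(OffsetAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        System.out.println(term + " [" + offsets.startOffset() + "-" + offsets.endOffset() + ")");
      }
      ts.end();
    }
  }
}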
Example 1
Source Project: lucene-solr   Source File: TestField.java    License: Apache License 2.0
public void testTextFieldReader() throws Exception {
  Field field = new TextField("foo", new StringReader("bar"));

  trySetByteValue(field);
  trySetBytesValue(field);
  trySetBytesRefValue(field);
  trySetDoubleValue(field);
  trySetIntValue(field);
  trySetFloatValue(field);
  trySetLongValue(field);
  field.setReaderValue(new StringReader("foobar"));
  trySetShortValue(field);
  trySetStringValue(field);
  field.setTokenStream(new CannedTokenStream(new Token("foo", 0, 3)));
    
  assertNotNull(field.readerValue());
}
 
Example 2
Source Project: lucene-solr   Source File: AnalyzingSuggesterTest.java    License: Apache License 2.0
public void testTooManyExpansions() throws Exception {

    final Analyzer a = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName) {
        return new TokenStreamComponents(r -> {}, new CannedTokenStream(
            new Token("a", 0, 1),
            new Token("b", 0, 0, 1)));
      }
    };

    Directory tempDir = getDirectory();
    AnalyzingSuggester suggester = new AnalyzingSuggester(tempDir, "suggest", a, a, 0, 256, 1, true);
    suggester.build(new InputArrayIterator(new Input[] {new Input("a", 1)}));
    assertEquals("[a/1]", suggester.lookup("a", false, 1).toString());
    IOUtils.close(a, tempDir);
  }
 
Example 3
@SuppressWarnings("unused")
SetDictionary(String words, Analyzer analyzer) throws IOException {
	wordSet = new HashSet<String>();
	if (words != null) {
		// Replaces the legacy loop that was commented out here, which reused a
		// Token via TokenStream.next(reusableToken); next(Token) was removed in
		// Lucene 4.0 in favor of the attribute-based API below.
		try (TokenStream tokenStream = analyzer.tokenStream(NodeDocument.TEXT_FIELD, new StringReader(words))) {
			CharTermAttribute termAtt = tokenStream.addAttribute(CharTermAttribute.class);
			tokenStream.reset();
			while (tokenStream.incrementToken()) {
				String term = termAtt.toString();
				if (!term.isEmpty()) {
					wordSet.add(term);
				}
			}
			tokenStream.end();
		}
	}
}
 
Example 4
Source Project: lucene-solr   Source File: TestTermAutomatonQuery.java    License: Apache License 2.0
public void testTermDoesNotExist() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newTextField("field", "x y z", Field.Store.NO));
  w.addDocument(doc);

  IndexReader r = w.getReader();
  IndexSearcher s = newSearcher(r);

  TokenStream ts = new CannedTokenStream(new Token[] {
      token("a", 1, 1),
    });

  TermAutomatonQuery q = new TokenStreamToTermAutomatonQuery().toQuery("field", ts);
  // System.out.println("DOT: " + q.toDot());
  assertEquals(0, s.search(q, 1).totalHits.value);

  w.close();
  r.close();
  dir.close();
}
 
Example 5
Source Project: lucene-solr   Source File: TestTermAutomatonQuery.java    License: Apache License 2.0
public void testOneTermDoesNotExist() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newTextField("field", "x y z", Field.Store.NO));
  w.addDocument(doc);

  IndexReader r = w.getReader();
  IndexSearcher s = newSearcher(r);

  TokenStream ts = new CannedTokenStream(new Token[] {
      token("a", 1, 1),
      token("x", 1, 1),
    });

  TermAutomatonQuery q = new TokenStreamToTermAutomatonQuery().toQuery("field", ts);
  // System.out.println("DOT: " + q.toDot());
  assertEquals(0, s.search(q, 1).totalHits.value);

  IOUtils.close(w, r, dir);
}
 
Example 6
Source Project: lucene-solr   Source File: TestPostingsOffsets.java    License: Apache License 2.0
private void checkTokens(Token[] field1, Token[] field2) throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
  boolean success = false;
  try {
    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    // store some term vectors for the checkindex cross-check
    ft.setStoreTermVectors(true);
    ft.setStoreTermVectorPositions(true);
    ft.setStoreTermVectorOffsets(true);
   
    Document doc = new Document();
    doc.add(new Field("body", new CannedTokenStream(field1), ft));
    doc.add(new Field("body", new CannedTokenStream(field2), ft));
    riw.addDocument(doc);
    riw.close();
    success = true;
  } finally {
    if (success) {
      IOUtils.close(dir);
    } else {
      IOUtils.closeWhileHandlingException(riw, dir);
    }
  }
}
 
Example 7
Source Project: lucene-solr   Source File: TestPostingsOffsets.java    License: Apache License 2.0
private void checkTokens(Token[] tokens) throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
  boolean success = false;
  try {
    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    // store some term vectors for the checkindex cross-check
    ft.setStoreTermVectors(true);
    ft.setStoreTermVectorPositions(true);
    ft.setStoreTermVectorOffsets(true);
   
    Document doc = new Document();
    doc.add(new Field("body", new CannedTokenStream(tokens), ft));
    riw.addDocument(doc);
    riw.close();
    success = true;
  } finally {
    if (success) {
      IOUtils.close(dir);
    } else {
      IOUtils.closeWhileHandlingException(riw, dir);
    }
  }
}
 
Example 8
public void testDups(final String expected, final Token... tokens)
  throws Exception {

  final Iterator<Token> toks = Arrays.asList(tokens).iterator();
  final TokenStream ts = new RemoveDuplicatesTokenFilter(
    (new TokenStream() {
        CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
        OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
        PositionIncrementAttribute posIncAtt = addAttribute(PositionIncrementAttribute.class);
        @Override
        public boolean incrementToken() {
          if (toks.hasNext()) {
            clearAttributes();
            Token tok = toks.next();
            termAtt.setEmpty().append(tok);
            offsetAtt.setOffset(tok.startOffset(), tok.endOffset());
            posIncAtt.setPositionIncrement(tok.getPositionIncrement());
            return true;
          } else {
            return false;
          }
        }
      }));
  
  assertTokenStreamContents(ts, expected.split("\\s"));   
}
 
Example 9
Source Project: lucene-solr   Source File: TestTrimFilter.java    License: Apache License 2.0
public void testTrim() throws Exception {
  char[] a = " a ".toCharArray();
  char[] b = "b   ".toCharArray();
  char[] ccc = "cCc".toCharArray();
  char[] whitespace = "   ".toCharArray();
  char[] empty = "".toCharArray();

  TokenStream ts = new CannedTokenStream(new Token(new String(a, 0, a.length), 1, 5),
                  new Token(new String(b, 0, b.length), 6, 10),
                  new Token(new String(ccc, 0, ccc.length), 11, 15),
                  new Token(new String(whitespace, 0, whitespace.length), 16, 20),
                  new Token(new String(empty, 0, empty.length), 21, 21));
  ts = new TrimFilter(ts);

  assertTokenStreamContents(ts, new String[] { "a", "b", "cCc", "", ""});
}
 
Example 10
Source Project: lucene-solr   Source File: TestAsciiFoldingFilterFactory.java    License: Apache License 2.0
public void testMultiTermAnalysis() throws IOException {
  TokenFilterFactory factory = new ASCIIFoldingFilterFactory(Collections.emptyMap());
  TokenStream stream = new CannedTokenStream(new Token("Été", 0, 3));
  stream = factory.create(stream);
  assertTokenStreamContents(stream, new String[] { "Ete" });

  stream = new CannedTokenStream(new Token("Été", 0, 3));
  stream = factory.normalize(stream);
  assertTokenStreamContents(stream, new String[] { "Ete" });

  factory = new ASCIIFoldingFilterFactory(new HashMap<>(Collections.singletonMap("preserveOriginal", "true")));
  stream = new CannedTokenStream(new Token("Été", 0, 3));
  stream = factory.create(stream);
  assertTokenStreamContents(stream, new String[] { "Ete", "Été" });

  stream = new CannedTokenStream(new Token("Été", 0, 3));
  stream = factory.normalize(stream);
  assertTokenStreamContents(stream, new String[] { "Ete" });
}
 
Example 11
Source Project: lucene-solr   Source File: TestProtectedTermFilter.java    License: Apache License 2.0
public void testBasic() throws IOException {

    CannedTokenStream cts = new CannedTokenStream(
        new Token("Alice", 1, 0, 5),
        new Token("Bob", 1, 6, 9),
        new Token("Clara", 1, 10, 15),
        new Token("David", 1, 16, 21)
    );

    CharArraySet protectedTerms = new CharArraySet(5, true);
    protectedTerms.add("bob");

    TokenStream ts = new ProtectedTermFilter(protectedTerms, cts, LowerCaseFilter::new);
    assertTokenStreamContents(ts, new String[]{ "alice", "Bob", "clara", "david" });

  }
 
Example 12
Source Project: lucene-solr   Source File: TestConcatenatingTokenStream.java    License: Apache License 2.0
public void testOffsetGaps() throws IOException {
  CannedTokenStream cts1 = new CannedTokenStream(2, 10,
      new Token("a", 0, 1), new Token("b", 2, 3));
  CannedTokenStream cts2 = new CannedTokenStream(2, 10,
      new Token("c", 0, 1), new Token("d", 2, 3));

  TokenStream ts = new ConcatenatingTokenStream(cts1, cts2);
  assertTokenStreamContents(ts,
      new String[] { "a", "b", "c", "d" },
      new int[]{      0,   2,   10,  12 },
      new int[]{      1,   3,   11,  13 },
      null,
      new int[]{      1,   1,   3,   1 },
      null, 20, 2, null, false, null
      );
}
 
Example 13
Source Project: lucene-solr   Source File: TestFlattenGraphFilter.java    License: Apache License 2.0
public void testAlreadyFlatten() throws Exception {
  TokenStream in = new CannedTokenStream(0, 12, new Token[] {
      token("wtf", 1, 1, 0, 3),
      token("what", 0, 1, 0, 3),
      token("wow", 0, 1, 0, 3),
      token("the", 1, 1, 0, 3),
      token("that's", 0, 1, 0, 3),
      token("fudge", 1, 1, 0, 3),
      token("funny", 0, 1, 0, 3),
      token("happened", 1, 1, 4, 12)
    });

  TokenStream out = new FlattenGraphFilter(in);

  // ... but on output, it's flattened to wtf/what/wow that's/the fudge/funny happened:
  assertTokenStreamContents(out,
                            new String[] {"wtf", "what", "wow", "the", "that's", "fudge", "funny", "happened"},
                            new int[] {0, 0, 0, 0, 0, 0, 0, 4},
                            new int[] {3, 3, 3, 3, 3, 3, 3, 12},
                            new int[] {1, 0, 0, 1, 0, 1, 0, 1},
                            new int[] {1, 1, 1, 1, 1, 1, 1, 1},
                            12);
}
 
Example 14
Source Project: lucene-solr   Source File: TestFlattenGraphFilter.java    License: Apache License 2.0
public void testNonGraph() throws Exception {
  TokenStream in = new CannedTokenStream(0, 22, new Token[] {
      token("hello", 1, 1, 0, 5),
      token("pseudo", 1, 1, 6, 12),
      token("world", 1, 1, 13, 18),
      token("fun", 1, 1, 19, 22),
    });


  TokenStream out = new FlattenGraphFilter(in);

  // no graph structure on input, so the stream passes through unchanged:
  assertTokenStreamContents(out,
                            new String[] {"hello", "pseudo", "world", "fun"},
                            new int[] {0, 6, 13, 19},
                            new int[] {5, 12, 18, 22},
                            new int[] {1, 1, 1, 1},
                            new int[] {1, 1, 1, 1},
                            22);
}
 
Example 15
Source Project: lucene-solr   Source File: TestFlattenGraphFilter.java    License: Apache License 2.0
public void testSimpleHole() throws Exception {
  TokenStream in = new CannedTokenStream(0, 13, new Token[] {
      token("hello", 1, 1, 0, 5),
      token("hole", 2, 1, 6, 10),
      token("fun", 1, 1, 11, 13),
    });


  TokenStream out = new FlattenGraphFilter(in);

  // the hole (position increment 2 on "hole") is preserved on output:
  assertTokenStreamContents(out,
                            new String[] {"hello", "hole", "fun"},
                            new int[] {0, 6, 11},
                            new int[] {5, 10, 13},
                            new int[] {1, 2, 1},
                            new int[] {1, 1, 1},
                            13);
}
 
Example 16
Source Project: lucene-solr   Source File: TestFlattenGraphFilter.java    License: Apache License 2.0
public void testHoleUnderSyn() throws Exception {
  // Tests a StopFilter after SynFilter where a stopword in a syn is removed
  //
  //   wizard of oz -> woz syn, but then "of" becomes a hole

  TokenStream in = new CannedTokenStream(0, 12, new Token[] {
      token("wizard", 1, 1, 0, 6),
      token("woz", 0, 3, 0, 12),
      token("oz", 2, 1, 10, 12),
    });


  TokenStream out = new FlattenGraphFilter(in);

  assertTokenStreamContents(out,
                            new String[] {"wizard", "woz", "oz"},
                            new int[] {0, 0, 10},
                            new int[] {6, 12, 12},
                            new int[] {1, 0, 2},
                            new int[] {1, 3, 1},
                            12);
}
 
Example 17
Source Project: lucene-solr   Source File: TestFlattenGraphFilter.java    License: Apache License 2.0
public void testStrangelyNumberedNodes() throws Exception {

    // Uses only nodes 0, 2, 3, i.e. 1 is just never used (it is not a hole!!)
    TokenStream in = new CannedTokenStream(0, 27, new Token[] {
        token("dog", 1, 3, 0, 5),
        token("puppy", 0, 3, 0, 5),
        token("flies", 3, 1, 6, 11),
      });

    TokenStream out = new FlattenGraphFilter(in);

    assertTokenStreamContents(out,
                              new String[] {"dog", "puppy", "flies"},
                              new int[] {0, 0, 6},
                              new int[] {5, 5, 11},
                              new int[] {1, 0, 1},
                              new int[] {1, 1, 1},
                              27);
  }
 
Example 18
Source Project: lucene-solr   Source File: TestFieldInvertState.java    License: Apache License 2.0
public void testBasic() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  iwc.setSimilarity(NeverForgetsSimilarity.INSTANCE);
  IndexWriter w = new IndexWriter(dir, iwc);
  Document doc = new Document();
  Field field = new Field("field",
                          new CannedTokenStream(new Token("a", 0, 1),
                                                new Token("b", 2, 3),
                                                new Token("c", 4, 5)),
                          TextField.TYPE_NOT_STORED);
  doc.add(field);
  w.addDocument(doc);
  FieldInvertState fis = NeverForgetsSimilarity.INSTANCE.lastState;
  assertEquals(1, fis.getMaxTermFrequency());
  assertEquals(3, fis.getUniqueTermCount());
  assertEquals(0, fis.getNumOverlap());
  assertEquals(3, fis.getLength());
  IOUtils.close(w, dir);
}
 
Example 19
Source Project: lucene-solr   Source File: TestPostingsOffsets.java    License: Apache License 2.0
public void testLegalbutVeryLargeOffsets() throws Exception {
  Directory dir = newDirectory();
  IndexWriter iw = new IndexWriter(dir, newIndexWriterConfig(null));
  Document doc = new Document();
  Token t1 = new Token("foo", 0, Integer.MAX_VALUE-500);
  if (random().nextBoolean()) {
    t1.setPayload(new BytesRef("test"));
  }
  Token t2 = new Token("foo", Integer.MAX_VALUE-500, Integer.MAX_VALUE);
  TokenStream tokenStream = new CannedTokenStream(
      new Token[] { t1, t2 }
  );
  FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
  ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
  // store some term vectors for the checkindex cross-check
  ft.setStoreTermVectors(true);
  ft.setStoreTermVectorPositions(true);
  ft.setStoreTermVectorOffsets(true);
  Field field = new Field("foo", tokenStream, ft);
  doc.add(field);
  iw.addDocument(doc);
  iw.close();
  dir.close();
}
 
Example 20
Source Project: lucene-solr   Source File: FixedShingleFilterTest.java    License: Apache License 2.0
public void testWithStopwords() throws IOException {

    TokenStream ts = new CannedTokenStream(
        new Token("please", 0, 6),
        new Token("divide", 7, 13),
        new Token("sentence", 2, 19, 27),
        new Token("shingles", 2, 33, 41)
    );

    assertTokenStreamContents(new FixedShingleFilter(ts, 3),
        new String[]{"please divide _", "divide _ sentence", "sentence _ shingles"},
        new int[]{0, 7, 19,},
        new int[]{13, 27, 41,},
        new String[]{"shingle", "shingle", "shingle",},
        new int[]{1, 1, 2,});

  }
 
Example 21
Source Project: lucene-solr   Source File: FixedShingleFilterTest.java    License: Apache License 2.0
public void testIncomingGraphs() throws IOException {

    // b/a c b/a d

    TokenStream ts = new CannedTokenStream(
        new Token("b", 0, 1),
        new Token("a", 0, 0, 1),
        new Token("c", 2, 3),
        new Token("b", 4, 5),
        new Token("a", 0, 4, 5),
        new Token("d", 6, 7)
    );

    assertTokenStreamContents(new FixedShingleFilter(ts, 2),
          new String[] { "b c", "a c", "c b", "c a", "b d", "a d" },
          new int[] {    0,     0,     2,     2,     4,     4 },
          new int[] {    3,     3,     5,     5,     7,     7 },
          new int[] {    1,     0,     1,     0,     1,     0 });
  }
 
Example 22
Source Project: lucene-solr   Source File: FixedShingleFilterTest.java    License: Apache License 2.0
public void testShinglesSpanningGraphs() throws IOException {

    TokenStream ts = new CannedTokenStream(
        new Token("b", 0, 1),
        new Token("a", 0, 0, 1),
        new Token("c", 2, 3),
        new Token("b", 4, 5),
        new Token("a", 0, 4, 5),
        new Token("d", 6, 7)
    );

    assertTokenStreamContents(new FixedShingleFilter(ts, 3),
          new String[] { "b c b", "b c a", "a c b", "a c a", "c b d", "c a d" },
          new int[] {    0,        0,      0,       0,       2,        2,     },
          new int[] {    5,        5,      5,       5,       7,        7,     },
          new int[] {    1,        0,      0,       0,       1,        0,     });
  }
 
Example 23
Source Project: lucene-solr   Source File: FixedShingleFilterTest.java    License: Apache License 2.0
public void testTrailingGraphsOfDifferingLengths() throws IOException {

    // a b:3/c d e f
    TokenStream ts = new CannedTokenStream(
        new Token("a", 0, 1),
        new Token("b", 1, 2, 3, 3),
        new Token("c", 0, 2, 3),
        new Token("d", 2, 3),
        new Token("e", 2, 3),
        new Token("f", 4, 5)
    );

    assertTokenStreamContents(new FixedShingleFilter(ts, 3),
        new String[]{ "a b f", "a c d", "c d e", "d e f"});

  }
 
Example 24
Source Project: lucene-solr   Source File: TestField.java    License: Apache License 2.0
public void testTextFieldString() throws Exception {
  Field fields[] = new Field[] {
      new TextField("foo", "bar", Field.Store.NO),
      new TextField("foo", "bar", Field.Store.YES)
  };

  for (Field field : fields) {
    trySetByteValue(field);
    trySetBytesValue(field);
    trySetBytesRefValue(field);
    trySetDoubleValue(field);
    trySetIntValue(field);
    trySetFloatValue(field);
    trySetLongValue(field);
    trySetReaderValue(field);
    trySetShortValue(field);
    field.setStringValue("baz");
    field.setTokenStream(new CannedTokenStream(new Token("foo", 0, 3)));
    
    assertEquals("baz", field.stringValue());
  }
}
 
Example 25
Source Project: uyuni   Source File: NGramQuery.java    License: GNU General Public License v2.0
/**
 * Constructor
 * @param field name of the field
 * @param queryTerms String containing a term or a series of terms to search.
 * The string will be parsed and broken up into a series of NGrams.
 * @param min minimum NGram length
 * @param max maximum NGram length
 * @throws IOException something went wrong parsing queryTerms
 */
public NGramQuery(String field, String queryTerms, int min, int max)
    throws IOException {
    NGramAnalyzer nga = new NGramAnalyzer(min, max);
    TokenStream ngrams = nga.tokenStream(new StringReader(queryTerms));
    Token token;
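    // Legacy (pre-4.0) Lucene API: TokenStream.next() returns the next Token
    // or null at end of stream, and termBuffer() exposes the term's char buffer.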
    while ((token = ngrams.next()) != null) {
        Term t = new Term(field, new String(token.termBuffer()).trim());
        add(new TermQuery(t), BooleanClause.Occur.SHOULD);
    }
}
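
For context, here is a hedged usage sketch of the constructor above. The field name, term, and NGram bounds are made up; it assumes NGramQuery extends BooleanQuery (which the add(...) call implies) and that an IndexSearcher named searcher is available:

// Hypothetical usage; "name", "aspell", and the 1..3 bounds are illustrative.
NGramQuery query = new NGramQuery("name", "aspell", 1, 3);
// The constructor ORs together a TermQuery for each NGram of "aspell":
// a, s, p, e, l, as, sp, pe, el, ll, asp, spe, pel, ell
TopDocs results = searcher.search(query, 10);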
 
Example 26
Source Project: uyuni   Source File: NGramAnalyzerTest.java    License: GNU General Public License v2.0
public void testTokenStream() throws Exception {
    NGramAnalyzer nga = new NGramAnalyzer(min_ngram, max_ngram);
    TokenStream ngrams = nga.tokenStream(new StringReader("aspell"));
    Token token;
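    // Same legacy (pre-4.0) next()/termBuffer() API as in NGramQuery above.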
    String result = "";
    while ((token = ngrams.next()) != null) {
        result += new String(token.termBuffer()).trim() + ",";
    }
    log.info("Created a ngram token stream, this is what it looks like: "
            + result);

    assertTrue("testTokenStream", result.compareTo("a,s,p,e,l,l,as,sp,pe," +
            "el,ll,asp,spe,pel,ell,aspe,spel,pell,aspel,spell,") == 0);

}
 
Example 27
Source Project: lucene-solr   Source File: TestPostingsOffsets.java    License: Apache License 2.0
public void testNegativeOffsets() throws Exception {
  expectThrows(IllegalArgumentException.class, () -> {
    checkTokens(new Token[] { 
        makeToken("foo", 1, -1, -1)
    });
  });
}
 
Example 28
Source Project: lucene-solr   Source File: HighlighterTest.java    License: Apache License 2.0
public void testNotRewriteMultiTermQuery() throws IOException {
  // field "bar": (not the field we ultimately want to extract)
  MultiTermQuery mtq = new TermRangeQuery("bar", new BytesRef("aa"), new BytesRef("zz"), true, true) ;
  WeightedSpanTermExtractor extractor = new WeightedSpanTermExtractor() {
    @Override
    protected void extract(Query query, float boost, Map<String, WeightedSpanTerm> terms) throws IOException {
      assertEquals(mtq, query);
      super.extract(query, boost, terms);
    }
  };
  extractor.setExpandMultiTermQuery(true);
  extractor.setMaxDocCharsToAnalyze(51200);
  extractor.getWeightedSpanTerms(
      mtq, 3, new CannedTokenStream(new Token("aa",0,2), new Token("bb", 2,4)), "foo"); // field "foo"
}
 
Example 29
Source Project: lucene-solr   Source File: TestMultiPhraseQuery.java    License: Apache License 2.0
private Term[] tapTerms(Token[] tap) {
  Term[] terms = new Term[tap.length];
  for (int i=0; i<terms.length; i++) {
    terms[i] = new Term("field",tap[i].toString());
  }
  return terms;
}
 
Example 30
Source Project: lucene-solr   Source File: TestPostingsOffsets.java    License: Apache License 2.0
public void testStackedTokens() throws Exception {
  checkTokens(new Token[] { 
      makeToken("foo", 1, 0, 3),
      makeToken("foo", 0, 0, 3),
      makeToken("foo", 0, 0, 3)
    });
}