Java Code Examples for org.apache.lucene.analysis.core.LetterTokenizer

The following examples show how to use org.apache.lucene.analysis.core.LetterTokenizer. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: lucene-solr   Source File: TestCharTokenizers.java    License: Apache License 2.0
/** Verifies that supplementary (surrogate-pair) characters survive tokenization intact. */
public void testReadSupplementaryChars() throws IOException {
  StringBuilder input = new StringBuilder();
  // Random number of repetitions, scaled by the test framework's multiplier.
  int repetitions = (1024 + random().nextInt(1024)) * RANDOM_MULTIPLIER;
  for (int i = 1; i < repetitions; i++) {
    input.append("\ud801\udc1cabc");
    if (i % 10 == 0) {
      input.append(" ");
    }
  }
  // The tokenizer's internal buffer is 1024 chars; insert a surrogate pair at
  // index 1023 so it straddles the buffer boundary.
  input.insert(1023, "\ud801\udc1c");
  Tokenizer tokenizer = new LetterTokenizer(newAttributeFactory());
  tokenizer.setReader(new StringReader(input.toString()));
  assertTokenStreamContents(new LowerCaseFilter(tokenizer), input.toString().toLowerCase(Locale.ROOT).split(" "));
}
 
Example 2
Source Project: lucene-solr   Source File: TestGermanAnalyzer.java    License: Apache License 2.0
/** Checks that tokens marked as keywords are protected from German stemming. */
public void testWithKeywordAttribute() throws IOException {
  // "fischen" is declared a keyword, so the stemmer must leave it untouched.
  CharArraySet keywords = new CharArraySet(1, true);
  keywords.add("fischen");
  final Tokenizer source = new LetterTokenizer();
  source.setReader(new StringReader("Fischen Trinken"));
  GermanStemFilter stemmer = new GermanStemFilter(
      new SetKeywordMarkerFilter(new LowerCaseFilter(source), keywords));
  // The protected token passes through; "trinken" is stemmed to "trink".
  assertTokenStreamContents(stemmer, new String[] { "fischen", "trink" });
}
 
Example 3
Source Project: lucene-solr   Source File: TestBrazilianAnalyzer.java    License: Apache License 2.0
/** Checks that tokens marked as keywords are protected from Brazilian stemming. */
public void testWithKeywordAttribute() throws IOException {
  // Protect "Brasília" (accented) from stemming; the set is case-insensitive.
  CharArraySet keywords = new CharArraySet(1, true);
  keywords.add("Brasília");
  Tokenizer source = new LetterTokenizer();
  source.setReader(new StringReader("Brasília Brasilia"));
  BrazilianStemFilter stemmer =
      new BrazilianStemFilter(new SetKeywordMarkerFilter(new LowerCaseFilter(source), keywords));

  // The keyword survives lowercased but unstemmed; the unprotected variant is stemmed.
  assertTokenStreamContents(stemmer, new String[] { "brasília", "brasil" });
}
 
Example 4
Source Project: lucene-solr   Source File: TestCharTokenizers.java    License: Apache License 2.0
/** Exercises internal buffer growth with prefixes of increasing length before a supplementary char. */
public void testExtendCharBuffer() throws IOException {
  for (int run = 0; run < 40; run++) {
    // Build "a" * (run+1) followed by a supplementary character and "abc",
    // shifting the surrogate pair across different buffer positions each run.
    StringBuilder text = new StringBuilder();
    int prefix = run + 1;
    while (prefix-- > 0) {
      text.append("a");
    }
    text.append("\ud801\udc1cabc");
    Tokenizer tokenizer = new LetterTokenizer(newAttributeFactory());
    tokenizer.setReader(new StringReader(text.toString()));
    assertTokenStreamContents(new LowerCaseFilter(tokenizer),
        new String[] { text.toString().toLowerCase(Locale.ROOT) });
  }
}
 
Example 5
Source Project: lucene-solr   Source File: TestCharTokenizers.java    License: Apache License 2.0
/** Verifies that a run of letters is split once it reaches the maximum token length. */
public void testMaxWordLength() throws IOException {
  // 255 letters — presumably the tokenizer's maximum token length; TODO confirm
  // against LetterTokenizer/CharTokenizer constants.
  StringBuilder token = new StringBuilder();
  while (token.length() < 255) {
    token.append("A");
  }
  String expected = token.toString().toLowerCase(Locale.ROOT);
  Tokenizer tokenizer = new LetterTokenizer(newAttributeFactory());
  // Two back-to-back maximal runs should come out as two separate tokens.
  tokenizer.setReader(new StringReader(token.toString() + token.toString()));
  assertTokenStreamContents(new LowerCaseFilter(tokenizer), new String[] { expected, expected });
}
 
Example 6
Source Project: lucene-solr   Source File: TestCharTokenizers.java    License: Apache License 2.0
/** Same as testMaxWordLength, but the final letter is a supplementary character. */
public void testMaxWordLengthWithSupplementary() throws IOException {
  // 254 BMP letters plus one supplementary character — presumably 255 code points,
  // the maximum token length; TODO confirm against the tokenizer's limit.
  StringBuilder token = new StringBuilder();
  while (token.length() < 254) {
    token.append("A");
  }
  token.append("\ud801\udc1c");
  String expected = token.toString().toLowerCase(Locale.ROOT);
  Tokenizer tokenizer = new LetterTokenizer(newAttributeFactory());
  // Two maximal runs in a row must be emitted as two tokens.
  tokenizer.setReader(new StringReader(token.toString() + token.toString()));
  assertTokenStreamContents(new LowerCaseFilter(tokenizer), new String[] { expected, expected });
}
 
Example 7
Source Project: crate   Source File: CommonAnalysisPlugin.java    License: Apache License 2.0
/**
 * Registers the tokenizers that need no per-index configuration.
 * Registration order is preserved from the original implementation.
 */
@Override
public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
    List<PreConfiguredTokenizer> result = new ArrayList<>();
    result.add(PreConfiguredTokenizer.singleton("keyword", KeywordTokenizer::new, null));
    result.add(PreConfiguredTokenizer.singleton("classic", ClassicTokenizer::new, null));
    result.add(PreConfiguredTokenizer.singleton("uax_url_email", UAX29URLEmailTokenizer::new, null));
    result.add(PreConfiguredTokenizer.singleton("path_hierarchy", PathHierarchyTokenizer::new, null));
    result.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new, null));
    result.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new, null));
    result.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new, null));
    result.add(PreConfiguredTokenizer.singleton("edge_ngram",
        () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null));
    // "pattern" splits on non-word characters; -1 means no capture group is used.
    result.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1), null));
    result.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new, null));
    // "lowercase" additionally supplies a companion lowercase token-filter factory;
    // the supplier builds a fresh factory on each invocation.
    result.add(PreConfiguredTokenizer.singleton("lowercase", XLowerCaseTokenizer::new, () -> new TokenFilterFactory() {
        @Override
        public String name() {
            return "lowercase";
        }

        @Override
        public TokenStream create(TokenStream tokenStream) {
            return new LowerCaseFilter(tokenStream);
        }
    }));

    // Temporary shim for aliases. TODO deprecate after they are moved
    result.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new, null));

    return result;
}
 
Example 8
Source Project: Elasticsearch   Source File: LetterTokenizerFactory.java    License: Apache License 2.0
/** Builds a new {@link LetterTokenizer} using the default attribute factory. */
@Override
public Tokenizer create() {
    return new LetterTokenizer();
}
 
Example 9
Source Project: lucene-solr   Source File: TestCustomAnalyzer.java    License: Apache License 2.0
/** Builds a new {@link LetterTokenizer} backed by the supplied attribute factory. */
@Override
public Tokenizer create(AttributeFactory factory) {
  return new LetterTokenizer(factory);
}
 
Example 10
Source Project: crate   Source File: LetterTokenizerFactory.java    License: Apache License 2.0
/** Builds a new {@link LetterTokenizer} using the default attribute factory. */
@Override
public Tokenizer create() {
    return new LetterTokenizer();
}