org.apache.lucene.analysis.standard.ClassicTokenizer Java Examples

The following examples show how to use org.apache.lucene.analysis.standard.ClassicTokenizer. They are taken from open-source projects; the source file and license are noted above each example.
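Before the project examples, here is a minimal, self-contained sketch of driving ClassicTokenizer by hand and reading its terms through CharTermAttribute. The class name and sample text are illustrative, not from any of the projects below; the lifecycle calls (setReader, reset, incrementToken, end) are the standard Lucene TokenStream contract.

import java.io.StringReader;

import org.apache.lucene.analysis.standard.ClassicTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ClassicTokenizerDemo {
    public static void main(String[] args) throws Exception {
        // ClassicTokenizer implements the pre-3.1 StandardTokenizer grammar;
        // notably, it keeps e-mail addresses and hostnames as single tokens.
        try (ClassicTokenizer tokenizer = new ClassicTokenizer()) {
            tokenizer.setReader(new StringReader("Send mail to dev@example.com today"));
            CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
            tokenizer.reset();
            while (tokenizer.incrementToken()) {
                System.out.println(term.toString());
            }
            tokenizer.end();
        }
    }
}

Run against the sample text above, this should print "dev@example.com" as one token, which is the main behavioral difference from the UAX#29-based StandardTokenizer.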
Example #1
Source File: TestConditionalTokenFilter.java    From lucene-solr with Apache License 2.0
public void testReadaheadWithNoFiltering() throws IOException {
  Analyzer analyzer = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer source = new ClassicTokenizer();
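      // shouldFilter() always returns true here, so every token is routed through the ShingleFilter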
      TokenStream sink = new ConditionalTokenFilter(source, in -> new ShingleFilter(in, 2)) {
        @Override
        protected boolean shouldFilter() throws IOException {
          return true;
        }
      };
      return new TokenStreamComponents(source, sink);
    }
  };

  String input = "one two three four";

  try (TokenStream ts = analyzer.tokenStream("", input)) {
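    // each unigram is followed by its two-word shingle (except the last)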
    assertTokenStreamContents(ts, new String[]{
        "one", "one two",
        "two", "two three",
        "three", "three four",
        "four"
    });
  }
}
 
Example #2
Source File: CommonAnalysisPlugin.java    From crate with Apache License 2.0
@Override
public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
    List<PreConfiguredTokenizer> tokenizers = new ArrayList<>();
    tokenizers.add(PreConfiguredTokenizer.singleton("keyword", KeywordTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("classic", ClassicTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("uax_url_email", UAX29URLEmailTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("path_hierarchy", PathHierarchyTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("edge_ngram",
        () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null));
    tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1), null));
    tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", XLowerCaseTokenizer::new, () -> new TokenFilterFactory() {
        @Override
        public String name() {
            return "lowercase";
        }

        @Override
        public TokenStream create(TokenStream tokenStream) {
            return new LowerCaseFilter(tokenStream);
        }
    }));

    // Temporary shim for aliases. TODO deprecate after they are moved
    tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new, null));

    return tokenizers;
}
 
Example #3
Source File: TestConditionalTokenFilter.java    From lucene-solr with Apache License 2.0
public void testReadaheadWithFiltering() throws IOException {

    CharArraySet protectedTerms = new CharArraySet(2, true);
    protectedTerms.add("three");

    Analyzer analyzer = new Analyzer() {
      @Override
      protected TokenStreamComponents createComponents(String fieldName) {
        Tokenizer source = new ClassicTokenizer();
        TokenStream sink = new ProtectedTermFilter(protectedTerms, source, in -> new ShingleFilter(in, 2));
        sink = new ValidatingTokenFilter(sink, "1");
        return new TokenStreamComponents(source, sink);
      }
    };

    String input = "one two three four";

    try (TokenStream ts = analyzer.tokenStream("", input)) {
      assertTokenStreamContents(ts, new String[]{
          "one", "one two", "two", "three", "four"  // "three" is protected, so no shingles are built over it
      }, new int[]{
           0,     0,         4,     8,       14     // start offsets
      }, new int[]{
           3,     7,         7,     13,      18     // end offsets
      }, new int[]{
           1,     0,         1,     1,       1      // position increments
      }, new int[]{
           1,     2,         1,     1,       1      // position lengths
      }, 18);                                       // final offset
    }
  }
 
Example #4
Source File: ClassicTokenizerFactory.java    From Elasticsearch with Apache License 2.0
@Override
public Tokenizer create() {
    ClassicTokenizer tokenizer = new ClassicTokenizer();
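    // tokens longer than maxTokenLength are skipped by ClassicTokenizer (not truncated)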
    tokenizer.setMaxTokenLength(maxTokenLength);
    return tokenizer;
}
 
Example #5
Source File: ClassicTokenizerFactory.java    From crate with Apache License 2.0
@Override
public Tokenizer create() {
    ClassicTokenizer tokenizer = new ClassicTokenizer();
    tokenizer.setMaxTokenLength(maxTokenLength);
    return tokenizer;
}