Java Code Examples for org.apache.lucene.analysis.pattern.PatternTokenizer

The following examples show how to use org.apache.lucene.analysis.pattern.PatternTokenizer. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: elasticsearch-analysis-url   Source File: URLTokenizer.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Produces the tokens for the query part of a URL.
 *
 * @param url           the full URL (used only to locate the query's offset)
 * @param partStringRaw the raw (un-decoded) query string, used for offset math
 * @param partString    the query string to tokenize
 * @return either the individual query tokens (split on QUERY_SEPARATOR) or,
 *         when query tokenization is disabled, a single token spanning the
 *         whole query
 * @throws IOException if tokenization fails
 */
private List<Token> getQueryTokens(String url, String partStringRaw, String partString) throws IOException {
    final int start = getStartIndex(url, partStringRaw);
    if (tokenizeQuery) {
        // Split the query on the separator pattern; -1 means "split mode" (emit the gaps between matches).
        return tokenize(URLPart.QUERY, addReader(new PatternTokenizer(QUERY_SEPARATOR, -1), new StringReader(partString)), start);
    }
    // Query tokenization disabled: emit the entire query as one token.
    final int end = getEndIndex(start, partStringRaw);
    return Collections.singletonList(new Token(partString, URLPart.QUERY, start, end));
}
 
Example 2
/**
 * The second phase of the life-cycle, used for sanity checking.
 *
 * <p>Validates that the configured options are mutually consistent and
 * resolves the implementation class implied by them:
 * <ul>
 *   <li>{@code pattern} implies {@link PatternTokenizer};</li>
 *   <li>{@code wordBoundary} implies {@code TermCompletionAnalyzer} and
 *       fills in defaults for {@code subWordBoundary} and
 *       {@code alwaysRemoveSoftHyphens};</li>
 *   <li>exactly one of {@code like} or an implementation class must be set;</li>
 *   <li>{@code stopwords} may not be combined with {@code like}.</li>
 * </ul>
 *
 * @throws RuntimeException if any combination of options is inconsistent
 */
public void validate() {
	if (pattern != null) {
		// A pattern implies PatternTokenizer; any other explicit class is a conflict.
		// Compare with equals(), not ==/!=: String identity is not guaranteed
		// between a configured value and Class.getName().
		if (className != null && !PatternTokenizer.class.getName().equals(className)) {
			throw new RuntimeException("Bad Option: Language range "+languageRange + " with pattern property for class "+ className);
		}
		className = PatternTokenizer.class.getName();
	}
	if (this.wordBoundary != null) {
		// wordBoundary implies TermCompletionAnalyzer; any other explicit class is a conflict.
		if (className != null && !TermCompletionAnalyzer.class.getName().equals(className)) {
			throw new RuntimeException("Bad Option: Language range "+languageRange + " with wordBoundary property for class "+ className);
		}
		className = TermCompletionAnalyzer.class.getName();

		if (subWordBoundary == null) {
			subWordBoundary = AnalyzerOptions.DEFAULT_SUB_WORD_BOUNDARY;
		}
		// alwaysRemoveSoftHyphens only makes sense together with softHyphens.
		if (alwaysRemoveSoftHyphens != null && softHyphens == null) {
			throw new RuntimeException("Bad option: Language range "+languageRange + ": must specify softHyphens when setting alwaysRemoveSoftHyphens");
		}
		if (softHyphens != null && alwaysRemoveSoftHyphens == null) {
			alwaysRemoveSoftHyphens = AnalyzerOptions.DEFAULT_ALWAYS_REMOVE_SOFT_HYPHENS;
		}

	} else if (subWordBoundary != null || softHyphens != null || alwaysRemoveSoftHyphens != null ||
			TermCompletionAnalyzer.class.getName().equals(className)) {
		// TermCompletionAnalyzer-specific options without the mandatory wordBoundary.
		throw new RuntimeException("Bad option: Language range "+languageRange + ": must specify wordBoundary for TermCompletionAnalyzer");
	}

	if (PatternTokenizer.class.getName().equals(className) && pattern == null) {
		throw new RuntimeException("Bad Option: Language range "+languageRange + " must specify pattern for PatternTokenizer.");
	}
	// Exactly one of 'like' or an implementation class must be present.
	if ((like != null) == (className != null)) {
		throw new RuntimeException("Bad Option: Language range "+languageRange + " must specify exactly one of implementation class or like.");
	}
	if (stopwords != null && like != null) {
		throw new RuntimeException("Bad Option: Language range "+languageRange + " must not specify stopwords with like.");
	}

}
 
Example 3
Source Project: database   Source File: PatternAnalyzerImpl.java    License: GNU General Public License v2.0 5 votes vote down vote up
/**
 * Builds the analysis chain for a field: a {@link PatternTokenizer}
 * (group -1, i.e. the pattern acts as a token separator — the default
 * grouping) whose output is lower-cased.
 */
@Override
protected TokenStreamComponents createComponents(final String field) {
	final Tokenizer source = new PatternTokenizer(pattern, -1);
	return new TokenStreamComponents(source, new LowerCaseFilter(source));
}
 
Example 4
Source Project: crate   Source File: CommonAnalysisPlugin.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Registers the pre-configured (no-settings) tokenizers contributed by this
 * plugin. Registration order is preserved from the original implementation.
 */
@Override
public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
    final List<PreConfiguredTokenizer> preConfigured = new ArrayList<>();
    // Argument-free tokenizers.
    preConfigured.add(PreConfiguredTokenizer.singleton("keyword", KeywordTokenizer::new, null));
    preConfigured.add(PreConfiguredTokenizer.singleton("classic", ClassicTokenizer::new, null));
    preConfigured.add(PreConfiguredTokenizer.singleton("uax_url_email", UAX29URLEmailTokenizer::new, null));
    preConfigured.add(PreConfiguredTokenizer.singleton("path_hierarchy", PathHierarchyTokenizer::new, null));
    preConfigured.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new, null));
    preConfigured.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new, null));
    preConfigured.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new, null));
    // Tokenizers that need constructor arguments.
    preConfigured.add(PreConfiguredTokenizer.singleton("edge_ngram",
        () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null));
    preConfigured.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1), null));
    preConfigured.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new, null));
    // "lowercase" also supplies a matching multi-term filter factory.
    preConfigured.add(PreConfiguredTokenizer.singleton("lowercase", XLowerCaseTokenizer::new, () -> new TokenFilterFactory() {
        @Override
        public String name() {
            return "lowercase";
        }

        @Override
        public TokenStream create(TokenStream tokenStream) {
            return new LowerCaseFilter(tokenStream);
        }
    }));

    // Temporary shim for aliases. TODO deprecate after they are moved
    preConfigured.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new, null));

    return preConfigured;
}
 
Example 5
Source Project: Elasticsearch   Source File: PatternTokenizerFactory.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Creates a new {@link PatternTokenizer} using this factory's configured
 * pattern and capture group.
 */
@Override
public Tokenizer create() {
    return new PatternTokenizer(this.pattern, this.group);
}
 
Example 6
Source Project: crate   Source File: PatternTokenizerFactory.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Builds a {@link PatternTokenizer} from the factory's configured pattern
 * and group index.
 */
@Override
public Tokenizer create() {
    return new PatternTokenizer(this.pattern, this.group);
}