org.apache.lucene.analysis.path.PathHierarchyTokenizer Java Examples

The following examples show how to use org.apache.lucene.analysis.path.PathHierarchyTokenizer. Each example is taken from an open-source project; the project and source file are noted above the code.
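Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what the tokenizer emits; the class name PathHierarchyDemo and the sample path are illustrative only.

import java.io.StringReader;

import org.apache.lucene.analysis.path.PathHierarchyTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class PathHierarchyDemo {
    public static void main(String[] args) throws Exception {
        // With the default '/' delimiter, "/usr/local/bin" is emitted as the
        // hierarchy of prefixes: "/usr", "/usr/local", "/usr/local/bin".
        try (PathHierarchyTokenizer tokenizer = new PathHierarchyTokenizer()) {
            tokenizer.setReader(new StringReader("/usr/local/bin"));
            CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
            tokenizer.reset();
            while (tokenizer.incrementToken()) {
                System.out.println(term.toString());
            }
            tokenizer.end();
        }
    }
}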
Example #1
Source File: PathHierarchyTokenizerFactory.java    From Elasticsearch with Apache License 2.0
@Inject
public PathHierarchyTokenizerFactory(Index index, IndexSettingsService indexSettingsService, @Assisted String name, @Assisted Settings settings) {
    super(index, indexSettingsService.getSettings(), name, settings);
    bufferSize = settings.getAsInt("buffer_size", 1024);
    String delimiter = settings.get("delimiter");
    if (delimiter == null) {
        this.delimiter = PathHierarchyTokenizer.DEFAULT_DELIMITER;
    } else if (delimiter.length() > 1) {
        throw new IllegalArgumentException("delimiter can only be a one char value");
    } else {
        this.delimiter = delimiter.charAt(0);
    }

    String replacement = settings.get("replacement");
    if (replacement == null) {
        this.replacement = this.delimiter;
    } else if (replacement.length() > 1) {
        throw new IllegalArgumentException("replacement can only be a one char value");
    } else {
        this.replacement = replacement.charAt(0);
    }
    this.skip = settings.getAsInt("skip", PathHierarchyTokenizer.DEFAULT_SKIP);
    this.reverse = settings.getAsBoolean("reverse", false);
}
 
Example #2
Source File: PathHierarchyTokenizerFactory.java    From crate with Apache License 2.0
PathHierarchyTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
    super(indexSettings, name, settings);
    bufferSize = settings.getAsInt("buffer_size", 1024);
    String delimiter = settings.get("delimiter");
    if (delimiter == null) {
        this.delimiter = PathHierarchyTokenizer.DEFAULT_DELIMITER;
    } else if (delimiter.length() != 1) {
        throw new IllegalArgumentException("delimiter must be a one char value");
    } else {
        this.delimiter = delimiter.charAt(0);
    }

    String replacement = settings.get("replacement");
    if (replacement == null) {
        this.replacement = this.delimiter;
    } else if (replacement.length() != 1) {
        throw new IllegalArgumentException("replacement must be a one char value");
    } else {
        this.replacement = replacement.charAt(0);
    }
    this.skip = settings.getAsInt("skip", PathHierarchyTokenizer.DEFAULT_SKIP);
    this.reverse = settings.getAsBoolean("reverse", false);
}
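Note that this variant checks delimiter.length() != 1 rather than > 1, so an empty (but non-null) delimiter or replacement string is rejected with a clear error instead of failing later in charAt(0).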
 
Example #3
Source File: CommonAnalysisPlugin.java    From crate with Apache License 2.0
@Override
public List<PreConfiguredTokenizer> getPreConfiguredTokenizers() {
    List<PreConfiguredTokenizer> tokenizers = new ArrayList<>();
    tokenizers.add(PreConfiguredTokenizer.singleton("keyword", KeywordTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("classic", ClassicTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("uax_url_email", UAX29URLEmailTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("path_hierarchy", PathHierarchyTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("edge_ngram",
        () -> new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE), null));
    tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1), null));
    tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new, null));
    tokenizers.add(PreConfiguredTokenizer.singleton("lowercase", XLowerCaseTokenizer::new, () -> new TokenFilterFactory() {
        @Override
        public String name() {
            return "lowercase";
        }

        @Override
        public TokenStream create(TokenStream tokenStream) {
            return new LowerCaseFilter(tokenStream);
        }
    }));

    // Temporary shim for aliases. TODO deprecate after they are moved
    tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new, null));

    return tokenizers;
}
 
Example #4
Source File: PathHierarchyTokenizerFactory.java    From Elasticsearch with Apache License 2.0
@Override
public Tokenizer create() {
    if (reverse) {
        return new ReversePathHierarchyTokenizer(bufferSize, delimiter, replacement, skip);
    }
    return new PathHierarchyTokenizer(bufferSize, delimiter, replacement, skip);
}
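When reverse is enabled, the returned ReversePathHierarchyTokenizer works from the end of the input and emits suffixes rather than prefixes; the Lucene javadoc illustrates this with domain-like input such as www.site.co.uk producing www.site.co.uk, site.co.uk, co.uk, uk.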
 
Example #5
Source File: URLTokenizer.java    From elasticsearch-analysis-url with Apache License 2.0
private List<Token> getPathTokens(String url, String partStringRaw, String partString) throws IOException {
    int start = getStartIndex(url, partStringRaw);
    if (!tokenizePath) {
        int end = getEndIndex(start, partStringRaw);
        return Collections.singletonList(new Token(partString, URLPart.PATH, start, end));
    }
    return tokenize(URLPart.PATH, addReader(new PathHierarchyTokenizer('/', '/'), new StringReader(partString)), start);
}
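Here the two-argument constructor is used, setting both the delimiter and the replacement character to '/', so only the path portion of the URL is split into its hierarchical prefixes.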
 
Example #6
Source File: PathHierarchyTokenizerFactory.java    From crate with Apache License 2.0
@Override
public Tokenizer create() {
    if (reverse) {
        return new ReversePathHierarchyTokenizer(bufferSize, delimiter, replacement, skip);
    }
    return new PathHierarchyTokenizer(bufferSize, delimiter, replacement, skip);
}