Java Code Examples for org.apache.lucene.analysis.util.ClasspathResourceLoader

The following examples show how to use org.apache.lucene.analysis.util.ClasspathResourceLoader. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage in the sidebar.
Example 1
Source Project: lucene-solr   Source File: TestOpenNLPPOSFilterFactory.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * POS tagging: tags should appear as token types, and — with
 * TypeAsPayloadTokenFilterFactory appended — as token payloads instead.
 */
public void testPOS() throws Exception {
  // Tags exposed as token types.
  CustomAnalyzer tagging = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .build();
  assertAnalyzesTo(tagging, SENTENCES, SENTENCES_punc, SENTENCES_startOffsets, SENTENCES_endOffsets,
      SENTENCES_posTags, null, null, true);

  // Same chain plus TypeAsPayload: the tags are now checked as payloads, not types.
  CustomAnalyzer withPayloads = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .addTokenFilter(TypeAsPayloadTokenFilterFactory.class)
      .build();
  assertAnalyzesTo(withPayloads, SENTENCES, SENTENCES_punc, SENTENCES_startOffsets, SENTENCES_endOffsets,
      null, null, null, true, toPayloads(SENTENCES_posTags));
}
 
Example 2
Source Project: lucene-solr   Source File: TestOpenNLPTokenizerFactory.java    License: Apache License 2.0 6 votes vote down vote up
@Test
public void testClose() throws IOException {
  // Plain HashMap instead of double-brace initialization: the {{ ... }} form
  // creates an anonymous HashMap subclass that pins a reference to the
  // enclosing test instance — a well-known Java anti-pattern.
  Map<String,String> args = new HashMap<>();
  args.put("sentenceModel", "en-test-sent.bin");
  args.put("tokenizerModel", "en-test-tokenizer.bin");
  OpenNLPTokenizerFactory factory = new OpenNLPTokenizerFactory(args);
  factory.inform(new ClasspathResourceLoader(getClass()));

  Tokenizer ts = factory.create(newAttributeFactory());
  ts.setReader(new StringReader(SENTENCES));

  // The tokenizer must survive close()/reset()/setReader() cycles and keep
  // producing the full expected token sequence after each reuse.
  ts.reset();
  ts.close();
  ts.reset();
  ts.setReader(new StringReader(SENTENCES));
  assertTokenStreamContents(ts, SENTENCES_punc);
  ts.close();
  ts.reset();
  ts.setReader(new StringReader(SENTENCES));
  assertTokenStreamContents(ts, SENTENCES_punc);
}
 
Example 3
Source Project: lucene-solr   Source File: TestKeepFilterFactory.java    License: Apache License 2.0 6 votes vote down vote up
/** KeepWordFilterFactory should load its word set from one or more resource files. */
public void testInform() throws Exception {
  ResourceLoader loader = new ClasspathResourceLoader(getClass());
  // assertNotNull / assertEquals give clearer failure output than
  // assertTrue on a hand-built condition.
  assertNotNull("loader is null and it shouldn't be", loader);

  KeepWordFilterFactory factory = (KeepWordFilterFactory) tokenFilterFactory("KeepWord",
      "words", "keep-1.txt",
      "ignoreCase", "true");
  CharArraySet words = factory.getWords();
  assertNotNull("words is null and it shouldn't be", words);
  assertEquals("unexpected word count from keep-1.txt", 2, words.size());

  // A comma-separated list of word files should merge both sets.
  factory = (KeepWordFilterFactory) tokenFilterFactory("KeepWord",
      "words", "keep-1.txt, keep-2.txt",
      "ignoreCase", "true");
  words = factory.getWords();
  assertNotNull("words is null and it shouldn't be", words);
  assertEquals("unexpected word count from keep-1.txt + keep-2.txt", 4, words.size());
}
 
Example 4
/**
 * When the "parser" config section provides only a "class" (no "factory"),
 * the plugin should fall back to a SimpleQuerqyQParserFactory wrapping
 * that parser class.
 */
@Test
public void testThatASimpleQuerqyQParserFactoryIsCreatedIfOnlyTheParserClassIsConfigured() throws Exception {

    // Stub the plugin configuration: a parser class but no explicit factory.
    NamedList<NamedList<String>> pluginArgs = mock(NamedList.class);
    when(pluginArgs.get("parser")).thenReturn(parserConfig);
    when(parserConfig.get("factory")).thenReturn(null);
    when(parserConfig.get("class")).thenReturn("querqy.parser.WhiteSpaceQuerqyParser");

    ResourceLoader loader = new ClasspathResourceLoader(getClass().getClassLoader());
    final SolrQuerqyParserFactory created = plugin.loadSolrQuerqyParserFactory(loader, pluginArgs);

    assertNotNull(created);
    assertTrue(created instanceof SimpleQuerqyQParserFactory);
    assertEquals(WhiteSpaceQuerqyParser.class,
        ((SimpleQuerqyQParserFactory) created).querqyParserClass);

}
 
Example 5
Source Project: lucene-solr   Source File: TestSuggestStopFilterFactory.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * SuggestStopFilterFactory: word-file loading (single file, comma-separated
 * list, snowball format) plus the ignoreCase flag and built-in defaults.
 */
public void testInform() throws Exception {
  ResourceLoader loader = new ClasspathResourceLoader(getClass());
  // assertNotNull / assertEquals / assertTrue(boolean) give clearer failure
  // output than assertTrue on hand-built "x == y" conditions.
  assertNotNull("loader is null and it shouldn't be", loader);

  SuggestStopFilterFactory factory = createFactory(
      "words", "stop-1.txt",
      "ignoreCase", "true");
  CharArraySet words = factory.getStopWords();
  assertNotNull("words is null and it shouldn't be", words);
  assertEquals("unexpected word count from stop-1.txt", 2, words.size());
  assertTrue("ignoreCase should be true", factory.isIgnoreCase());

  // Comma-separated word files are merged.
  factory = createFactory("words", "stop-1.txt, stop-2.txt",
      "ignoreCase", "true");
  words = factory.getStopWords();
  assertNotNull("words is null and it shouldn't be", words);
  assertEquals("unexpected word count from stop-1.txt + stop-2.txt", 4, words.size());
  assertTrue("ignoreCase should be true", factory.isIgnoreCase());

  // Snowball-format word file: comment/annotation-aware parsing.
  factory = createFactory("words", "stop-snowball.txt",
      "format", "snowball",
      "ignoreCase", "true");
  words = factory.getStopWords();
  assertEquals(8, words.size());
  assertTrue(words.contains("he"));
  assertTrue(words.contains("him"));
  assertTrue(words.contains("his"));
  assertTrue(words.contains("himself"));
  assertTrue(words.contains("she"));
  assertTrue(words.contains("her"));
  assertTrue(words.contains("hers"));
  assertTrue(words.contains("herself"));

  // No args: English defaults, case-sensitive.
  factory = createFactory();
  assertEquals(EnglishAnalyzer.ENGLISH_STOP_WORDS_SET, factory.getStopWords());
  assertFalse(factory.isIgnoreCase());
}
 
Example 6
Source Project: lucene-solr   Source File: TestSuggestStopFilterFactory.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Builds a SuggestStopFilterFactory from alternating key/value argument
 * pairs, always adding the current luceneMatchVersion, and informs it with a
 * classpath resource loader so word files resolve from test resources.
 */
private SuggestStopFilterFactory createFactory(String ... params) throws IOException {
  if (params.length % 2 != 0) {
    throw new IllegalArgumentException("invalid keysAndValues map");
  }
  Map<String, String> args = new HashMap<>(params.length / 2);
  for (int idx = 0; idx + 1 < params.length; idx += 2) {
    // put() returns the previous mapping; null means the key was fresh.
    assertNull("duplicate values for key: " + params[idx],
        args.put(params[idx], params[idx + 1]));
  }
  args.put("luceneMatchVersion", Version.LATEST.toString());

  SuggestStopFilterFactory created = new SuggestStopFilterFactory(args);
  created.inform(new ClasspathResourceLoader(getClass()));
  return created;
}
 
Example 7
/** Tokenize → POS tag → chunk; chunk labels are checked as token types. */
public void testBasic() throws Exception {
  CustomAnalyzer chunking = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .addTokenFilter("opennlpChunker", "chunkerModel", chunkerModelFile)
      .build();
  assertAnalyzesTo(chunking, SENTENCES, SENTENCES_punc, SENTENCES_startOffsets, SENTENCES_endOffsets,
      SENTENCES_chunks, null, null, true);
}
 
Example 8
/** Same chain as testBasic, plus TypeAsPayload: chunk labels become payloads. */
public void testPayloads() throws Exception {
  CustomAnalyzer payloadChain = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .addTokenFilter("opennlpChunker", "chunkerModel", chunkerModelFile)
      .addTokenFilter(TypeAsPayloadTokenFilterFactory.class)
      .build();
  // Types are not asserted (null); the chunk labels are expected as payloads.
  assertAnalyzesTo(payloadChain, SENTENCES, SENTENCES_punc, SENTENCES_startOffsets, SENTENCES_endOffsets,
      null, null, null, true, toPayloads(SENTENCES_chunks));
}
 
Example 9
Source Project: lucene-solr   Source File: TestOpenNLPPOSFilterFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** Smoke test: tokenizer + POS filter produce the expected tokens and offsets. */
public void testBasic() throws IOException {
  CustomAnalyzer posAnalyzer = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .build();
  assertAnalyzesTo(posAnalyzer, SENTENCES, SENTENCES_punc, SENTENCES_startOffsets, SENTENCES_endOffsets);
}
 
Example 10
Source Project: lucene-solr   Source File: TestOpenNLPPOSFilterFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** Input without a sentence break (NO_BREAK) still tokenizes and tags cleanly. */
public void testNoBreak() throws Exception {
  CustomAnalyzer posAnalyzer = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .build();
  assertAnalyzesTo(posAnalyzer, NO_BREAK, NO_BREAK_terms, NO_BREAK_startOffsets, NO_BREAK_endOffsets,
      null, null, null, true);
}
 
Example 11
Source Project: lucene-solr   Source File: TestOpenNLPTokenizerFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** The opennlp tokenizer handles both multi-sentence and single-sentence input. */
@Test
public void testTokenizer() throws IOException {
  CustomAnalyzer opennlp = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "sentenceModel", "en-test-sent.bin", "tokenizerModel", "en-test-tokenizer.bin")
      .build();
  assertAnalyzesTo(opennlp, SENTENCES, SENTENCES_punc, SENTENCES_startOffsets, SENTENCES_endOffsets);
  assertAnalyzesTo(opennlp, SENTENCE1, SENTENCE1_punc);
}
 
Example 12
Source Project: lucene-solr   Source File: TestOpenNLPTokenizerFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** Omitting the required "sentenceModel" parameter must fail fast at build(). */
@Test
public void testTokenizerNoSentenceDetector() throws IOException {
  // The built analyzer was never used, so the dead local assignment
  // ("CustomAnalyzer analyzer = ...") has been dropped.
  IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
    CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
        .withTokenizer("opennlp", "tokenizerModel", "en-test-tokenizer.bin")
        .build();
  });
  assertTrue(expected.getMessage().contains("Configuration Error: missing parameter 'sentenceModel'"));
}
 
Example 13
Source Project: lucene-solr   Source File: TestOpenNLPTokenizerFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** Omitting the required "tokenizerModel" parameter must fail fast at build(). */
@Test
public void testTokenizerNoTokenizer() throws IOException {
  // The built analyzer was never used, so the dead local assignment
  // ("CustomAnalyzer analyzer = ...") has been dropped.
  IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
    CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
        .withTokenizer("opennlp", "sentenceModel", "en-test-sent.bin")
        .build();
  });
  assertTrue(expected.getMessage().contains("Configuration Error: missing parameter 'tokenizerModel'"));
}
 
Example 14
/** Lemmatizer configured with a dictionary only, on a single sentence. */
public void test1SentenceDictionaryOnly() throws Exception {
  // NOTE(review): these literals presumably match the *ModelFile/*DictFile
  // constants used by the sibling tests — confirm against the field values.
  CustomAnalyzer dictOnly = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", "en-test-pos-maxent.bin")
      .addTokenFilter("opennlplemmatizer", "dictionary", "en-test-lemmas.dict")
      .build();
  // Offsets are not asserted (null, null); POS tags are checked as types.
  assertAnalyzesTo(dictOnly, SENTENCE, SENTENCE_dict_punc, null, null,
      SENTENCE_posTags, null, null, true);
}
 
Example 15
/** Dictionary-only lemmatization across a multi-sentence input. */
public void test2SentencesDictionaryOnly() throws Exception {
  CustomAnalyzer dictOnly = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .addTokenFilter("opennlplemmatizer", "dictionary", lemmatizerDictFile)
      .build();
  // Offsets are not asserted (null, null); POS tags are checked as types.
  assertAnalyzesTo(dictOnly, SENTENCES, SENTENCES_dict_punc, null, null,
      SENTENCES_posTags, null, null, true);
}
 
Example 16
/** Lemmatizer configured with a maxent model only, on a single sentence. */
public void test1SentenceMaxEntOnly() throws Exception {
  CustomAnalyzer maxEntOnly = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .addTokenFilter("opennlplemmatizer", "lemmatizerModel", lemmatizerModelFile)
      .build();
  // Offsets are not asserted (null, null); POS tags are checked as types.
  assertAnalyzesTo(maxEntOnly, SENTENCE, SENTENCE_maxent_punc, null, null,
      SENTENCE_posTags, null, null, true);
}
 
Example 17
/** Maxent-only lemmatization across a multi-sentence input. */
public void test2SentencesMaxEntOnly() throws Exception {
  // NOTE(review): "OpenNLPLemmatizer" differs in case from the sibling tests'
  // "opennlplemmatizer" — presumably the factory-name lookup is
  // case-insensitive; confirm before normalizing. Kept byte-identical here.
  CustomAnalyzer maxEntOnly = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .addTokenFilter("OpenNLPLemmatizer", "lemmatizerModel", lemmatizerModelFile)
      .build();
  assertAnalyzesTo(maxEntOnly, SENTENCES, SENTENCES_maxent_punc, null, null,
      SENTENCES_posTags, null, null, true);
}
 
Example 18
/** Lemmatizer configured with both a dictionary and a maxent model, one sentence. */
public void test1SentenceDictionaryAndMaxEnt() throws Exception {
  CustomAnalyzer combined = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", "en-test-pos-maxent.bin")
      .addTokenFilter("opennlplemmatizer", "dictionary", "en-test-lemmas.dict", "lemmatizerModel", lemmatizerModelFile)
      .build();
  // Offsets are not asserted (null, null); POS tags are checked as types.
  assertAnalyzesTo(combined, SENTENCE_both, SENTENCE_both_punc, null, null,
      SENTENCE_both_posTags, null, null, true);
}
 
Example 19
/** Dictionary + maxent lemmatization across a multi-sentence input. */
public void test2SentencesDictionaryAndMaxEnt() throws Exception {
  CustomAnalyzer combined = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .addTokenFilter("opennlplemmatizer", "dictionary", lemmatizerDictFile, "lemmatizerModel", lemmatizerModelFile)
      .build();
  // Offsets are not asserted (null, null); POS tags are checked as types.
  assertAnalyzesTo(combined, SENTENCES_both, SENTENCES_both_punc, null, null,
      SENTENCES_both_posTags, null, null, true);
}
 
Example 20
/**
 * KeywordRepeat duplicates each token (one keyword-marked), the dictionary
 * lemmatizer processes them, and RemoveDuplicates drops identical pairs —
 * expected output keeps both original and lemma where they differ.
 */
public void testKeywordAttributeAwarenessDictionaryOnly() throws Exception {
  CustomAnalyzer keepOriginals = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .addTokenFilter(KeywordRepeatFilterFactory.class)
      .addTokenFilter("opennlplemmatizer", "dictionary", lemmatizerDictFile)
      .addTokenFilter(RemoveDuplicatesTokenFilterFactory.class)
      .build();
  assertAnalyzesTo(keepOriginals, SENTENCES, SENTENCES_dict_keep_orig_punc, null, null,
      SENTENCES_keep_orig_posTags, null, null, true);
}
 
Example 21
/** Same keyword-aware chain as the dictionary variant, but with the maxent model. */
public void testKeywordAttributeAwarenessMaxEntOnly() throws Exception {
  CustomAnalyzer keepOriginals = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .addTokenFilter(KeywordRepeatFilterFactory.class)
      .addTokenFilter("opennlplemmatizer", "lemmatizerModel", lemmatizerModelFile)
      .addTokenFilter(RemoveDuplicatesTokenFilterFactory.class)
      .build();
  assertAnalyzesTo(keepOriginals, SENTENCES, SENTENCES_max_ent_keep_orig_punc, null, null,
      SENTENCES_keep_orig_posTags, null, null, true);
}
 
Example 22
/** Keyword-aware chain with both dictionary and maxent lemmatizer configured. */
public void testKeywordAttributeAwarenessDictionaryAndMaxEnt() throws Exception {
  CustomAnalyzer keepOriginals = CustomAnalyzer.builder(new ClasspathResourceLoader(getClass()))
      .withTokenizer("opennlp", "tokenizerModel", tokenizerModelFile, "sentenceModel", sentenceModelFile)
      .addTokenFilter("opennlpPOS", "posTaggerModel", posTaggerModelFile)
      .addTokenFilter(KeywordRepeatFilterFactory.class)
      .addTokenFilter("opennlplemmatizer", "dictionary", lemmatizerDictFile, "lemmatizerModel", lemmatizerModelFile)
      .addTokenFilter(RemoveDuplicatesTokenFilterFactory.class)
      .build();
  assertAnalyzesTo(keepOriginals, SENTENCES_both, SENTENCES_both_keep_orig_punc, null, null,
      SENTENCES_both_keep_orig_posTags, null, null, true);
}
 
Example 23
Source Project: lucene-solr   Source File: TestICUTokenizerFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** Default ICU config: Thai/Lao text is word-segmented, Latin split on spaces. */
public void testMixedText() throws Exception {
  ICUTokenizerFactory factory = new ICUTokenizerFactory(new HashMap<String,String>());
  factory.inform(new ClasspathResourceLoader(getClass()));

  Tokenizer tokenizer = factory.create(newAttributeFactory());
  tokenizer.setReader(new StringReader("การที่ได้ต้องแสดงว่างานดี  This is a test ກວ່າດອກ"));
  assertTokenStreamContents(tokenizer,
      new String[] { "การ", "ที่", "ได้", "ต้อง", "แสดง", "ว่า", "งาน", "ดี",
      "This", "is", "a", "test", "ກວ່າ", "ດອກ"});
}
 
Example 24
Source Project: lucene-solr   Source File: TestICUTokenizerFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** Custom RBBI rule file for Latin: break on whitespace only, never on punctuation. */
public void testTokenizeLatinOnWhitespaceOnly() throws Exception {
  // “ U+201C LEFT DOUBLE QUOTATION MARK; ” U+201D RIGHT DOUBLE QUOTATION MARK
  final Map<String,String> args = new HashMap<>();
  args.put(ICUTokenizerFactory.RULEFILES, "Latn:Latin-break-only-on-whitespace.rbbi");
  ICUTokenizerFactory factory = new ICUTokenizerFactory(args);
  factory.inform(new ClasspathResourceLoader(this.getClass()));

  Tokenizer tokenizer = factory.create(newAttributeFactory());
  tokenizer.setReader(new StringReader
      ("  Don't,break.at?/(punct)!  \u201Cnice\u201D\r\n\r\n85_At:all; `really\" +2=3$5,&813 [email protected]#%$^)(*@#$   "));
  assertTokenStreamContents(tokenizer,
      new String[] { "Don't,break.at?/(punct)!", "\u201Cnice\u201D", "85_At:all;", "`really\"",  "+2=3$5,&813", "[email protected]#%$^)(*@#$" },
      new String[] { "<ALPHANUM>",               "<ALPHANUM>",       "<ALPHANUM>", "<ALPHANUM>", "<NUM>",       "<OTHER>" });
}
 
Example 25
Source Project: lucene-solr   Source File: TestICUTokenizerFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** Custom RBBI rule file: hyphenated words like "One-two" stay a single token. */
public void testTokenizeLatinDontBreakOnHyphens() throws Exception {
  final Map<String,String> args = new HashMap<>();
  args.put(ICUTokenizerFactory.RULEFILES, "Latn:Latin-dont-break-on-hyphens.rbbi");
  ICUTokenizerFactory factory = new ICUTokenizerFactory(args);
  factory.inform(new ClasspathResourceLoader(getClass()));

  Tokenizer tokenizer = factory.create(newAttributeFactory());
  tokenizer.setReader(new StringReader
      ("One-two punch.  Brang-, not brung-it.  This one--not that one--is the right one, -ish."));
  assertTokenStreamContents(tokenizer,
      new String[] { "One-two", "punch",
          "Brang", "not", "brung-it",
          "This", "one", "not", "that", "one", "is", "the", "right", "one", "ish" });
}
 
Example 26
Source Project: lucene-solr   Source File: TestICUTokenizerFactory.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Multiple script/rule-file pairs in one RULEFILES value: Cyrillic and Thai
 * both use KeywordTokenizer.rbbi, overriding the default per-script
 * tokenization so each run of those scripts comes through as a single token
 * (trailing whitespace included), while Latin text is tokenized normally.
 */
public void testKeywordTokenizeCyrillicAndThai() throws Exception {
  final Map<String,String> args = new HashMap<>();
  args.put(ICUTokenizerFactory.RULEFILES, "Cyrl:KeywordTokenizer.rbbi,Thai:KeywordTokenizer.rbbi");
  ICUTokenizerFactory factory = new ICUTokenizerFactory(args);
  factory.inform(new ClasspathResourceLoader(getClass()));

  Tokenizer tokenizer = factory.create(newAttributeFactory());
  tokenizer.setReader(new StringReader
      ("Some English.  Немного русский.  ข้อความภาษาไทยเล็ก ๆ น้อย ๆ  More English."));
  assertTokenStreamContents(tokenizer, new String[] { "Some", "English",
      "Немного русский.  ",
      "ข้อความภาษาไทยเล็ก ๆ น้อย ๆ  ",
      "More", "English" });
}
 
Example 27
Source Project: lucene-solr   Source File: TestPhoneticFilterFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** Defaults: ENCODER=Metaphone yields a Metaphone encoder and inject defaults to true. */
public void testFactoryDefaults() throws IOException {
  Map<String,String> params = new HashMap<>();
  params.put(PhoneticFilterFactory.ENCODER, "Metaphone");

  PhoneticFilterFactory phonetic = new PhoneticFilterFactory(params);
  phonetic.inform(new ClasspathResourceLoader(phonetic.getClass()));

  assertTrue(phonetic.getEncoder() instanceof Metaphone);
  assertTrue(phonetic.inject); // default
}
 
Example 28
Source Project: lucene-solr   Source File: TestPhoneticFilterFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** An explicit INJECT=false must override the inject default. */
public void testInjectFalse() throws IOException {
  Map<String,String> params = new HashMap<>();
  params.put(PhoneticFilterFactory.ENCODER, "Metaphone");
  params.put(PhoneticFilterFactory.INJECT, "false");

  PhoneticFilterFactory phonetic = new PhoneticFilterFactory(params);
  phonetic.inform(new ClasspathResourceLoader(phonetic.getClass()));
  assertFalse(phonetic.inject);
}
 
Example 29
Source Project: lucene-solr   Source File: TestPhoneticFilterFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** MAX_CODE_LENGTH must be forwarded to the Metaphone encoder's maxCodeLen. */
public void testMaxCodeLength() throws IOException {
  Map<String,String> params = new HashMap<>();
  params.put(PhoneticFilterFactory.ENCODER, "Metaphone");
  params.put(PhoneticFilterFactory.MAX_CODE_LENGTH, "2");

  PhoneticFilterFactory phonetic = new PhoneticFilterFactory(params);
  phonetic.inform(new ClasspathResourceLoader(phonetic.getClass()));
  assertEquals(2, ((Metaphone) phonetic.getEncoder()).getMaxCodeLen());
}
 
Example 30
Source Project: lucene-solr   Source File: TestPhoneticFilterFactory.java    License: Apache License 2.0 5 votes vote down vote up
/** An unrecognized encoder name must raise IllegalArgumentException on inform(). */
public void testUnknownEncoder() throws IOException {
  IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
    Map<String,String> params = new HashMap<>();
    params.put("encoder", "XXX");
    PhoneticFilterFactory phonetic = new PhoneticFilterFactory(params);
    phonetic.inform(new ClasspathResourceLoader(phonetic.getClass()));
  });
  assertTrue(expected.getMessage().contains("Error loading encoder"));
}