org.apache.lucene.analysis.DelegatingAnalyzerWrapper Java Examples

The following examples show how to use org.apache.lucene.analysis.DelegatingAnalyzerWrapper. Each example is taken from an open-source project; the original source file and license are noted above the code.
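DelegatingAnalyzerWrapper is an abstract Analyzer that performs no analysis itself: a subclass implements getWrappedAnalyzer(String fieldName) to pick a delegate Analyzer for each field, and the reuse strategy passed to the constructor (typically Analyzer.PER_FIELD_REUSE_STRATEGY) controls how token stream components are cached. Both examples below use this pattern via an anonymous subclass. As a minimal standalone sketch, assuming illustrative names (the class name, the "id" field, and the KeywordAnalyzer/StandardAnalyzer pairing are not taken from either example):

import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;

public class PerFieldDelegatingAnalyzer extends DelegatingAnalyzerWrapper {

  private final Map<String, Analyzer> perField = new HashMap<>();
  private final Analyzer fallback = new StandardAnalyzer();

  public PerFieldDelegatingAnalyzer() {
    // PER_FIELD_REUSE_STRATEGY caches reusable TokenStream components per
    // field, which is necessary when fields use different analyzers.
    super(PER_FIELD_REUSE_STRATEGY);
    perField.put("id", new KeywordAnalyzer()); // hypothetical field name
  }

  @Override
  protected Analyzer getWrappedAnalyzer(String fieldName) {
    // Fall back to a general-purpose analyzer for unregistered fields.
    Analyzer delegate = perField.get(fieldName);
    return delegate != null ? delegate : fallback;
  }
}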
Example #1
Source File: FastVectorHighlighterTest.java (from lucene-solr, Apache License 2.0)
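This test helper from Lucene's highlighter module indexes a single document whose text is copied into several fields, each analyzed differently, then asserts that FastVectorHighlighter produces the expected fragment. The anonymous DelegatingAnalyzerWrapper in the middle of the method routes each field name to its MockAnalyzer through the fieldAnalyzers map.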
private void matchedFieldsTestCase( boolean useMatchedFields, boolean fieldMatch, String fieldValue, String expected, Query... queryClauses ) throws IOException {
  Document doc = new Document();
  FieldType stored = new FieldType( TextField.TYPE_STORED );
  stored.setStoreTermVectorOffsets( true );
  stored.setStoreTermVectorPositions( true );
  stored.setStoreTermVectors( true );
  stored.freeze();
  FieldType matched = new FieldType( TextField.TYPE_NOT_STORED );
  matched.setStoreTermVectorOffsets( true );
  matched.setStoreTermVectorPositions( true );
  matched.setStoreTermVectors( true );
  matched.freeze();
  doc.add( new Field( "field", fieldValue, stored ) );               // Whitespace tokenized with English stop words
  doc.add( new Field( "field_exact", fieldValue, matched ) );        // Whitespace tokenized without stop words
  doc.add( new Field( "field_super_exact", fieldValue, matched ) );  // Whitespace tokenized without toLower
  doc.add( new Field( "field_characters", fieldValue, matched ) );   // Each letter is a token
  doc.add( new Field( "field_tripples", fieldValue, matched ) );     // Every three letters is a token
  doc.add( new Field( "field_sliced", fieldValue.substring( 0,       // Sliced at 10 chars then analyzed just like field
    Math.min( fieldValue.length() - 1 , 10 ) ), matched ) );
  doc.add( new Field( "field_der_red", new CannedTokenStream(        // Hacky field containing "der" and "red" at pos = 0
        token( "der", 1, 0, 3 ),
        token( "red", 0, 0, 3 )
      ), matched ) );

  final Map<String, Analyzer> fieldAnalyzers = new TreeMap<>();
  fieldAnalyzers.put( "field", new MockAnalyzer( random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET ) );
  fieldAnalyzers.put( "field_exact", new MockAnalyzer( random() ) );
  fieldAnalyzers.put( "field_super_exact", new MockAnalyzer( random(), MockTokenizer.WHITESPACE, false ) );
  fieldAnalyzers.put( "field_characters", new MockAnalyzer( random(), new CharacterRunAutomaton( new RegExp(".").toAutomaton() ), true ) );
  fieldAnalyzers.put( "field_tripples", new MockAnalyzer( random(), new CharacterRunAutomaton( new RegExp("...").toAutomaton() ), true ) );
  fieldAnalyzers.put( "field_sliced", fieldAnalyzers.get( "field" ) );
  fieldAnalyzers.put( "field_der_red", fieldAnalyzers.get( "field" ) );  // This is required even though we provide a token stream
  Analyzer analyzer = new DelegatingAnalyzerWrapper(Analyzer.PER_FIELD_REUSE_STRATEGY) {
    @Override
    public Analyzer getWrappedAnalyzer(String fieldName) {
      // Delegate to the per-field analyzer registered above.
      return fieldAnalyzers.get( fieldName );
    }
  };

  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(analyzer));
  writer.addDocument( doc );

  FastVectorHighlighter highlighter = new FastVectorHighlighter();
  FragListBuilder fragListBuilder = new SimpleFragListBuilder();
  FragmentsBuilder fragmentsBuilder = new ScoreOrderFragmentsBuilder();
  IndexReader reader = DirectoryReader.open(writer);
  String[] preTags = new String[] { "<b>" };
  String[] postTags = new String[] { "</b>" };
  Encoder encoder = new DefaultEncoder();
  int docId = 0;
  BooleanQuery.Builder query = new BooleanQuery.Builder();
  for ( Query clause : queryClauses ) {
    query.add( clause, Occur.MUST );
  }
  FieldQuery fieldQuery = new FieldQuery( query.build(), reader, true, fieldMatch );
  String[] bestFragments;
  if ( useMatchedFields ) {
    Set< String > matchedFields = new HashSet<>();
    matchedFields.add( "field" );
    matchedFields.add( "field_exact" );
    matchedFields.add( "field_super_exact" );
    matchedFields.add( "field_characters" );
    matchedFields.add( "field_tripples" );
    matchedFields.add( "field_sliced" );
    matchedFields.add( "field_der_red" );
    bestFragments = highlighter.getBestFragments( fieldQuery, reader, docId, "field", matchedFields, 25, 1,
      fragListBuilder, fragmentsBuilder, preTags, postTags, encoder );
  } else {
    bestFragments = highlighter.getBestFragments( fieldQuery, reader, docId, "field", 25, 1,
      fragListBuilder, fragmentsBuilder, preTags, postTags, encoder );
  }
  assertEquals( expected, bestFragments[ 0 ] );

  reader.close();
  writer.close();
  dir.close();
}
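Two details are worth noting. First, field_sliced and field_der_red reuse the analyzer registered for field, and the field_der_red entry is required even though that field is indexed from a pre-built CannedTokenStream (see the inline comment). Second, a call to this helper might look like the sketch below; the query values and expected fragment are hypothetical placeholders rather than values from the actual test, and TermQuery and Term are assumed to be imported in the test file:

// Hypothetical invocation: highlight "cat" in the whitespace-tokenized "field".
// The expected fragment is a placeholder and must match the real highlighter output.
matchedFieldsTestCase( true, true,
    "cat cat junk junk junk junk junk junk junk junk junk junk",
    "<b>cat</b> <b>cat</b> junk junk junk",
    new TermQuery( new Term( "field", "cat" ) ) );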
 
Example #2
Source File: LuceneTranslationMemory.java (from modernmt, Apache License 2.0)
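In this constructor from ModernMT's translation memory, the anonymous DelegatingAnalyzerWrapper decides at indexing time which analyzer handles each field: fields the DocumentBuilder identifies as hash fields get the hash analyzer, and all other fields get the content analyzer.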
public LuceneTranslationMemory(Directory directory, DocumentBuilder documentBuilder, QueryBuilder queryBuilder, Rescorer rescorer, AnalyzerFactory analyzerFactory, int minQuerySize) throws IOException {
    this.indexDirectory = directory;
    this.queryBuilder = queryBuilder;
    this.rescorer = rescorer;
    this.documentBuilder = documentBuilder;
    this.analyzerFactory = analyzerFactory;
    this.shortQueryAnalyzer = analyzerFactory.createShortQueryAnalyzer();
    this.longQueryAnalyzer = analyzerFactory.createLongQueryAnalyzer();
    this.minQuerySize = minQuerySize;

    // Index writer setup
    IndexWriterConfig indexConfig = new IndexWriterConfig(Version.LUCENE_4_10_4, new DelegatingAnalyzerWrapper(PER_FIELD_REUSE_STRATEGY) {
        @Override
        protected Analyzer getWrappedAnalyzer(String fieldName) {
            if (documentBuilder.isHashField(fieldName))
                return analyzerFactory.createHashAnalyzer();
            else
                return analyzerFactory.createContentAnalyzer();
        }
    });

    indexConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
    indexConfig.setSimilarity(analyzerFactory.createSimilarity());

    this.indexWriter = new IndexWriter(this.indexDirectory, indexConfig);

    // Ensure index exists
    if (!DirectoryReader.indexExists(directory))
        this.indexWriter.commit();

    // Read channels status
    IndexSearcher searcher = this.getIndexSearcher();

    Query query = this.queryBuilder.getChannels(this.documentBuilder);
    TopDocs docs = searcher.search(query, 1);

    if (docs.scoreDocs.length > 0) {
        Document channelsDocument = searcher.doc(docs.scoreDocs[0].doc);
        this.channels = this.documentBuilder.asChannels(channelsDocument);
    } else {
        this.channels = new HashMap<>();
    }
}
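Because the anonymous wrapper closes over documentBuilder and analyzerFactory, the per-field routing logic sits next to the IndexWriterConfig that uses it. Subclassing DelegatingAnalyzerWrapper rather than using a fixed field-to-analyzer map (as with PerFieldAnalyzerWrapper) fits here because the routing is predicate-based: isHashField(fieldName) can classify field names that are not known in advance. PER_FIELD_REUSE_STRATEGY (statically imported from Analyzer) also matters for correctness, since a global reuse strategy could cache token stream components built by one analyzer and hand them to a field that needs the other.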