Java Code Examples for org.apache.lucene.analysis.charfilter.HTMLStripCharFilter

The following examples show how to use org.apache.lucene.analysis.charfilter.HTMLStripCharFilter. These examples are extracted from open source projects. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source Project: lucene-solr   Source File: HTMLStripTransformer.java    License: Apache License 2.0   (6 votes)
/**
 * Strips HTML/XML markup from {@code value} using Lucene's {@link HTMLStripCharFilter}
 * and returns the plain-text remainder.
 *
 * @param value  the raw (possibly markup-containing) field value to strip
 * @param column the column name, used only to build the error message on failure
 * @return the stripped text as a {@code String} (declared {@code Object} to match the
 *         transformer row API)
 * @throws DataImportHandlerException with severity SEVERE, wrapping any {@link IOException}
 *         raised while reading through the filter
 */
private Object stripHTML(String value, String column) {
  StringBuilder out = new StringBuilder();
  StringReader strReader = new StringReader(value);
  // NOTE: StringReader.markSupported() is always true, so the BufferedReader branch is
  // effectively dead; it is kept for interface compatibility with the original code.
  // try-with-resources guarantees the filter (and its underlying reader) is closed even
  // when read() throws — the original only closed on the success path and leaked on error.
  try (HTMLStripCharFilter html = new HTMLStripCharFilter(
      strReader.markSupported() ? strReader : new BufferedReader(strReader))) {
    char[] cbuf = new char[1024 * 10];
    int count;
    while ((count = html.read(cbuf)) != -1) { // -1 marks end of stream
      if (count > 0) {
        out.append(cbuf, 0, count);
      }
    }
  } catch (IOException e) {
    throw new DataImportHandlerException(DataImportHandlerException.SEVERE,
            "Failed stripping HTML for column: " + column, e);
  }
  return out.toString();
}
 
Example 2
Source Project: lucene-solr   Source File: XmlInterpolationTest.java    License: Apache License 2.0   (6 votes)
/**
 * Tokenizes {@code docText} on whitespace after stripping HTML (leaving {@code <unescaped>}
 * tags intact) and returns every emitted token in order.
 *
 * @param docText the document text to strip and tokenize
 * @return the token strings, in stream order
 * @throws RuntimeException wrapping any {@link IOException} from the analysis chain
 */
private String[] analyzeReturnTokens(String docText) {
  final List<String> tokens = new ArrayList<>();

  // "unescaped" is the one tag name the char filter leaves alone.
  final Reader stripped = new HTMLStripCharFilter(new StringReader(docText),
          Collections.singleton("unescaped"));
  final WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
  final CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
  try {
    tokenizer.setReader(stripped);
    tokenizer.reset();
    // Drain the stream, collecting each term's text.
    while (tokenizer.incrementToken()) {
      tokens.add(term.toString());
    }
    tokenizer.end();
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    // Always release the tokenizer, even if analysis failed.
    IOUtils.closeQuietly(tokenizer);
  }
  return tokens.toArray(new String[tokens.size()]);
}
 
Example 3
Source Project: SolrTextTagger   Source File: XmlInterpolationTest.java    License: Apache License 2.0   (6 votes)
// Strips HTML from docText (leaving <unescaped> tags alone), whitespace-tokenizes the
// result, and returns all emitted tokens in stream order. Any IOException from the
// analysis chain is rethrown as an unchecked RuntimeException.
private String[] analyzeReturnTokens(String docText) {
  List<String> result = new ArrayList<>();

  // Char filter strips markup; the singleton set names the one tag to keep unescaped.
  Reader filter = new HTMLStripCharFilter(new StringReader(docText),
          Collections.singleton("unescaped"));
  WhitespaceTokenizer ts = new WhitespaceTokenizer();
  final CharTermAttribute termAttribute = ts.addAttribute(CharTermAttribute.class);
  try {
    ts.setReader(filter);
    ts.reset();
    // Collect each token's term text until the stream is exhausted.
    while (ts.incrementToken()) {
      result.add(termAttribute.toString());
    }
    ts.end();
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    // Close unconditionally; closeQuietly swallows close-time exceptions.
    IOUtils.closeQuietly(ts);
  }
  return result.toArray(new String[result.size()]);
}
 
Example 4
Source Project: lucene-solr   Source File: XmlInterpolationTest.java    License: Apache License 2.0   (5 votes)
/**
 * Tokenizes the HTML-stripped {@code docText} and reports the corrected (original-text)
 * offsets of the first token equal to {@code start} and the first token equal to
 * {@code end}: {@code result[0]} is the start token's start offset, {@code result[1]}
 * the end token's end offset. Either slot stays {@code -1} if its token never appears.
 * Scanning stops as soon as the end token is seen.
 *
 * @param docText the document text to analyze
 * @param start   token whose start offset is wanted
 * @param end     token whose end offset is wanted; terminates the scan when found
 * @return a two-element array {startOffset, endOffset}, with -1 for anything not found
 * @throws RuntimeException wrapping any {@link IOException} from the analysis chain
 */
private int[] analyzeTagOne(String docText, String start, String end) {
  int[] result = {-1, -1};

  Reader filter = new HTMLStripCharFilter(new StringReader(docText));

  WhitespaceTokenizer ts = new WhitespaceTokenizer();
  final CharTermAttribute termAttribute = ts.addAttribute(CharTermAttribute.class);
  final OffsetAttribute offsetAttribute = ts.addAttribute(OffsetAttribute.class);
  try {
    ts.setReader(filter);
    ts.reset();
    while (ts.incrementToken()) {
      final String termString = termAttribute.toString();
      if (termString.equals(start))
        result[0] = offsetAttribute.startOffset();
      if (termString.equals(end)) {
        result[1] = offsetAttribute.endOffset();
        // FIX: was `return result`, which skipped ts.end() and violated the
        // TokenStream contract (reset -> incrementToken -> end -> close).
        break;
      }
    }
    ts.end();
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    IOUtils.closeQuietly(ts);
  }
  return result;
}
 
Example 5
Source Project: SolrTextTagger   Source File: XmlInterpolationTest.java    License: Apache License 2.0   (5 votes)
// Tokenizes the HTML-stripped docText and returns {startOffset of first `start` token,
// endOffset of first `end` token}; either slot stays -1 if that token never appears.
// Offsets come from OffsetAttribute, i.e. they are corrected back to positions in the
// original (pre-strip) text. Returns early — skipping ts.end() — once `end` is found;
// the finally block still closes the tokenizer.
private int[] analyzeTagOne(String docText, String start, String end) {
  int[] result = {-1, -1};

  Reader filter = new HTMLStripCharFilter(new StringReader(docText));

  WhitespaceTokenizer ts = new WhitespaceTokenizer();
  final CharTermAttribute termAttribute = ts.addAttribute(CharTermAttribute.class);
  final OffsetAttribute offsetAttribute = ts.addAttribute(OffsetAttribute.class);
  try {
    ts.setReader(filter);
    ts.reset();
    while (ts.incrementToken()) {
      final String termString = termAttribute.toString();
      if (termString.equals(start))
        result[0] = offsetAttribute.startOffset();
      if (termString.equals(end)) {
        result[1] = offsetAttribute.endOffset();
        // NOTE(review): early return bypasses ts.end(); close() still runs via finally.
        return result;
      }
    }
    ts.end();
  } catch (IOException e) {
    throw new RuntimeException(e);
  } finally {
    IOUtils.closeQuietly(ts);
  }
  return result;
}
 
Example 6
Source Project: Elasticsearch   Source File: HtmlStripCharFilterFactory.java    License: Apache License 2.0   (4 votes)
// Factory hook: wraps the incoming reader in an HTMLStripCharFilter configured with this
// factory's escapedTags field (presumably the set of tag names to leave un-stripped —
// TODO confirm against the factory's constructor).
@Override
public Reader create(Reader tokenStream) {
    return new HTMLStripCharFilter(tokenStream, escapedTags);
}
 
Example 7
Source Project: crate   Source File: CommonAnalysisPlugin.java    License: Apache License 2.0   (4 votes)
/**
 * Registers the pre-configured char filters contributed by this plugin.
 * Currently only {@code html_strip}, backed by {@link HTMLStripCharFilter}; the boolean
 * flag is forwarded unchanged to {@code PreConfiguredCharFilter.singleton}.
 *
 * @return a mutable list containing the {@code html_strip} char filter registration
 */
@Override
public List<PreConfiguredCharFilter> getPreConfiguredCharFilters() {
    List<PreConfiguredCharFilter> preConfigured = new ArrayList<>();
    preConfigured.add(
        PreConfiguredCharFilter.singleton("html_strip", false, HTMLStripCharFilter::new));
    return preConfigured;
}
 
Example 8
Source Project: crate   Source File: HtmlStripCharFilterFactory.java    License: Apache License 2.0   (4 votes)
// Factory hook: wraps the incoming reader in an HTMLStripCharFilter configured with this
// factory's escapedTags field (presumably the set of tag names to leave un-stripped —
// TODO confirm against the factory's constructor).
@Override
public Reader create(Reader tokenStream) {
    return new HTMLStripCharFilter(tokenStream, escapedTags);
}