org.apache.lucene.index.FilterLeafReader Java Examples

The following examples show how to use org.apache.lucene.index.FilterLeafReader. They are drawn from open-source projects; the source file and project are noted above each example.
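
Before diving in, it helps to see the pattern all of these examples share: FilterLeafReader delegates every call to the LeafReader it wraps (held in the protected field in), and subclasses override only the methods they want to intercept. Because wrapping may change what the reader exposes, the two cache-helper methods are left abstract, so even a pass-through wrapper must implement them. Here is a minimal sketch; the class name and the logging override are illustrative, not taken from any of the projects below:

import java.io.IOException;

import org.apache.lucene.index.FilterLeafReader;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;

// Hypothetical pass-through wrapper: everything not overridden is delegated to `in`.
class LoggingLeafReader extends FilterLeafReader {

  LoggingLeafReader(LeafReader in) {
    super(in);
  }

  @Override
  public NumericDocValues getNormValues(String field) throws IOException {
    System.out.println("norms requested for field: " + field);
    return super.getNormValues(field); // super delegates to the wrapped reader
  }

  // Returning null opts out of caching, which is always safe for a wrapper.
  // Forward in.getCoreCacheHelper()/in.getReaderCacheHelper() only when the
  // wrapper does not change the content of the index (compare Examples #8 and #13).
  @Override
  public CacheHelper getCoreCacheHelper() {
    return null;
  }

  @Override
  public CacheHelper getReaderCacheHelper() {
    return null;
  }
}
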
Example #1
Source File: TestQueryBitSetProducer.java    From lucene-solr with Apache License 2.0
public DummyDirectoryReader(DirectoryReader in) throws IOException {
  super(in, new SubReaderWrapper() {
    @Override
    public LeafReader wrap(LeafReader reader) {
      return new FilterLeafReader(reader) {

        @Override
        public CacheHelper getCoreCacheHelper() {
          return null;
        }

        @Override
        public CacheHelper getReaderCacheHelper() {
          return null;
        }};
    }
  });
}
 
Example #2
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0
public DummyDirectoryReader(DirectoryReader in) throws IOException {
  super(in, new SubReaderWrapper() {
    @Override
    public LeafReader wrap(LeafReader reader) {
      return new FilterLeafReader(reader) {
        @Override
        public CacheHelper getCoreCacheHelper() {
          return null;
        }
        @Override
        public CacheHelper getReaderCacheHelper() {
          return null;
        }
      };
    }
  });
}
 
Example #3
Source File: LukeRequestHandler.java    From lucene-solr with Apache License 2.0
/** Returns the sum of RAM bytes used by each segment, or -1 if any segment does not report its usage */
private static long getIndexHeapUsed(DirectoryReader reader) {
  return reader.leaves().stream()
      .map(LeafReaderContext::reader)
      .map(FilterLeafReader::unwrap)
      .map(leafReader -> {
        if (leafReader instanceof Accountable) {
          return ((Accountable) leafReader).ramBytesUsed();
        } else {
          return -1L; // unsupported
        }
      })
      .mapToLong(Long::longValue)
      .reduce(0, (left, right) -> left == -1 || right == -1 ? -1 : left + right);
  // if any leaves are unsupported (-1), we ultimately return -1.
}
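
Because the reduction maps any non-Accountable leaf to -1 and propagates it, callers should treat a negative result as "unknown" rather than as zero. A hypothetical call site:

long heapUsed = getIndexHeapUsed(reader); // hypothetical caller
String display = heapUsed < 0 ? "unknown" : RamUsageEstimator.humanReadableUnits(heapUsed);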
 
Example #4
Source File: Engine.java    From Elasticsearch with Apache License 2.0
/**
 * Tries to extract a segment reader from the given index reader.
 * If no SegmentReader can be extracted an {@link IllegalStateException} is thrown.
 */
protected static SegmentReader segmentReader(LeafReader reader) {
    if (reader instanceof SegmentReader) {
        return (SegmentReader) reader;
    } else if (reader instanceof FilterLeafReader) {
        final FilterLeafReader fReader = (FilterLeafReader) reader;
        return segmentReader(FilterLeafReader.unwrap(fReader));
    }
    // hard fail - we can't get a SegmentReader
    throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
}
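
A hypothetical caller uses this to reach segment-level statistics through any wrappers, relying on the IllegalStateException when no SegmentReader exists underneath:

// Hypothetical usage: reach per-segment accounting regardless of wrapping.
SegmentReader sr = segmentReader(leafReaderContext.reader()); // throws if not backed by a SegmentReader
long bytes = sr.ramBytesUsed();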
 
Example #5
Source File: TestUnifiedHighlighterTermVec.java    From lucene-solr with Apache License 2.0
@Override
public LeafReader wrap(LeafReader reader) {
  return new FilterLeafReader(reader) {
    BitSet seenDocIDs = new BitSet();

    @Override
    public Fields getTermVectors(int docID) throws IOException {
      // if we're invoked by ParallelLeafReader then we can't do our assertion. TODO see LUCENE-6868
      if (callStackContains(ParallelLeafReader.class) == false
          && callStackContains(CheckIndex.class) == false) {
        assertFalse("Should not request TVs for doc more than once.", seenDocIDs.get(docID));
        seenDocIDs.set(docID);
      }

      return super.getTermVectors(docID);
    }

    @Override
    public CacheHelper getCoreCacheHelper() {
      return null;
    }

    @Override
    public CacheHelper getReaderCacheHelper() {
      return null;
    }
  };
}
 
Example #6
Source File: AnalyzingInfixSuggester.java    From lucene-solr with Apache License 2.0
@Override
public long ramBytesUsed() {
  long mem = RamUsageEstimator.shallowSizeOf(this);
  try {
    if (searcherMgr != null) {
      SearcherManager mgr;
      IndexSearcher searcher;
      synchronized (searcherMgrLock) {
        mgr = searcherMgr; // acquire & release on same SearcherManager, via local reference
        searcher = mgr.acquire();
      }
      try {
        for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
          LeafReader reader = FilterLeafReader.unwrap(context.reader());
          if (reader instanceof SegmentReader) {
            mem += ((SegmentReader) reader).ramBytesUsed(); // cast the unwrapped reader; context.reader() may still be a wrapper
          }
        }
      } finally {
        mgr.release(searcher);
      }
    }
    return mem;
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
 
Example #7
Source File: AnalyzingInfixSuggester.java    From lucene-solr with Apache License 2.0
@Override
public Collection<Accountable> getChildResources() {
  List<Accountable> resources = new ArrayList<>();
  try {
    if (searcherMgr != null) {
      SearcherManager mgr;
      IndexSearcher searcher;
      synchronized (searcherMgrLock) {
        mgr = searcherMgr; // acquire & release on same SearcherManager, via local reference
        searcher = mgr.acquire();
      }
      try {
        for (LeafReaderContext context : searcher.getIndexReader().leaves()) {
          LeafReader reader = FilterLeafReader.unwrap(context.reader());
          if (reader instanceof SegmentReader) {
            resources.add(Accountables.namedAccountable("segment", (SegmentReader)reader));
          }
        }
      } finally {
        mgr.release(searcher);
      }
    }
    return Collections.unmodifiableList(resources);
  } catch (IOException ioe) {
    throw new RuntimeException(ioe);
  }
}
 
Example #8
Source File: TestTermScorer.java    From lucene-solr with Apache License 2.0
public void testDoesNotLoadNorms() throws IOException {
  Term allTerm = new Term(FIELD, "all");
  TermQuery termQuery = new TermQuery(allTerm);
  
  LeafReader forbiddenNorms = new FilterLeafReader(indexReader) {
    @Override
    public NumericDocValues getNormValues(String field) throws IOException {
      fail("Norms should not be loaded");
      // unreachable
      return null;
    }

    @Override
    public CacheHelper getCoreCacheHelper() {
      return in.getCoreCacheHelper();
    }

    @Override
    public CacheHelper getReaderCacheHelper() {
      return in.getReaderCacheHelper();
    }
  };
  // We don't use newSearcher because it sometimes runs checkIndex which loads norms
  IndexSearcher indexSearcher = new IndexSearcher(forbiddenNorms);
  
  Weight weight = indexSearcher.createWeight(termQuery, ScoreMode.COMPLETE, 1);
  expectThrows(AssertionError.class, () -> {
    weight.scorer(forbiddenNorms.getContext()).iterator().nextDoc();
  });
  
  Weight weight2 = indexSearcher.createWeight(termQuery, ScoreMode.COMPLETE_NO_SCORES, 1);
  // should not fail this time since norms are not necessary
  weight2.scorer(forbiddenNorms.getContext()).iterator().nextDoc();
}
 
Example #9
Source File: TestSearcherManager.java    From lucene-solr with Apache License 2.0
public MyFilterDirectoryReader(DirectoryReader in) throws IOException {
  super(in, 
        new FilterDirectoryReader.SubReaderWrapper() {
          @Override
          public LeafReader wrap(LeafReader reader) {
            FilterLeafReader wrapped = new MyFilterLeafReader(reader);
            assertEquals(reader, wrapped.getDelegate());
            return wrapped;
          }
        });
}
 
Example #10
Source File: LegacyNumericUtils.java    From lucene-solr with Apache License 2.0
private static Terms intTerms(Terms terms) {
  return new FilterLeafReader.FilterTerms(terms) {
      @Override
      public TermsEnum iterator() throws IOException {
        return filterPrefixCodedInts(in.iterator());
      }
    };
}
 
Example #11
Source File: LegacyNumericUtils.java    From lucene-solr with Apache License 2.0
private static Terms longTerms(Terms terms) {
  return new FilterLeafReader.FilterTerms(terms) {
      @Override
      public TermsEnum iterator() throws IOException {
        return filterPrefixCodedLongs(in.iterator());
      }
    };
}
 
Example #12
Source File: ElasticsearchLeafReader.java    From crate with Apache License 2.0
public static ElasticsearchLeafReader getElasticsearchLeafReader(LeafReader reader) {
    if (reader instanceof FilterLeafReader) {
        if (reader instanceof ElasticsearchLeafReader) {
            return (ElasticsearchLeafReader) reader;
        } else {
            // We need to use FilterLeafReader#getDelegate and not FilterLeafReader#unwrap,
            // because if there are multiple levels of filtered leaf readers, unwrap()
            // immediately returns the innermost leaf reader, skipping over any intermediate
            // filtered leaf reader that may be an instance of ElasticsearchLeafReader.
            // That could cause us to miss the shardId.
            return getElasticsearchLeafReader(((FilterLeafReader) reader).getDelegate());
        }
    }
    return null;
}
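
To make that comment concrete: FilterLeafReader.unwrap jumps straight to the innermost reader, while getDelegate peels off exactly one layer, which is what lets the recursive call above inspect every level. A sketch with two hypothetical wrapper classes:

// Hypothetical nesting: outer wraps inner, inner wraps a SegmentReader.
LeafReader inner = new SomeFilterLeafReader(segmentReader);     // hypothetical wrapper
LeafReader outer = new OtherFilterLeafReader(inner);            // hypothetical wrapper

LeafReader innermost = FilterLeafReader.unwrap(outer);          // the SegmentReader; `inner` is skipped
LeafReader oneLevel = ((FilterLeafReader) outer).getDelegate(); // `inner`, one layer at a time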
 
Example #13
Source File: FieldMaskingReader.java    From crate with Apache License 2.0
public FieldMaskingReader(String field, DirectoryReader in) throws IOException {
    super(in, new FilterDirectoryReader.SubReaderWrapper() {
        @Override
        public LeafReader wrap(LeafReader reader) {
            return new FilterLeafReader(new FieldFilterLeafReader(reader, Collections.singleton(field), true)) {

                // FieldFilterLeafReader does not forward cache helpers,
                // since it considers that illegal: it changes the content
                // of the index. However, we want this behavior for tests,
                // and security plugins are careful to only use the cache
                // when it is valid.

                @Override
                public CacheHelper getReaderCacheHelper() {
                    return reader.getReaderCacheHelper();
                }

                @Override
                public CacheHelper getCoreCacheHelper() {
                    return reader.getCoreCacheHelper();
                }
            };
        }
    });
    this.field = field;
}
 
Example #14
Source File: SegmentsInfoRequestHandler.java    From lucene-solr with Apache License 2.0
private SimpleOrderedMap<Object> getSegmentInfo(
    SegmentCommitInfo segmentCommitInfo, boolean withSizeInfo, boolean withFieldInfos,
    List<LeafReaderContext> leafContexts, IndexSchema schema) throws IOException {
  SimpleOrderedMap<Object> segmentInfoMap = new SimpleOrderedMap<>();

  segmentInfoMap.add(NAME, segmentCommitInfo.info.name);
  segmentInfoMap.add("delCount", segmentCommitInfo.getDelCount());
  segmentInfoMap.add("softDelCount", segmentCommitInfo.getSoftDelCount());
  segmentInfoMap.add("hasFieldUpdates", segmentCommitInfo.hasFieldUpdates());
  segmentInfoMap.add("sizeInBytes", segmentCommitInfo.sizeInBytes());
  segmentInfoMap.add("size", segmentCommitInfo.info.maxDoc());
  long timestamp = Long.parseLong(segmentCommitInfo.info.getDiagnostics()
      .get("timestamp"));
  segmentInfoMap.add("age", new Date(timestamp));
  segmentInfoMap.add("source",
      segmentCommitInfo.info.getDiagnostics().get("source"));
  segmentInfoMap.add("version", segmentCommitInfo.info.getVersion().toString());
  // don't open a new SegmentReader - try to find the right one from the leaf contexts
  SegmentReader seg = null;
  for (LeafReaderContext lrc : leafContexts) {
    LeafReader leafReader = lrc.reader();
    leafReader = FilterLeafReader.unwrap(leafReader);
    if (leafReader instanceof SegmentReader) {
      SegmentReader sr = (SegmentReader)leafReader;
      if (sr.getSegmentInfo().info.equals(segmentCommitInfo.info)) {
        seg = sr;
        break;
      }
    }
  }
  if (seg != null) {
    LeafMetaData metaData = seg.getMetaData();
    if (metaData != null) {
      segmentInfoMap.add("createdVersionMajor", metaData.getCreatedVersionMajor());
      segmentInfoMap.add("minVersion", metaData.getMinVersion().toString());
      if (metaData.getSort() != null) {
        segmentInfoMap.add("sort", metaData.getSort().toString());
      }
    }
  }
  if (!segmentCommitInfo.info.getDiagnostics().isEmpty()) {
    segmentInfoMap.add("diagnostics", segmentCommitInfo.info.getDiagnostics());
  }
  if (!segmentCommitInfo.info.getAttributes().isEmpty()) {
    segmentInfoMap.add("attributes", segmentCommitInfo.info.getAttributes());
  }
  if (withSizeInfo) {
    Directory dir = segmentCommitInfo.info.dir;
    List<Pair<String, Long>> files = segmentCommitInfo.files().stream()
        .map(f -> {
          long size = -1;
          try {
            size = dir.fileLength(f);
          } catch (IOException e) {
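            // ignore: leave size at -1 to mark the file length as unknown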
          }
          return new Pair<String, Long>(f, size);
        }).sorted((p1, p2) -> Long.compare(p2.second(), p1.second())) // largest first
          .collect(Collectors.toList());
    if (!files.isEmpty()) {
      SimpleOrderedMap<Object> topFiles = new SimpleOrderedMap<>();
      for (int i = 0; i < Math.min(files.size(), 5); i++) {
        Pair<String, Long> p = files.get(i);
        topFiles.add(p.first(), RamUsageEstimator.humanReadableUnits(p.second()));
      }
      segmentInfoMap.add("largestFiles", topFiles);
    }
  }
  if (seg != null && withSizeInfo) {
    SimpleOrderedMap<Object> ram = new SimpleOrderedMap<>();
    ram.add("total", seg.ramBytesUsed());
    for (Accountable ac : seg.getChildResources()) {
      accountableToMap(ac, ram::add);
    }
    segmentInfoMap.add("ramBytesUsed", ram);
  }
  if (withFieldInfos) {
    if (seg == null) {
      log.debug("Skipping segment info - not available as a SegmentReader: {}", segmentCommitInfo);
    } else {
      FieldInfos fis = seg.getFieldInfos();
      SimpleOrderedMap<Object> fields = new SimpleOrderedMap<>();
      for (FieldInfo fi : fis) {
        fields.add(fi.name, getFieldInfo(seg, fi, schema));
      }
      segmentInfoMap.add("fields", fields);
    }
  }

  return segmentInfoMap;
}