org.apache.lucene.index.AtomicReaderContext Java Examples

The following examples show how to use org.apache.lucene.index.AtomicReaderContext. Each example is taken from an open-source project; the source file and originating project are noted above each example.
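
Most of the examples below share one pattern: a composite DirectoryReader exposes its per-segment views as a List<AtomicReaderContext> via leaves(), and each context provides the segment's AtomicReader plus its docBase offset. Here is a minimal sketch of that pattern against the Lucene 4.x API (imports from org.apache.lucene.index, org.apache.lucene.store, and java.io are assumed; the method itself is illustrative and not taken from any of the projects below):

public static void listSegments(File indexDir) throws IOException {
  // Open a composite reader over the index and walk its per-segment leaves.
  DirectoryReader reader = DirectoryReader.open(FSDirectory.open(indexDir));
  try {
    for (AtomicReaderContext context : reader.leaves()) {
      AtomicReader atomicReader = context.reader();    // segment-local reader
      int docBase = context.docBase;                   // global doc id offset of this segment
      System.out.println("segment: maxDoc=" + atomicReader.maxDoc() + " docBase=" + docBase);
    }
  } finally {
    reader.close();
  }
}
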
Example #1
Source File: ShardWriter.java    From linden with Apache License 2.0
/**
 * Process an intermediate form by carrying out, on the Lucene instance of
 * the shard, the deletes and the inserts (a ram index) in the form.
 * @param form  the intermediate form containing deletes and a ram index
 * @throws IOException
 */
public void process(IntermediateForm form, FacetsConfig facetsConfig) throws IOException {
  if (facetsConfig != null) {
    DirectoryTaxonomyWriter.OrdinalMap map = new DirectoryTaxonomyWriter.MemoryOrdinalMap();
    // merge the taxonomies
    taxoWriter.addTaxonomy(form.getTaxoDirectory(), map);
    int ordinalMap[] = map.getMap();
    DirectoryReader reader = DirectoryReader.open(form.getDirectory());
    try {
      List<AtomicReaderContext> leaves = reader.leaves();
      int numReaders = leaves.size();
      AtomicReader wrappedLeaves[] = new AtomicReader[numReaders];
      for (int i = 0; i < numReaders; i++) {
        wrappedLeaves[i] = new OrdinalMappingAtomicReader(leaves.get(i).reader(), ordinalMap, facetsConfig);
      }
      writer.addIndexes(new MultiReader(wrappedLeaves));
    } finally {
      reader.close();
    }
  } else {
    writer.addIndexes(new Directory[] { form.getDirectory() });
  }
  numForms++;
}
 
Example #2
Source File: ImageQuery.java    From elasticsearch-image with Apache License 2.0
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
    Scorer scorer = scorer(context, context.reader().getLiveDocs());
    if (scorer != null) {
        int newDoc = scorer.advance(doc);
        if (newDoc == doc) {
            float score = scorer.score();
            ComplexExplanation result = new ComplexExplanation();
            result.setDescription("ImageQuery, product of:");
            result.setValue(score);
            if (getBoost() != 1.0f) {
                result.addDetail(new Explanation(getBoost(),"boost"));
                score = score / getBoost();
            }
            result.addDetail(new Explanation(score ,"image score (1/distance)"));
            result.setMatch(true);
            return result;
        }
    }

    return new ComplexExplanation(false, 0.0f, "no matching term");
}
 
Example #3
Source File: FacetExecutor.java    From incubator-retired-blur with Apache License 2.0
public void addScorers(AtomicReaderContext context, Scorer[] scorers) throws IOException {
  LOG.debug(getPrefix("adding scorers context [{0}] [{1}]"), context, Arrays.asList(scorers));
  if (scorers.length != _length) {
    throw new IOException("Scorer length is not correct expecting [" + _length + "] actual [" + scorers.length + "]");
  }
  Object key = getKey(context);
  Info info = _infoMap.get(key);
  if (info == null) {
    info = new Info(context, scorers, _locks, _instance);
    _infoMap.put(key, info);
  } else {
    AtomicReader reader = context.reader();
    LOG.warn(getPrefix("Info about reader context [{0}] already created, existing Info [{1}] current reader [{2}]."),
        context, info, reader);
  }
}
 
Example #4
Source File: MutatableAction.java    From incubator-retired-blur with Apache License 2.0
private IterableRow getIterableRow(String rowId, IndexSearcherCloseable searcher) throws IOException {
  IndexReader indexReader = searcher.getIndexReader();
  BytesRef rowIdRef = new BytesRef(rowId);
  List<AtomicReaderTermsEnum> possibleRowIds = new ArrayList<AtomicReaderTermsEnum>();
  for (AtomicReaderContext atomicReaderContext : indexReader.leaves()) {
    AtomicReader atomicReader = atomicReaderContext.reader();
    Fields fields = atomicReader.fields();
    if (fields == null) {
      continue;
    }
    Terms terms = fields.terms(BlurConstants.ROW_ID);
    if (terms == null) {
      continue;
    }
    TermsEnum termsEnum = terms.iterator(null);
    if (!termsEnum.seekExact(rowIdRef, true)) {
      continue;
    }
    // need atomic read as well...
    possibleRowIds.add(new AtomicReaderTermsEnum(atomicReader, termsEnum));
  }
  if (possibleRowIds.isEmpty()) {
    return null;
  }
  return new IterableRow(rowId, getRecords(possibleRowIds));
}
 
Example #5
Source File: LindenScoreModelStrategy.java    From linden with Apache License 2.0
public void preProcess(AtomicReaderContext context, LindenSchema schema, LindenScoreModel scoreModel) {
  this.context = context;
  fieldSchemaMap = new HashMap<>();
  for (LindenFieldSchema fieldSchema : schema.getFields()) {
    fieldSchemaMap.put(fieldSchema.getName(), fieldSchema);
  }
  // add id field
  if (!fieldSchemaMap.containsKey(schema.getId())) {
    LindenFieldSchema idFieldSchema = new LindenFieldSchema();
    idFieldSchema.setName(schema.getId());
    idFieldSchema.setType(LindenType.STRING);
    idFieldSchema.setIndexed(true);
    idFieldSchema.setOmitNorms(true);
    idFieldSchema.setOmitFreqs(true);
    idFieldSchema.setStored(true);
    idFieldSchema.setTokenized(false);
    fieldSchemaMap.put(schema.getId(), idFieldSchema);
  }
  this.scoreModel = scoreModel;
}
 
Example #6
Source File: SecureRealTimeGetComponent.java    From incubator-sentry with Apache License 2.0
/**
 * @param doc SolrDocument to check
 * @param idField field where the id is stored
 * @param fieldType type of id field
 * @param filterQuery Query to filter by
 * @param searcher SolrIndexSearcher on which to apply the filter query
 * @return the internal docid, or -1 if doc is not found or doesn't match filter
 */
private static int getFilteredInternalDocId(SolrDocument doc, SchemaField idField, FieldType fieldType,
      Query filterQuery, SolrIndexSearcher searcher) throws IOException {
  int docid = -1;
  Field f = (Field)doc.getFieldValue(idField.getName());
  String idStr = f.stringValue();
  BytesRef idBytes = new BytesRef();
  fieldType.readableToIndexed(idStr, idBytes);
  // get the internal document id
  long segAndId = searcher.lookupId(idBytes);

  // if docid is valid, run it through the filter
  if (segAndId >= 0) {
    int segid = (int) segAndId;
    AtomicReaderContext ctx = searcher.getTopReaderContext().leaves().get((int) (segAndId >> 32));
    docid = segid + ctx.docBase;
    Weight weight = filterQuery.createWeight(searcher);
    Scorer scorer = weight.scorer(ctx, null);
    if (scorer == null || segid != scorer.advance(segid)) {
      // filter doesn't match.
      docid = -1;
    }
  }
  return docid;
}
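
The long returned by searcher.lookupId(idBytes) above packs the leaf (segment) ordinal into its high 32 bits and the segment-local doc id into its low 32 bits; the arithmetic in the example decodes it. A hedged sketch of that decoding pulled into a helper (the helper itself is hypothetical, not part of the Sentry sources):

private static int toGlobalDocId(SolrIndexSearcher searcher, long segAndId) {
  // segAndId: leaf ordinal in the high 32 bits, segment-local doc id in the low 32 bits,
  // mirroring how getFilteredInternalDocId decodes the result of lookupId above.
  if (segAndId < 0) {
    return -1;                                         // id not present in the index
  }
  int leafIndex = (int) (segAndId >> 32);
  int segid = (int) segAndId;
  AtomicReaderContext ctx = searcher.getTopReaderContext().leaves().get(leafIndex);
  return segid + ctx.docBase;                          // index-wide doc id
}
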
 
Example #7
Source File: Blur024CodecTest.java    From incubator-retired-blur with Apache License 2.0
@Test
public void testDocValuesFormat() throws IOException {
  RAMDirectory directory = new RAMDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new WhitespaceAnalyzer(Version.LUCENE_43));
  conf.setCodec(new Blur024Codec());
  IndexWriter writer = new IndexWriter(directory, conf);

  Document doc = new Document();
  doc.add(new StringField("f", "v", Store.YES));
  doc.add(new SortedDocValuesField("f", new BytesRef("v")));
  writer.addDocument(doc);

  writer.close();

  DirectoryReader reader = DirectoryReader.open(directory);
  AtomicReaderContext context = reader.leaves().get(0);
  AtomicReader atomicReader = context.reader();
  SortedDocValues sortedDocValues = atomicReader.getSortedDocValues("f");
  assertTrue(sortedDocValues.getClass().getName().startsWith(DiskDocValuesProducer.class.getName()));

  reader.close();
}
 
Example #8
Source File: SecureIndexSearcher.java    From incubator-retired-blur with Apache License 2.0
public SecureIndexSearcher(IndexReaderContext context, ExecutorService executor,
    AccessControlFactory accessControlFactory, Collection<String> readAuthorizations,
    Collection<String> discoverAuthorizations, Set<String> discoverableFields, String defaultReadMaskMessage)
    throws IOException {
  super(context, executor);
  _accessControlFactory = accessControlFactory;
  _readAuthorizations = readAuthorizations;
  _discoverAuthorizations = discoverAuthorizations;
  _discoverableFields = discoverableFields;
  _defaultReadMaskMessage = defaultReadMaskMessage;
  _accessControlReader = _accessControlFactory.getReader(readAuthorizations, discoverAuthorizations,
      discoverableFields, _defaultReadMaskMessage);
  _secureIndexReader = getSecureIndexReader(context);
  List<AtomicReaderContext> leaves = _secureIndexReader.leaves();
  _leaveMap = new HashMap<Object, AtomicReaderContext>();
  for (AtomicReaderContext atomicReaderContext : leaves) {
    AtomicReader atomicReader = atomicReaderContext.reader();
    SecureAtomicReader secureAtomicReader = (SecureAtomicReader) atomicReader;
    AtomicReader originalReader = secureAtomicReader.getOriginalReader();
    Object coreCacheKey = originalReader.getCoreCacheKey();
    _leaveMap.put(coreCacheKey, atomicReaderContext);
  }
}
 
Example #9
Source File: SecureIndexSearcher.java    From incubator-retired-blur with Apache License 2.0
protected Collector getSecureCollector(final Collector collector) {
  return new Collector() {

    @Override
    public void setScorer(Scorer scorer) throws IOException {
      collector.setScorer(scorer);
    }

    @Override
    public void setNextReader(AtomicReaderContext context) throws IOException {
      Object key = context.reader().getCoreCacheKey();
      AtomicReaderContext atomicReaderContext = _leaveMap.get(key);
      collector.setNextReader(atomicReaderContext);
    }

    @Override
    public void collect(int doc) throws IOException {
      collector.collect(doc);
    }

    @Override
    public boolean acceptsDocsOutOfOrder() {
      return collector.acceptsDocsOutOfOrder();
    }
  };
}
 
Example #10
Source File: FlexibleWeight.java    From linden with Apache License 2.0
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
  FlexibleScorer scorer = (FlexibleScorer) scorer(context, context.reader().getLiveDocs());
  if (scorer != null) {
    int newDoc = scorer.advance(doc);
    if (newDoc == doc) {
      FlexibleScoreModelStrategy strategy = scorer.getStrategy();
      strategy.prepare(0, 0, true);
      return strategy.explain(similarity, query, doc);
    }
  }
  return new ComplexExplanation(false, 0.0f, "no matching term");
}
 
Example #11
Source File: DocumentVisibilityFilter.java    From incubator-retired-blur with Apache License 2.0
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  AtomicReader reader = context.reader();
  List<DocIdSet> list = new ArrayList<DocIdSet>();

  Fields fields = reader.fields();
  Terms terms = fields.terms(_fieldName);
  if (terms == null) {
    // if field is not present then show nothing.
    return DocIdSet.EMPTY_DOCIDSET;
  }
  TermsEnum iterator = terms.iterator(null);
  BytesRef bytesRef;
  DocumentVisibilityEvaluator visibilityEvaluator = new DocumentVisibilityEvaluator(_authorizations);
  while ((bytesRef = iterator.next()) != null) {
    if (isVisible(visibilityEvaluator, bytesRef)) {
      DocIdSet docIdSet = _filterCacheStrategy.getDocIdSet(_fieldName, bytesRef, reader);
      if (docIdSet != null) {
        list.add(docIdSet);
      } else {
        // Do not use acceptDocs because we want the acl cache to be version
        // agnostic.
        DocsEnum docsEnum = iterator.docs(null, null);
        list.add(buildCache(reader, docsEnum, bytesRef));
      }
    }
  }
  return getLogicalOr(list);
}
 
Example #12
Source File: EarlyTerminationCollector.java    From linden with Apache License 2.0
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
  collector.setNextReader(context);
  if (sort != null) {
    segmentSorted = SortingMergePolicy.isSorted(context.reader(), sort);
    segmentTotalCollect = segmentSorted ? numDocsToCollectPerSortedSegment : Integer.MAX_VALUE;
  } else {
    segmentTotalCollect = numDocsToCollectPerSortedSegment;
  }
  numCollected = 0;
}
 
Example #13
Source File: IndexSearcherCloseableBase.java    From incubator-retired-blur with Apache License 2.0
private Callable<Void> newSearchCallable(final Weight weight, final Collector collector, final AtomicReaderContext ctx) {
  return new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      runSearch(weight, collector, ctx);
      return null;
    }
  };
}
 
Example #14
Source File: FilterCache.java    From incubator-retired-blur with Apache License 2.0
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
  AtomicReader reader = context.reader();
  Object key = reader.getCoreCacheKey();
  DocIdSet docIdSet = _cache.get(key);
  if (docIdSet != null) {
    _hits.incrementAndGet();
    return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
  }
  // This will only allow a single instance to be created per reader, per filter
  Object lock = getLock(key);
  synchronized (lock) {
    SegmentReader segmentReader = getSegmentReader(reader);
    if (segmentReader == null) {
      LOG.warn("Could not find SegmentReader from [{0}]", reader);
      return _filter.getDocIdSet(context, acceptDocs);
    }
    Directory directory = getDirectory(segmentReader);
    if (directory == null) {
      LOG.warn("Could not find Directory from [{0}]", segmentReader);
      return _filter.getDocIdSet(context, acceptDocs);
    }
    _misses.incrementAndGet();
    String segmentName = segmentReader.getSegmentName();
    docIdSet = docIdSetToCache(_filter.getDocIdSet(context, null), reader, segmentName, directory);
    _cache.put(key, docIdSet);
    return BitsFilteredDocIdSet.wrap(docIdSet, acceptDocs);
  }
}
 
Example #15
Source File: BaseReadMaskFieldTypeDefinitionTest.java    From incubator-retired-blur with Apache License 2.0
private void checkTerms(IndexSearcher searcher, String fieldName) throws IOException {
  IndexReader reader = searcher.getIndexReader();
  for (AtomicReaderContext context : reader.leaves()) {
    AtomicReader atomicReader = context.reader();
    Fields fields = atomicReader.fields();
    Terms terms = fields.terms(fieldName);
    TermsEnum iterator = terms.iterator(null);
    BytesRef bytesRef = iterator.next();
    if (bytesRef != null) {
      System.out.println(bytesRef.utf8ToString());
      fail("There are only restricted terms for this field [" + fieldName + "]");
    }
  }
}
 
Example #16
Source File: XJoinValueSourceParser.java    From BioSolr with Apache License 2.0
@Override
@SuppressWarnings("rawtypes")
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
  final BinaryDocValues joinValues = FieldCache.DEFAULT.getTerms(readerContext.reader(), joinField, false);

  return new DoubleDocValues(this) {

    @Override
    public double doubleVal(int doc) {
      BytesRef joinValue = joinValues.get(doc);
      if (joinValue == null) {
        throw new RuntimeException("No such doc: " + doc);
      }
      Object result = results.getResult(joinValue.utf8ToString());
      if (result == null) {
        return defaultValue;
      }
      if (result instanceof Iterable) {
        Double max = null;
        for (Object object : (Iterable)result) {
          if (object != null) {
            double value = getValue(object);
            if (max == null || value > max) {
              max = value;
            }
          }
        }
        return max != null ? max : defaultValue;
      } else {
        return getValue(result);
      }
    }
    
  };
}
 
Example #17
Source File: LindenScoreQuery.java    From linden with Apache License 2.0
@Override
protected CustomScoreProvider getCustomScoreProvider(AtomicReaderContext context) throws IOException {
  try {
    return new LindenScoreProvider(context);
  } catch (Exception e) {
    throw new IOException(Throwables.getStackTraceAsString(e));
  }
}
 
Example #18
Source File: LuceneCorpusAdapter.java    From Palmetto with GNU Affero General Public License v3.0
/**
 * Creates a corpus adapter which uses the Lucene index with the given path
 * and searches on the field with the given field name.
 * 
 * @param indexPath path of the directory containing the Lucene index
 * @param fieldName name of the field that is searched
 * @return a LuceneCorpusAdapter backed by the given index
 * @throws CorruptIndexException if the index is corrupt
 * @throws IOException if the index directory cannot be opened
 */
public static LuceneCorpusAdapter create(String indexPath, String fieldName)
        throws CorruptIndexException, IOException {
    DirectoryReader dirReader = DirectoryReader.open(new NIOFSDirectory(new File(indexPath)));
    List<AtomicReaderContext> leaves = dirReader.leaves();
    AtomicReader reader[] = new AtomicReader[leaves.size()];
    AtomicReaderContext contexts[] = new AtomicReaderContext[leaves.size()];
    for (int i = 0; i < reader.length; i++) {
        contexts[i] = leaves.get(i);
        reader[i] = contexts[i].reader();
    }
    return new LuceneCorpusAdapter(dirReader, reader, contexts, fieldName);
}
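
A hedged usage sketch for this factory; the index path and field name below are placeholders rather than values from the Palmetto sources:

public static LuceneCorpusAdapter openAdapter() throws CorruptIndexException, IOException {
  // Path and field name are illustrative; point them at a real index and an indexed field.
  return LuceneCorpusAdapter.create("/data/corpus-index", "text");
}
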
 
Example #19
Source File: LuceneCorpusAdapter.java    From Palmetto with GNU Affero General Public License v3.0
protected LuceneCorpusAdapter(DirectoryReader dirReader, AtomicReader reader[], AtomicReaderContext contexts[],
        String fieldName) {
    this.dirReader = dirReader;
    this.reader = reader;
    this.contexts = contexts;
    this.fieldName = fieldName;
}
 
Example #20
Source File: FacetExecutor.java    From incubator-retired-blur with Apache License 2.0
public OpenBitSet getBitSet(AtomicReaderContext context) throws IOException {
  Info info = _infoMap.get(getKey(context));
  if (info == null) {
    throw new IOException("Info object missing.");
  }
  return info._bitSet;
}
 
Example #21
Source File: MergeSortRowIdMatcher.java    From incubator-retired-blur with Apache License 2.0
private void createCacheFile(Path file, SegmentKey segmentKey) throws IOException {
  LOG.info("Building cache for segment [{0}] to [{1}]", segmentKey, file);
  Path tmpPath = getTmpWriterPath(file.getParent());
  try (Writer writer = createWriter(_configuration, tmpPath)) {
    DirectoryReader reader = getReader();
    for (AtomicReaderContext context : reader.leaves()) {
      SegmentReader segmentReader = AtomicReaderUtil.getSegmentReader(context.reader());
      if (segmentReader.getSegmentName().equals(segmentKey.getSegmentName())) {
        writeRowIds(writer, segmentReader);
        break;
      }
    }
  }
  commitWriter(_configuration, file, tmpPath);
}
 
Example #22
Source File: LookupBuilderReducer.java    From incubator-retired-blur with Apache License 2.0
private long getTotalNumberOfRowIds(DirectoryReader reader) throws IOException {
  long total = 0;
  List<AtomicReaderContext> leaves = reader.leaves();
  for (AtomicReaderContext context : leaves) {
    AtomicReader atomicReader = context.reader();
    Terms terms = atomicReader.terms(BlurConstants.ROW_ID);
    long expectedInsertions = terms.size();
    if (expectedInsertions < 0) {
      return -1;
    }
    total += expectedInsertions;
  }
  return total;
}
 
Example #23
Source File: IndexImporter.java    From incubator-retired-blur with Apache License 2.0
private void applyDeletes(Directory directory, IndexWriter indexWriter, IndexSearcherCloseable searcher,
    String shard, boolean emitDeletes, Configuration configuration) throws IOException {
  DirectoryReader newReader = DirectoryReader.open(directory);
  try {
    List<AtomicReaderContext> newLeaves = newReader.getContext().leaves();
    BlurPartitioner blurPartitioner = new BlurPartitioner();
    Text key = new Text();
    int numberOfShards = _shardContext.getTableContext().getDescriptor().getShardCount();
    int shardId = ShardUtil.getShardIndex(shard);

    Action action = new Action() {
      @Override
      public void found(AtomicReader reader, Bits liveDocs, TermsEnum termsEnum) throws IOException {
        DocsEnum docsEnum = termsEnum.docs(liveDocs, null);
        if (docsEnum.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
          indexWriter.deleteDocuments(new Term(BlurConstants.ROW_ID, BytesRef.deepCopyOf(termsEnum.term())));
        }
      }
    };

    LOG.info("Applying deletes for table [{0}] shard [{1}] new reader [{2}]", _table, shard, newReader);
    boolean skipCheckRowIds = isInternal(newReader);
    LOG.info("Skip rowid check [{0}] for table [{1}] shard [{2}] new reader [{3}]", skipCheckRowIds, _table, shard,
        newReader);
    for (AtomicReaderContext context : newLeaves) {
      AtomicReader newAtomicReader = context.reader();
      if (isFastRowIdDeleteSupported(newAtomicReader)) {
        runNewRowIdCheckAndDelete(indexWriter, emitDeletes, blurPartitioner, key, numberOfShards, shardId,
            newAtomicReader, skipCheckRowIds);
      } else {
        runOldMergeSortRowIdCheckAndDelete(emitDeletes, searcher.getIndexReader(), blurPartitioner, key,
            numberOfShards, shardId, action, newAtomicReader);
      }
    }
  } finally {
    newReader.close();
  }
}
 
Example #24
Source File: FacetExecutor.java    From incubator-retired-blur with Apache License 2.0
Info(AtomicReaderContext context, Scorer[] scorers, Lock[] locks, String instance) {
  AtomicReader reader = context.reader();
  _instance = instance;
  _bitSet = new OpenBitSet(reader.maxDoc());
  _scorers = scorers;
  _reader = reader;
  _readerStr = _reader.toString();
  _maxDoc = _reader.maxDoc();
  _locks = locks;
}
 
Example #25
Source File: MergeSortRowIdLookup.java    From incubator-retired-blur with Apache License 2.0
public MergeSortRowIdLookup(IndexReader indexReader) throws IOException {
  if (indexReader instanceof AtomicReader) {
    addAtomicReader((AtomicReader) indexReader);
  } else {
    for (AtomicReaderContext context : indexReader.leaves()) {
      addAtomicReader(context.reader());
    }
  }
}
 
Example #26
Source File: IndexSearcherCloseableSecureBase.java    From incubator-retired-blur with Apache License 2.0
private Callable<Void> newSearchCallable(final Weight weight, final Collector collector, final AtomicReaderContext ctx) {
  return new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      runSearch(weight, collector, ctx);
      return null;
    }
  };
}
 
Example #27
Source File: IndexSearcherCloseableSecureBase.java    From incubator-retired-blur with Apache License 2.0
private void runSearch(Weight weight, Collector collector, AtomicReaderContext ctx) throws IOException {
  Tracer trace = Trace.trace("search - internal", Trace.param("AtomicReader", ctx.reader()));
  try {
    super.search(makeList(ctx), weight, collector);
  } finally {
    trace.done();
  }
}
 
Example #28
Source File: BlurSecureIndexSearcherTest.java    From incubator-retired-blur with Apache License 2.0
@Test
public void testQueryFilterWrap1() throws IOException {
  IndexReader r = getIndexReader();
  AccessControlFactory accessControlFactory = new FilterAccessControlFactory();
  Collection<String> readAuthorizations = new ArrayList<String>();
  Collection<String> discoverAuthorizations = new ArrayList<String>();
  Set<String> discoverableFields = new HashSet<String>(Arrays.asList("rowid"));
  BlurSecureIndexSearcher blurSecureIndexSearcher = new BlurSecureIndexSearcher(r, null, accessControlFactory,
      readAuthorizations, discoverAuthorizations, discoverableFields, null);
  Query wrapFilter;
  Query query = new TermQuery(new Term("a", "b"));
  Filter filter = new Filter() {
    @Override
    public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
      throw new RuntimeException("Not implemented.");
    }
  };
  {
    Term primeDocTerm = new Term(BlurConstants.PRIME_DOC, BlurConstants.PRIME_DOC_VALUE);
    ScoreType scoreType = ScoreType.SUPER;
    SuperQuery superQuery = new SuperQuery(query, scoreType, primeDocTerm);
    wrapFilter = blurSecureIndexSearcher.wrapFilter(superQuery, filter);
    System.out.println(wrapFilter);
  }
  {
    assertTrue(wrapFilter instanceof SuperQuery);
    SuperQuery sq = (SuperQuery) wrapFilter;
    Query inner = sq.getQuery();
    assertTrue(inner instanceof FilteredQuery);
    FilteredQuery filteredQuery = (FilteredQuery) inner;
    Query innerFilteredQuery = filteredQuery.getQuery();
    assertEquals(innerFilteredQuery, query);
    assertTrue(filteredQuery.getFilter() == filter);
  }
}
 
Example #29
Source File: InfiniteLoopCommand.java    From incubator-retired-blur with Apache License 2.0
@Override
public Boolean execute(IndexContext context) throws IOException, InterruptedException {
  try {
    IndexReader indexReader = context.getIndexReader();
    while (true) {
      long hash = 0;
      for (AtomicReaderContext atomicReaderContext : indexReader.leaves()) {
        AtomicReader reader = atomicReaderContext.reader();
        for (String field : reader.fields()) {
          Terms terms = reader.terms(field);
          BytesRef bytesRef;
          TermsEnum iterator = terms.iterator(null);
          while ((bytesRef = iterator.next()) != null) {
            hash += bytesRef.hashCode();
          }
        }
      }
      System.out.println("hashcode = " + hash);
    }
  } catch (IOException e) {
    e.printStackTrace();
    throw e;
  } catch (Throwable t) {
    t.printStackTrace();
    if (t instanceof InterruptedException) {
      throw t;
    } else if (t instanceof RuntimeException) {
      throw (RuntimeException) t;
    }
    throw new RuntimeException(t);
  }
}
 
Example #30
Source File: GenericBlurRecordWriter.java    From incubator-retired-blur with Apache License 2.0
private AtomicReader getAtomicReader(DirectoryReader reader) throws IOException {
  List<AtomicReaderContext> leaves = reader.leaves();
  if (leaves.size() == 1) {
    return leaves.get(0).reader();
  }
  throw new IOException("Reader [" + reader + "] has more than one segment after optimize.");
}