Java Code Examples for org.apache.solr.search.DocSet

The following examples show how to use org.apache.solr.search.DocSet. The examples are extracted from open source projects; the source project, source file, and license are noted above each example where available.
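
Most of the examples below follow the same basic pattern: obtain a DocSet for a query from the SolrIndexSearcher, then walk its internal Lucene document ids with a DocIterator. The snippet below is a minimal, hedged sketch of that pattern and is not taken from any of the projects; the field name and value are placeholders.

import java.io.IOException;

import org.apache.lucene.index.Term;
import org.apache.lucene.search.TermQuery;
import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.SolrIndexSearcher;

public class DocSetUsageSketch {
  static int countMatches(SolrIndexSearcher searcher) throws IOException {
    // getDocSet() consults the filter cache and returns the matching, non-deleted documents.
    DocSet docs = searcher.getDocSet(new TermQuery(new Term("field_name", "value")));
    int count = 0;
    for (DocIterator it = docs.iterator(); it.hasNext(); ) {
      int docId = it.nextDoc(); // internal Lucene document id
      count++;
    }
    return count; // same value as docs.size(); the loop just demonstrates iteration
  }
}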
Example 1
SolrCachingScorerDoIdSetIterator(DocSet in, LeafReaderContext context, SolrIndexSearcher searcher)
{
    this.context = context;

    if (in instanceof BitDocSet)
    {
        this.matches = (BitDocSet) in;
    }
    else
    {
        this.matches = new BitDocSet(new FixedBitSet(searcher.maxDoc()));
        for (DocIterator it = in.iterator(); it.hasNext(); /* */)
        {
            matches.addUnique(it.nextDoc());
        }
    }
    bitSet = matches.getBits();

    doc = getBase() - 1;
}
 
Example 2
public static SolrOwnerScorer createOwnerScorer(Weight weight, LeafReaderContext context, SolrIndexSearcher searcher, String authority) throws IOException
{
    if (AuthorityType.getAuthorityType(authority) == AuthorityType.USER)
    {
        DocSet ownedDocs = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_OWNERLOOKUP_CACHE, authority);

        if (ownedDocs == null)
        {
            // Cache miss: query the index for docs where the owner matches the authority. 
            ownedDocs = searcher.getDocSet(new TermQuery(new Term(QueryConstants.FIELD_OWNER, authority)));
            searcher.cacheInsert(CacheConstants.ALFRESCO_OWNERLOOKUP_CACHE, authority, ownedDocs);
        }
        return new SolrOwnerScorer(weight, ownedDocs, context, searcher);
    }
    
    // Return an empty doc set, as the authority isn't a user.
    return new SolrOwnerScorer(weight, new BitDocSet(new FixedBitSet(0)), context, searcher);
}
 
Example 3
Source Project: BioSolr   Source File: ParentNodeFacetTreeBuilder.java    License: Apache License 2.0
private Map<String, Set<String>> findParentIdsForNodes(SolrIndexSearcher searcher, Collection<String> nodeIds) throws IOException {
	Map<String, Set<String>> parentIds = new HashMap<>();
	
	LOGGER.debug("Looking up parents for {} nodes", nodeIds.size());
	Query filter = buildFilterQuery(getNodeField(), nodeIds);
	LOGGER.trace("Filter query: {}", filter);
	
	DocSet docs = searcher.getDocSet(filter);
	
	for (DocIterator it = docs.iterator(); it.hasNext(); ) {
		Document doc = searcher.doc(it.nextDoc(), docFields);
		String nodeId = doc.get(getNodeField());
		
		Set<String> parentIdValues = new HashSet<>(Arrays.asList(doc.getValues(parentField)));
		parentIds.put(nodeId, parentIdValues);
		
		// Record the label, if required
		if (isLabelRequired(nodeId)) {
			recordLabel(nodeId, doc.getValues(getLabelField()));
		}
	}
	
	return parentIds;
}
 
Example 4
Source Project: lucene-solr   Source File: SimpleFacets.java    License: Apache License 2.0
/**
 * Returns a grouped facet count for the facet query
 *
 * @see FacetParams#FACET_QUERY
 */
public int getGroupedFacetQueryCount(Query facetQuery, DocSet docSet) throws IOException {
  // It is okay to retrieve group.field from global because it is never a local param
  String groupField = global.get(GroupParams.GROUP_FIELD);
  if (groupField == null) {
    throw new SolrException (
        SolrException.ErrorCode.BAD_REQUEST,
        "Specify the group.field as parameter or local parameter"
    );
  }

  @SuppressWarnings({"rawtypes"})
  AllGroupsCollector collector = new AllGroupsCollector<>(new TermGroupSelector(groupField));
  searcher.search(QueryUtils.combineQueryAndFilter(facetQuery, docSet.getTopFilter()), collector);
  return collector.getGroupCount();
}
 
Example 5
Source Project: lucene-solr   Source File: SimpleFacets.java    License: Apache License 2.0
/**
 * Computes the term-&gt;count counts for the specified term values relative to the base docset in {@code parsed}.
 *
 * @param field the name of the field to compute term counts against
 * @param parsed contains the docset to compute term counts relative to
 * @param terms a list of term values (in the specified field) to compute the counts for
 */
protected NamedList<Integer> getListedTermCounts(String field, final ParsedParams parsed, List<String> terms)
    throws IOException {
  final String sort = parsed.params.getFieldParam(field, FacetParams.FACET_SORT, "empty");
  final SchemaField sf = searcher.getSchema().getField(field);
  final FieldType ft = sf.getType();
  final DocSet baseDocset = parsed.docs;
  final NamedList<Integer> res = new NamedList<>();
  Stream<String> inputStream = terms.stream();
  if (sort.equals(FacetParams.FACET_SORT_INDEX)) { // it might always make sense
    inputStream = inputStream.sorted();
  }
  Stream<SimpleImmutableEntry<String,Integer>> termCountEntries = inputStream
      .map((term) -> new SimpleImmutableEntry<>(term, numDocs(term, sf, ft, baseDocset)));
  if (sort.equals(FacetParams.FACET_SORT_COUNT)) {
    termCountEntries = termCountEntries.sorted(Collections.reverseOrder(Map.Entry.comparingByValue()));
  }
  termCountEntries.forEach(e -> res.add(e.getKey(), e.getValue()));
  return res;
}
 
Example 6
Source Project: BioSolr   Source File: TestXJoinQParserPlugin.java    License: Apache License 2.0
@Test
public void testMultiValued() throws Exception {
  Query q = parse(COMPONENT_NAME_4);
  DocSet docs = searcher.getDocSet(q);

  assertEquals(4, docs.size());
  DocIterator it = docs.iterator();
  assertTrue(it.hasNext());
  assertEquals(0, it.nextDoc());
  assertTrue(it.hasNext());
  assertEquals(1, it.nextDoc());
  assertTrue(it.hasNext());
  assertEquals(2, it.nextDoc());
  assertTrue(it.hasNext());
  assertEquals(3, it.nextDoc());
  assertFalse(it.hasNext());    
}
 
Example 7
Source Project: lucene-solr   Source File: SolrRangeQuery.java    License: Apache License 2.0
private DocSet createDocSet(SolrIndexSearcher searcher, long cost) throws IOException {
  int maxDoc = searcher.maxDoc();
  BitDocSet liveDocs = searcher.getLiveDocSet();
  FixedBitSet liveBits = liveDocs.size() == maxDoc ? null : liveDocs.getBits();

  DocSetBuilder builder = new DocSetBuilder(maxDoc, cost);

  List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();

  int maxTermsPerSegment = 0;
  for (LeafReaderContext ctx : leaves) {
    TermsEnum te = getTermsEnum(ctx);
    int termsVisited = builder.add(te, ctx.docBase);
    maxTermsPerSegment = Math.max(maxTermsPerSegment, termsVisited);
  }

  DocSet set =  maxTermsPerSegment <= 1 ? builder.buildUniqueInOrder(liveBits) : builder.build(liveBits);
  return DocSetUtil.getDocSet(set, searcher);
}
 
Example 8
Source Project: lucene-solr   Source File: CommandHandler.java    License: Apache License 2.0
private DocSet computeGroupedDocSet(Query query, ProcessedFilter filter, List<Collector> collectors) throws IOException {
  @SuppressWarnings({"rawtypes"})
  Command firstCommand = commands.get(0);
  String field = firstCommand.getKey();
  SchemaField sf = searcher.getSchema().getField(field);
  FieldType fieldType = sf.getType();
  
  @SuppressWarnings({"rawtypes"})
  final AllGroupHeadsCollector allGroupHeadsCollector;
  if (fieldType.getNumberType() != null) {
    ValueSource vs = fieldType.getValueSource(sf, null);
    allGroupHeadsCollector = AllGroupHeadsCollector.newCollector(new ValueSourceGroupSelector(vs, new HashMap<>()),
        firstCommand.getWithinGroupSort());
  } else {
    allGroupHeadsCollector
        = AllGroupHeadsCollector.newCollector(new TermGroupSelector(firstCommand.getKey()), firstCommand.getWithinGroupSort());
  }
  if (collectors.isEmpty()) {
    searchWithTimeLimiter(query, filter, allGroupHeadsCollector);
  } else {
    collectors.add(allGroupHeadsCollector);
    searchWithTimeLimiter(query, filter, MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()])));
  }

  return new BitDocSet(allGroupHeadsCollector.retrieveGroupHeads(searcher.maxDoc()));
}
 
Example 9
Source Project: lucene-solr   Source File: StatsField.java    License: Apache License 2.0
/**
 * Computes the {@link StatsValues} for this {@link StatsField} relative to the 
 * specified {@link DocSet} 
 * @see #computeBaseDocSet
 */
public StatsValues computeLocalStatsValues(DocSet base) throws IOException {

  if (statsToCalculate.isEmpty()) { 
    // perf optimization for the case where we compute nothing
    // ie: stats.field={!min=$domin}myfield&domin=false
    return StatsValuesFactory.createStatsValues(this);
  }

  if (null != schemaField && !schemaField.getType().isPointField()
      && (schemaField.multiValued() || schemaField.getType().multiValuedFieldCache())) {

    // TODO: should this also be used for single-valued string fields? (should work fine)
    return DocValuesStats.getCounts(searcher, this, base, facets);
  } else {
    // either a single valued field we pull from FieldCache, or an explicit
    // function ValueSource
    return computeLocalValueSourceStats(base);
  }
}
 
Example 10
Source Project: lucene-solr   Source File: BlockJoin.java    License: Apache License 2.0
/** acceptDocs will normally be used to keep deleted documents from being generated as part of the answer DocSet (just use *:*),
 *  although it can be used to further constrain the generated documents.
 */
public static DocSet toChildren(DocSet parentInput, BitDocSet parentList, DocSet acceptDocs, QueryContext qcontext) throws IOException {
  FixedBitSet parentBits = parentList.getBits();
  DocSetCollector collector = new DocSetCollector(qcontext.searcher().maxDoc());
  DocIterator iter = parentInput.iterator();
  while (iter.hasNext()) {
    int parentDoc = iter.nextDoc();
    if (!parentList.exists(parentDoc) || parentDoc == 0) { // test for parentDoc==0 here to avoid passing -1 to prevSetBit later on
      // not a parent, or parent has no children
      continue;
    }
    int prevParent = parentBits.prevSetBit(parentDoc - 1);
    for (int childDoc = prevParent+1; childDoc<parentDoc; childDoc++) {
      if (acceptDocs != null && !acceptDocs.exists(childDoc)) continue;  // only select live docs
      collector.collect(childDoc);
    }
  }
  return collector.getDocSet();
}
 
Example 11
Source Project: lucene-solr   Source File: BlockJoin.java    License: Apache License 2.0
/** childInput may also contain parents (i.e. a parent or below will all roll up to that parent) */
public static DocSet toParents(DocSet childInput, BitDocSet parentList, QueryContext qcontext) throws IOException {
  FixedBitSet parentBits = parentList.getBits();
  DocSetCollector collector = new DocSetCollector(qcontext.searcher().maxDoc());
  DocIterator iter = childInput.iterator();
  int currentParent = -1;
  while (iter.hasNext()) {
    int childDoc = iter.nextDoc(); // TODO: skipping
    if (childDoc <= currentParent) { // use <= since we also allow parents in the input
      // we already visited this parent
      continue;
    }
    currentParent = parentBits.nextSetBit(childDoc);
    if (currentParent != DocIdSetIterator.NO_MORE_DOCS) {
      // only collect the parent the first time we skip to it
      collector.collect( currentParent );
    }
  }
  return collector.getDocSet();
}
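
The two BlockJoin helpers above are normally driven from a block-join query parser. The fragment below is a hedged usage sketch, not code from the project: qcontext, parentFilterQuery and childQuery are assumed to be supplied by the surrounding parser code.

// Hedged sketch; qcontext, parentFilterQuery and childQuery are assumed from the caller.
SolrIndexSearcher searcher = qcontext.searcher();
BitDocSet allParents = searcher.getDocSetBits(parentFilterQuery); // bitset of every parent document
DocSet matchedChildren = searcher.getDocSet(childQuery);          // children matching the child-level query

// Roll the matched children up to their parents, then expand back down to all children of
// those parents, restricting the output to live (non-deleted) documents.
DocSet parents = BlockJoin.toParents(matchedChildren, allParents, qcontext);
DocSet children = BlockJoin.toChildren(parents, allParents, searcher.getLiveDocSet(), qcontext);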
 
Example 12
Source Project: lucene-solr   Source File: SolrPluginUtils.java    License: Apache License 2.0
/**
 * Executes a basic query
 */
public static DocList doSimpleQuery(String sreq,
                                    SolrQueryRequest req,
                                    int start, int limit) throws IOException {
  List<String> commands = StrUtils.splitSmart(sreq,';');

  String qs = commands.size() >= 1 ? commands.get(0) : "";
  try {
    Query query = QParser.getParser(qs, req).getQuery();

    // If the first non-query, non-filter command is a simple sort on an indexed field, then
    // we can use the Lucene sort ability.
    Sort sort = null;
    if (commands.size() >= 2) {
      sort = SortSpecParsing.parseSortSpec(commands.get(1), req).getSort();
    }

    DocList results = req.getSearcher().getDocList(query, (DocSet) null, sort, start, limit);
    return results;
  } catch (SyntaxError e) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error parsing query: " + qs);
  }

}
 
Example 13
Source Project: lucene-solr   Source File: TaggerRequestHandler.java    License: Apache License 2.0
/**
 * The set of documents matching the provided 'fq' (filter queries), excluding deleted docs.
 * If null is returned, then all docs are available.
 */
private Bits computeDocCorpus(SolrQueryRequest req) throws SyntaxError, IOException {
  final String[] corpusFilterQueries = req.getParams().getParams("fq");
  final SolrIndexSearcher searcher = req.getSearcher();
  final Bits docBits;
  if (corpusFilterQueries != null && corpusFilterQueries.length > 0) {
    List<Query> filterQueries = new ArrayList<Query>(corpusFilterQueries.length);
    for (String corpusFilterQuery : corpusFilterQueries) {
      QParser qParser = QParser.getParser(corpusFilterQuery, null, req);
      try {
        filterQueries.add(qParser.parse());
      } catch (SyntaxError e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
      }
    }

    final DocSet docSet = searcher.getDocSet(filterQueries);//hopefully in the cache

    docBits = docSet.getBits();
  } else {
    docBits = searcher.getSlowAtomicReader().getLiveDocs();
  }
  return docBits;
}
 
Example 14
Source Project: BioSolr   Source File: TestXJoinQParserPlugin.java    License: Apache License 2.0
@Test
public void testSingleComponent() throws Exception {
  Query q = parse(COMPONENT_NAME);
  DocSet docs = searcher.getDocSet(q);

  assertEquals(2, docs.size());
  DocIterator it = docs.iterator();
  assertTrue(it.hasNext());
  assertEquals(1, it.nextDoc());
  assertTrue(it.hasNext());
  assertEquals(3, it.nextDoc());
  assertFalse(it.hasNext());
}
 
Example 15
Source Project: lucene-solr   Source File: StatsComponent.java    License: Apache License 2.0
@Override
public void process(ResponseBuilder rb) throws IOException {
  if (!rb.doStats) return;
  Map<String, StatsValues> statsValues = new LinkedHashMap<>();

  for (StatsField statsField : rb._statsInfo.getStatsFields()) {
    DocSet docs = statsField.computeBaseDocSet();
    statsValues.put(statsField.getOutputKey(), statsField.computeLocalStatsValues(docs));
  }

  rb.rsp.add("stats", convertToResponse(statsValues));
}
 
Example 16
Source Project: BioSolr   Source File: ChildNodeFacetTreeBuilder.java    License: Apache License 2.0
/**
 * Fetch facets for items containing a specific set of values.
 * @param searcher the searcher for the collection being used.
 * @param facetValues the incoming values to use as filters.
 * @param filterField the item field containing the child values, which will be used
 * to filter against.
 * @return a map of node value to child values for the items.
 * @throws IOException
 */
private Map<String, Set<String>> filterEntriesByField(SolrIndexSearcher searcher, Collection<String> facetValues,
		String filterField) throws IOException {
	Map<String, Set<String>> filteredEntries = new HashMap<>();

	LOGGER.debug("Looking up {} entries in field {}", facetValues.size(), filterField);
	Query filter = buildFilterQuery(filterField, facetValues);
	LOGGER.trace("Filter query: {}", filter);

	DocSet docs = searcher.getDocSet(filter);

	for (DocIterator it = docs.iterator(); it.hasNext(); ) {
		Document doc = searcher.doc(it.nextDoc(), docFields);
		String nodeId = doc.get(getNodeField());
		
		// Get the children for the node, if necessary
		Set<String> childIds;
		if (filterField.equals(getNodeField())) {
			// Filtering on the node field - child IDs are redundant
			childIds = Collections.emptySet();
		} else {
			childIds = new HashSet<>(Arrays.asList(doc.getValues(filterField)));
			LOGGER.trace("Got {} children for node {}", childIds.size(), nodeId);
		}
		filteredEntries.put(nodeId, childIds);
		
		// Record the label, if required
		if (isLabelRequired(nodeId)) {
			recordLabel(nodeId, doc.getValues(getLabelField()));
		}
	}

	return filteredEntries;
}
 
Example 17
Source Project: lucene-solr   Source File: SimpleFacets.java    License: Apache License 2.0
public SimpleFacets(SolrQueryRequest req,
                    DocSet docs,
                    SolrParams params,
                    ResponseBuilder rb) {
  this.req = req;
  this.searcher = req.getSearcher();
  this.docsOrig = docs;
  this.global = params;
  this.rb = rb;
  this.facetExecutor = req.getCore().getCoreContainer().getUpdateShardHandler().getUpdateExecutor();
}
 
Example 18
Source Project: lucene-solr   Source File: SimpleFacets.java    License: Apache License 2.0
private int numDocs(String term, final SchemaField sf, final FieldType ft, final DocSet baseDocset) {
  try {
    return searcher.numDocs(ft.getFieldQuery(null, sf, term), baseDocset);
  } catch (IOException e1) {
    throw new RuntimeException(e1);
  }
}
 
Example 19
Source Project: lucene-solr   Source File: RangeFacetProcessor.java    License: Apache License 2.0
/**
 * Macro for getting the numDocs of range over docs
 *
 * @see org.apache.solr.search.SolrIndexSearcher#numDocs
 * @see org.apache.lucene.search.TermRangeQuery
 */
protected int rangeCount(DocSet subset, RangeFacetRequest rfr, RangeFacetRequest.FacetRange fr) throws IOException, SyntaxError {
  SchemaField schemaField = rfr.getSchemaField();
  Query rangeQ = schemaField.getType().getRangeQuery(null, schemaField, fr.lower, fr.upper, fr.includeLower, fr.includeUpper);
  if (rfr.isGroupFacet()) {
    return getGroupedFacetQueryCount(rangeQ, subset);
  } else {
    return searcher.numDocs(rangeQ, subset);
  }
}
 
Example 20
Source Project: lucene-solr   Source File: SimpleFacets.java    License: Apache License 2.0
/**
 *  Works like {@link #getFacetTermEnumCounts(SolrIndexSearcher, DocSet, String, int, int, int, boolean, String, String, Predicate, boolean)}
 *  but takes a substring directly for the contains check rather than a {@link Predicate} instance.
 */
public NamedList<Integer> getFacetTermEnumCounts(SolrIndexSearcher searcher, DocSet docs, String field, int offset, int limit, int mincount, boolean missing,
                                                 String sort, String prefix, String contains, boolean ignoreCase, boolean intersectsCheck)
  throws IOException {

  final Predicate<BytesRef> termFilter = new SubstringBytesRefFilter(contains, ignoreCase);
  return getFacetTermEnumCounts(searcher, docs, field, offset, limit, mincount, missing, sort, prefix, termFilter, intersectsCheck);
}
 
Example 21
Source Project: lucene-solr   Source File: SimpleFacets.java    License: Apache License 2.0
private static NamedList<Integer> finalize(NamedList<Integer> res, SolrIndexSearcher searcher, DocSet docs,
                                           String field, boolean missing) throws IOException {
  if (missing) {
    res.add(null, getFieldMissingCount(searcher,docs,field));
  }

  return res;
}
 
Example 22
public PerSegmentSingleValuedFaceting(SolrIndexSearcher searcher, DocSet docs, String fieldName, int offset, int limit, int mincount, boolean missing, String sort, String prefix, Predicate<BytesRef> filter) {
  this.searcher = searcher;
  this.docs = docs;
  this.fieldName = fieldName;
  this.offset = offset;
  this.limit = limit;
  this.mincount = mincount;
  this.missing = missing;
  this.sort = sort;
  this.prefix = prefix;
  this.termFilter = filter;
}
 
Example 23
Source Project: lucene-solr   Source File: IntervalFacets.java    License: Apache License 2.0
/**
 * Constructor that accepts un-parsed intervals using "interval faceting" syntax. See {@link IntervalFacets} for syntax.
 * Intervals don't need to be in order.
 */
public IntervalFacets(SchemaField schemaField, SolrIndexSearcher searcher, DocSet docs, String[] intervals, SolrParams params) throws SyntaxError, IOException {
  this.schemaField = schemaField;
  this.searcher = searcher;
  this.docs = docs;
  this.intervals = getSortedIntervals(intervals, params);
  doCount();
}
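
For reference, the "interval faceting" syntax mentioned above uses bracketed ranges such as "[0,10)" (closed lower bound, open upper bound), with "*" for an unbounded end. Below is a minimal, hedged sketch of calling this constructor; schemaField, searcher, docs and params are assumed to come from the enclosing request.

// Hedged sketch; schemaField, searcher, docs and params are assumed from the enclosing request.
String[] intervals = new String[] { "[*,10)", "[10,100)", "[100,*]" }; // '[' / ']' include the bound, '(' / ')' exclude it, '*' is unbounded
IntervalFacets intervalFacets = new IntervalFacets(schemaField, searcher, docs, intervals, params);
// doCount(), invoked by the constructor above, performs the actual counting.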
 
Example 24
Source Project: lucene-solr   Source File: IntervalFacets.java    License: Apache License 2.0
/**
 * Constructor that accepts an already constructed array of {@link FacetInterval} objects. This array needs to be sorted
 * by start value in weakly ascending order. null values are not allowed in the array.
 */
public IntervalFacets(SchemaField schemaField, SolrIndexSearcher searcher, DocSet docs, FacetInterval[] intervals) throws IOException {
  this.schemaField = schemaField;
  this.searcher = searcher;
  this.docs = docs;
  this.intervals = intervals;
  doCount();
}
 
Example 25
Source Project: lucene-solr   Source File: DocValuesFacets.java    License: Apache License 2.0
/** finalizes result: computes missing count if applicable */
static NamedList<Integer> finalize(NamedList<Integer> res, SolrIndexSearcher searcher, SchemaField schemaField, DocSet docs, int missingCount, boolean missing) throws IOException {
  if (missing) {
    if (missingCount < 0) {
      missingCount = SimpleFacets.getFieldMissingCount(searcher,docs,schemaField.getName());
    }
    res.add(null, missingCount);
  }
  
  return res;
}
 
Example 26
Source Project: lucene-solr   Source File: NumericFacets.java    License: Apache License 2.0
public static NamedList<Integer> getCounts(SolrIndexSearcher searcher, DocSet docs, String fieldName, int offset, int limit, int mincount, boolean missing, String sort) throws IOException {
  final SchemaField sf = searcher.getSchema().getField(fieldName);
  if (sf.multiValued()) {
    // TODO: evaluate using getCountsMultiValued for singleValued numerics with SingletonSortedNumericDocValues
    return getCountsMultiValued(searcher, docs, fieldName, offset, limit, mincount, missing, sort);
  }
  return getCountsSingleValue(searcher, docs, fieldName, offset, limit, mincount, missing, sort);
}
 
Example 27
Source Project: lucene-solr   Source File: FilterQuery.java    License: Apache License 2.0
@Override
public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException {
  // SolrRequestInfo reqInfo = SolrRequestInfo.getRequestInfo();

  if (!(searcher instanceof SolrIndexSearcher)) {
    // delete-by-query won't have SolrIndexSearcher
    return new BoostQuery(new ConstantScoreQuery(q), 0).createWeight(searcher, scoreMode, 1f);
  }

  SolrIndexSearcher solrSearcher = (SolrIndexSearcher)searcher;
  DocSet docs = solrSearcher.getDocSet(q);
  // reqInfo.addCloseHook(docs);  // needed for off-heap refcounting

  return new BoostQuery(new SolrConstantScoreQuery(docs.getTopFilter()), 0).createWeight(searcher, scoreMode, 1f);
}
 
Example 28
Source Project: lucene-solr   Source File: ChildDocTransformer.java    License: Apache License 2.0
ChildDocTransformer(String name, BitSetProducer parentsFilter, DocSet childDocSet,
                    SolrReturnFields returnFields, boolean isNestedSchema, int limit) {
  this.name = name;
  this.parentsFilter = parentsFilter;
  this.childDocSet = childDocSet;
  this.limit = limit;
  this.isNestedSchema = isNestedSchema;
  this.childReturnFields = returnFields!=null? returnFields: new SolrReturnFields();
}