Java Code Examples for org.apache.solr.request.SolrQueryRequest#getSearcher()

The following examples show how to use org.apache.solr.request.SolrQueryRequest#getSearcher(). They are drawn from open-source projects; the source file and originating project are noted above each example.
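
Before the examples, here is a minimal sketch of the typical lifecycle around getSearcher(). The makeRequest() call is a hypothetical stand-in for however the SolrQueryRequest is obtained (the test examples below use a local request factory):

SolrQueryRequest req = makeRequest();
try {
    // The searcher is bound to the request and reference-counted:
    // it remains valid until the request is closed.
    SolrIndexSearcher searcher = req.getSearcher();
    // ... run searches against the index ...
} finally {
    req.close(); // releases the searcher reference
}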
Example 1
Source File: LocalGraph.java    From SolRDF with Apache License 2.0
/**
 * Builds a new {@link LocalGraph} with the given data.
 * 
 * @param graphNode the graph name.
 * @param request the Solr query request.
 * @param response the Solr query response.
 * @param qparser the query parser.
 * @param fetchSize the fetch size that will be used in reads.
 * @param consumer the Graph event consumer that will be notified on relevant events.
 */
private LocalGraph(
	final Node graphNode, 
	final SolrQueryRequest request, 
	final SolrQueryResponse response, 
	final QParser qparser, 
	final int fetchSize, 
	final GraphEventConsumer consumer) {
	super(graphNode, consumer, fetchSize);
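	// graphNodeStringified is a field of this class, derived from graphNode elsewhere (not shown in this excerpt).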
	this.graphTermQuery = new TermQuery(new Term(Field.C, graphNodeStringified));
	this.request = request;
	this.updateCommand = new AddUpdateCommand(request);
	this.updateProcessor = request.getCore().getUpdateProcessingChain(null).createProcessor(request, response);
	this.searcher = request.getSearcher();
	this.qParser = qparser;
}
 
Example 2
Source File: HttpCacheHeaderUtil.java    From lucene-solr with Apache License 2.0
/**
 * Calculate the appropriate last-modified time for Solr relative to the current request.
 *
 * @param solrReq the current Solr query request.
 * @return the timestamp to use as a last modified time.
 */
public static long calcLastModified(final SolrQueryRequest solrReq) {
  final SolrCore core = solrReq.getCore();
  final SolrIndexSearcher searcher = solrReq.getSearcher();
  
  final LastModFrom lastModFrom
    = core.getSolrConfig().getHttpCachingConfig().getLastModFrom();

  long lastMod;
  try {
    // assume default, change if needed (getOpenTime() should be fast)
    lastMod =
      LastModFrom.DIRLASTMOD == lastModFrom
      ? IndexDeletionPolicyWrapper.getCommitTimestamp(searcher.getIndexReader().getIndexCommit())
      : searcher.getOpenTimeStamp().getTime();
  } catch (IOException e) {
    // we're pretty freaking screwed if this happens
    throw new SolrException(ErrorCode.SERVER_ERROR, e);
  }
  // Get the time at which the searcher was opened
  // We get rid of the milliseconds because the HTTP header has only
  // second granularity
  return lastMod - (lastMod % 1000L);
}
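
Callers typically feed this value straight into an HTTP response header. A minimal sketch, assuming a standard HttpServletResponse named httpResponse is in scope:

long lastMod = HttpCacheHeaderUtil.calcLastModified(solrReq);
httpResponse.setDateHeader("Last-Modified", lastMod); // HTTP headers carry second granularity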
 
Example 3
Source File: LireRequestHandler.java    From liresolr with GNU General Public License v2.0
/**
 * Returns a random set of documents from the index. Mainly for testing purposes.
 *
 * @param req the current Solr query request.
 * @param rsp the Solr query response to which the results are added.
 * @throws IOException in case of low-level I/O errors while reading the index.
 */
private void handleRandomSearch(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
    SolrIndexSearcher searcher = req.getSearcher();
    Query query = new MatchAllDocsQuery();
    DocList docList = searcher.getDocList(query, getFilterQueries(req), Sort.RELEVANCE, 0, numberOfCandidateResults, 0);
    int paramRows = Math.min(req.getParams().getInt("rows", defaultNumberOfResults), docList.size());
    if (docList.size() < 1) {
        rsp.add("Error", "No documents in index");
    } else {
        LinkedList<Document> list = new LinkedList<>();
        while (list.size() < paramRows) {
            DocList auxList = docList.subset((int) (Math.random() * docList.size()), 1);
            Document doc = null;
            for (DocIterator it = auxList.iterator(); it.hasNext(); ) {
                doc = searcher.doc(it.nextDoc());
            }
            if (!list.contains(doc)) {
                list.add(doc);
            }
        }
        rsp.addResponse(list);
    }
}
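
Note that the while loop keeps drawing random one-document subsets until it has collected paramRows distinct documents; because paramRows is capped at docList.size(), enough distinct documents always exist for the loop to terminate.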
 
Example 4
Source File: TestSearchPerf.java    From lucene-solr with Apache License 2.0
int doListGen(int iter, Query q, List<Query> filt, boolean cacheQuery, boolean cacheFilt) throws Exception {
  SolrQueryRequest req = lrf.makeRequest();

  SolrIndexSearcher searcher = req.getSearcher();

  final RTimer timer = new RTimer();

  int ret = 0;
  for (int i=0; i<iter; i++) {
    DocList l = searcher.getDocList(q, filt, (Sort)null, 0, 10, (cacheQuery?0:SolrIndexSearcher.NO_CHECK_QCACHE)|(cacheFilt?0:SolrIndexSearcher.NO_CHECK_FILTERCACHE) );
    ret += l.matches();
  }

  double elapsed = timer.getTime();
  System.out.println("ret="+ret+ " time="+elapsed+" throughput="+iter*1000/(elapsed+1));

  req.close();
  assertTrue(ret>0);  // make sure we did some work
  return ret;
}
 
Example 5
Source File: TaggerRequestHandler.java    From lucene-solr with Apache License 2.0
/**
 * Returns the set of documents matching the provided 'fq' (filter queries), excluding
 * deleted docs. If null is returned, then all docs are available.
 */
private Bits computeDocCorpus(SolrQueryRequest req) throws SyntaxError, IOException {
  final String[] corpusFilterQueries = req.getParams().getParams("fq");
  final SolrIndexSearcher searcher = req.getSearcher();
  final Bits docBits;
  if (corpusFilterQueries != null && corpusFilterQueries.length > 0) {
    List<Query> filterQueries = new ArrayList<Query>(corpusFilterQueries.length);
    for (String corpusFilterQuery : corpusFilterQueries) {
      QParser qParser = QParser.getParser(corpusFilterQuery, null, req);
      try {
        filterQueries.add(qParser.parse());
      } catch (SyntaxError e) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
      }
    }

    final DocSet docSet = searcher.getDocSet(filterQueries);//hopefully in the cache

    docBits = docSet.getBits();
  } else {
    docBits = searcher.getSlowAtomicReader().getLiveDocs();
  }
  return docBits;
}
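
To make the contract concrete, here is a hedged sketch of a consumer of the returned Bits (the direct call to the private computeDocCorpus stands in for any code holding its result; remember that null means every document is available):

Bits corpus = computeDocCorpus(req); // null => all docs available
int maxDoc = req.getSearcher().maxDoc();
int admitted = 0;
for (int docId = 0; docId < maxDoc; docId++) {
    if (corpus == null || corpus.get(docId)) {
        admitted++; // docId passed the filter queries and is live
    }
}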
 
Example 6
Source File: TestSearchPerf.java    From lucene-solr with Apache License 2.0
int doSetGen(int iter, Query q) throws Exception {
  SolrQueryRequest req = lrf.makeRequest();

  SolrIndexSearcher searcher = req.getSearcher();

  final RTimer timer = new RTimer();

  int ret = 0;
  for (int i=0; i<iter; i++) {
    DocSet set = searcher.getDocSetNC(q, null);
    ret += set.size();
  }

  double elapsed = timer.getTime();
  System.out.println("ret="+ret+ " time="+elapsed+" throughput="+iter*1000/(elapsed+1));

  req.close();
  assertTrue(ret>0);  // make sure we did some work
  return ret;
}
 
Example 7
Source File: UnifiedSolrHighlighter.java    From lucene-solr with Apache License 2.0
public SolrExtendedUnifiedHighlighter(SolrQueryRequest req) {
  super(req.getSearcher(), req.getSchema().getIndexAnalyzer());
  this.params = req.getParams();
  this.schema = req.getSchema();
  this.setMaxLength(
      params.getInt(HighlightParams.MAX_CHARS, DEFAULT_MAX_CHARS));
  this.setCacheFieldValCharsThreshold(
      params.getInt(HighlightParams.CACHE_FIELD_VAL_CHARS_THRESHOLD, DEFAULT_CACHE_CHARS_THRESHOLD));

  final RTimerTree timerTree;
  if (req.getRequestTimer() != null) { //It may be null if not used in a search context.
    timerTree = req.getRequestTimer();
  } else {
    timerTree = new RTimerTree(); // since null checks are annoying
  }
  loadFieldValuesTimer = timerTree.sub("loadFieldValues"); // we assume a new timer, state of STARTED
  loadFieldValuesTimer.pause(); // state of PAUSED now with about zero time. Will fail if state isn't STARTED.
}
 
Example 8
Source File: SolrTestCaseHS.java    From lucene-solr with Apache License 2.0
/***
public static void clearNCache() {
  SolrQueryRequest req = req();
  req.getSearcher().getnCache().clear();  // OFF-HEAP
  req.close();
}***/

public static void clearQueryCache() {
  SolrQueryRequest req = req();
  req.getSearcher();
  req.close();
}
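
Note that the return value of getSearcher() is ignored here: the call appears to be made purely for its side effect of acquiring the request's searcher reference before the request is closed again.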
 
Example 9
Source File: SolrPluginUtils.java    From lucene-solr with Apache License 2.0
@SuppressWarnings({"unchecked"})
public static void doStandardResultsDebug(
        SolrQueryRequest req,
        Query query,
        DocList results,
        boolean dbgResults,
        @SuppressWarnings({"rawtypes"})NamedList dbg) throws IOException
{
  if (dbgResults) {
    SolrIndexSearcher searcher = req.getSearcher();
    IndexSchema schema = searcher.getSchema();
    boolean explainStruct = req.getParams().getBool(CommonParams.EXPLAIN_STRUCT, false);

    if (results != null) {
      NamedList<Explanation> explain = getExplanations(query, results, searcher, schema);
      dbg.add("explain", explainStruct
          ? explanationsToNamedLists(explain)
          : explanationsToStrings(explain));
    }

    String otherQueryS = req.getParams().get(CommonParams.EXPLAIN_OTHER);
    if (otherQueryS != null && otherQueryS.length() > 0) {
      DocList otherResults = doSimpleQuery(otherQueryS, req, 0, 10);
      dbg.add("otherQuery", otherQueryS);
      NamedList<Explanation> explainO = getExplanations(query, otherResults, searcher, schema);
      dbg.add("explainOther", explainStruct
              ? explanationsToNamedLists(explainO)
              : explanationsToStrings(explainO));
    }
  }
}
 
Example 10
Source File: SolrPluginUtils.java    From lucene-solr with Apache License 2.0
/**
 * Pre-fetch documents into the index searcher's document cache.
 *
 * This is an entirely optional step which you might want to perform for
 * the following reasons:
 *
 * <ul>
 *     <li>Locates the document-retrieval costs in one spot, which helps
 *     detailed performance measurement</li>
 *
 *     <li>Determines a priori which fields will need to be fetched by
 *     various subtasks, like response writing and highlighting.  This
 *     minimizes the chance that many needed fields will be loaded lazily
 *     (it is more efficient to load all the fields we require up front).</li>
 * </ul>
 *
 * If lazy field loading is disabled, this method does nothing.
 */
public static void optimizePreFetchDocs(ResponseBuilder rb,
                                        DocList docs,
                                        Query query,
                                        SolrQueryRequest req,
                                        SolrQueryResponse res) throws IOException {
  SolrIndexSearcher searcher = req.getSearcher();
  if(!searcher.getDocFetcher().isLazyFieldLoadingEnabled()) {
    // nothing to do
    return;
  }

  ReturnFields returnFields = res.getReturnFields();
  if(returnFields.getLuceneFieldNames() != null) {
    Set<String> fieldFilter = returnFields.getLuceneFieldNames();

    if (rb.doHighlights) {
      // copy return fields list
      fieldFilter = new HashSet<>(fieldFilter);
      // add highlight fields

      SolrHighlighter highlighter = HighlightComponent.getHighlighter(req.getCore());
      for (String field: highlighter.getHighlightFields(query, req, null))
        fieldFilter.add(field);

      // fetch unique key if one exists.
      SchemaField keyField = searcher.getSchema().getUniqueKeyField();
      if(null != keyField)
        fieldFilter.add(keyField.getName());
    }

    // get documents
    DocIterator iter = docs.iterator();
    for (int i=0; i<docs.size(); i++) {
      searcher.doc(iter.nextDoc(), fieldFilter);
    }

  }

}
 
Example 11
Source File: QueryComponent.java    From lucene-solr with Apache License 2.0
private void doProcessUngroupedSearch(ResponseBuilder rb, QueryCommand cmd, QueryResult result) throws IOException {

    SolrQueryRequest req = rb.req;
    SolrQueryResponse rsp = rb.rsp;

    SolrIndexSearcher searcher = req.getSearcher();

    try {
      searcher.search(result, cmd);
    } catch (FuzzyTermsEnum.FuzzyTermsException e) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
    }
    rb.setResult(result);

    ResultContext ctx = new BasicResultContext(rb);
    rsp.addResponse(ctx);
    rsp.getToLog().add("hits", rb.getResults()==null || rb.getResults().docList==null ? 0 : rb.getResults().docList.matches());

    if ( ! rb.req.getParams().getBool(ShardParams.IS_SHARD,false) ) {
      if (null != rb.getNextCursorMark()) {
        rb.rsp.add(CursorMarkParams.CURSOR_MARK_NEXT,
                   rb.getNextCursorMark().getSerializedTotem());
      }
    }

    if(rb.mergeFieldHandler != null) {
      rb.mergeFieldHandler.handleMergeFields(rb, searcher);
    } else {
      doFieldSortValues(rb, searcher);
    }

    doPrefetch(rb);
  }
 
Example 12
Source File: FacetRequest.java    From lucene-solr with Apache License 2.0
/**
 * Process this facet request against the given domain of docs.
 * Note: this is currently used externally by {@link org.apache.solr.request.SimpleFacets}.
 */
public final Object process(SolrQueryRequest req, DocSet domain) throws IOException {
  //TODO check for FacetDebugInfo?  and if so set on fcontext
  //  rb.req.getContext().get("FacetDebugInfo");
  //TODO should the SolrQueryRequest be held on the FacetRequest?  It was created from parse(req,...) so is known.
  FacetContext fcontext = new FacetContext();
  fcontext.base = domain;
  fcontext.req = req;
  fcontext.searcher = req.getSearcher();
  fcontext.qcontext = QueryContext.newContext(fcontext.searcher);

  return process(fcontext);
}
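
The FacetContext here acts as a simple carrier object: bundling the domain, the request, the searcher, and a fresh QueryContext up front means the downstream facet processors never have to re-acquire any of them.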
 
Example 13
Source File: LukeRequestHandler.java    From lucene-solr with Apache License 2.0
private static SimpleOrderedMap<Object> getIndexedFieldsInfo(SolrQueryRequest req)
    throws Exception {

  SolrIndexSearcher searcher = req.getSearcher();
  SolrParams params = req.getParams();

  Set<String> fields = null;
  String fl = params.get(CommonParams.FL);
  if (fl != null) {
    fields = new TreeSet<>(Arrays.asList(fl.split( "[,\\s]+" )));
  }

  LeafReader reader = searcher.getSlowAtomicReader();
  IndexSchema schema = searcher.getSchema();

  // Don't be tempted to put this in the loop below, the whole point here is to alphabetize the fields!
  Set<String> fieldNames = new TreeSet<>();
  for(FieldInfo fieldInfo : reader.getFieldInfos()) {
    fieldNames.add(fieldInfo.name);
  }

  // Walk the term enum and keep a priority queue for each map in our set
  SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();

  for (String fieldName : fieldNames) {
    if (fields != null && ! fields.contains(fieldName) && ! fields.contains("*")) {
      continue; // we're not interested in this field
    }

    SimpleOrderedMap<Object> fieldMap = new SimpleOrderedMap<>();

    SchemaField sfield = schema.getFieldOrNull( fieldName );
    FieldType ftype = (sfield==null)?null:sfield.getType();

    fieldMap.add( "type", (ftype==null)?null:ftype.getTypeName() );
    fieldMap.add("schema", getFieldFlags(sfield));
    if (sfield != null && schema.isDynamicField(sfield.getName()) && schema.getDynamicPattern(sfield.getName()) != null) {
      fieldMap.add("dynamicBase", schema.getDynamicPattern(sfield.getName()));
    }
    Terms terms = reader.terms(fieldName);
    if (terms == null) { // Not indexed, so we need to report what we can (it made it through the fl param if specified)
      finfo.add( fieldName, fieldMap );
      continue;
    }

    if(sfield != null && sfield.indexed() ) {
      if (params.getBool(INCLUDE_INDEX_FIELD_FLAGS,true)) {
        Document doc = getFirstLiveDoc(terms, reader);

        if (doc != null) {
          // Found a document with this field
          try {
            IndexableField fld = doc.getField(fieldName);
            if (fld != null) {
              fieldMap.add("index", getFieldFlags(fld));
            } else {
              // it is a non-stored field...
              fieldMap.add("index", "(unstored field)");
            }
          } catch (Exception ex) {
            log.warn("error reading field: {}", fieldName);
          }
        }
      }
      fieldMap.add("docs", terms.getDocCount());
    }
    if (fields != null && (fields.contains(fieldName) || fields.contains("*"))) {
      getDetailedFieldInfo(req, fieldName, fieldMap);
    }
    // Add the field
    finfo.add( fieldName, fieldMap );
  }
  return finfo;
}
 
Example 14
Source File: TestSearcherReuse.java    From lucene-solr with Apache License 2.0
/**
 * Helper method to get the searcher from a request, and assert that it's the main searcher
 */
public static SolrIndexSearcher getMainSearcher(SolrQueryRequest req) {
  SolrIndexSearcher s = req.getSearcher();
  assertMainSearcher(s);
  return s;
}
 
Example 15
Source File: TestFiltering.java    From lucene-solr with Apache License 2.0
@Test
public void testLiveDocsSharing() throws Exception {
  clearIndex();
  for (int i=0; i<20; i++) {
    for (int repeat=0; repeat < (i%5==0 ? 2 : 1); repeat++) {
      assertU(adoc("id", Integer.toString(i), "foo_s", "foo", "val_i", Integer.toString(i), "val_s", Character.toString((char)('A' + i))));
    }
  }
  assertU(commit());

  String[] queries = {
      "foo_s:foo",
      "foo_s:f*",
      "*:*",
      "id:[* TO *]",
      "id:[0 TO 99]",
      "val_i:[0 TO 20]",
      "val_s:[A TO z]"
  };

  SolrQueryRequest req = req();
  try {
    SolrIndexSearcher searcher = req.getSearcher();

    DocSet live = null;
    for (String qstr :  queries) {
      Query q = QParser.getParser(qstr, null, req).getQuery();
      // System.out.println("getting set for " + q);
      DocSet set = searcher.getDocSet(q);
      if (live == null) {
        live = searcher.getLiveDocSet();
      }
      assertTrue( set == live);

      QueryCommand cmd = new QueryCommand();
      cmd.setQuery( QParser.getParser(qstr, null, req).getQuery() );
      cmd.setLen(random().nextInt(30));
      cmd.setNeedDocSet(true);
      QueryResult res = new QueryResult();
      searcher.search(res, cmd);
      set = res.getDocSet();
      assertTrue( set == live );

      cmd.setQuery( QParser.getParser(qstr + " OR id:0", null, req).getQuery() );
      cmd.setFilterList( QParser.getParser(qstr + " OR id:1", null, req).getQuery() );
      res = new QueryResult();
      searcher.search(res, cmd);
      set = res.getDocSet();
      assertTrue( set == live );
    }

  } finally {
    req.close();
  }
}
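
The assertTrue(set == live) checks are identity comparisons, not equality checks: for each query that matches every live document, the searcher hands back the same cached live-doc DocSet instance rather than materializing a new set, which is exactly the sharing behavior this test verifies.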
 
Example 16
Source File: AlfrescoLukeRequestHandler.java    From SearchServices with GNU Lesser General Public License v3.0
private static SimpleOrderedMap<Object> getIndexedFieldsInfo(
		SolrQueryRequest req) throws Exception {

	SolrIndexSearcher searcher = req.getSearcher();
	SolrParams params = req.getParams();

	Set<String> fields = null;
	String fl = params.get(CommonParams.FL);
	if (fl != null) {
		fields = new TreeSet<>(Arrays.asList(fl.split("[,\\s]+")));
	}

	LeafReader reader = searcher.getSlowAtomicReader();
	IndexSchema schema = searcher.getSchema();

	// Don't be tempted to put this in the loop below, the whole point here
	// is to alphabetize the fields!
	Set<String> fieldNames = new TreeSet<>();
	for (FieldInfo fieldInfo : reader.getFieldInfos()) {
		fieldNames.add(fieldInfo.name);
	}

	// Walk the term enum and keep a priority queue for each map in our set
	SimpleOrderedMap<Object> vInfo = new SimpleOrderedMap<>();
	SimpleOrderedMap<Object> aInfo = new SimpleOrderedMap<>();

	for (String fieldName : fieldNames) {
		if (fields != null && !fields.contains(fieldName)
				&& !fields.contains("*")) {
			continue; // we're not interested in this field
		}

		SimpleOrderedMap<Object> fieldMap = new SimpleOrderedMap<>();

		SchemaField sfield = schema.getFieldOrNull(fieldName);
		FieldType ftype = (sfield == null) ? null : sfield.getType();

		fieldMap.add("type", (ftype == null) ? null : ftype.getTypeName());
		fieldMap.add("schema", getFieldFlags(sfield));
		if (sfield != null && schema.isDynamicField(sfield.getName())
				&& schema.getDynamicPattern(sfield.getName()) != null) {
			fieldMap.add("dynamicBase",
					schema.getDynamicPattern(sfield.getName()));
		}
		Terms terms = reader.fields().terms(fieldName);
		if (terms == null) { // Not indexed, so we need to report what we can
			// (it made it through the fl param if specified)
			vInfo.add(AlfrescoSolrDataModel.getInstance()
					.getAlfrescoPropertyFromSchemaField(fieldName),
					fieldMap);
			aInfo.add(fieldName, fieldMap);
			continue;
		}

		if (sfield != null && sfield.indexed()) {
			if (params.getBool(INCLUDE_INDEX_FIELD_FLAGS, true)) {
				Document doc = getFirstLiveDoc(terms, reader);

				if (doc != null) {
					// Found a document with this field
					try {
						IndexableField fld = doc.getField(fieldName);
						if (fld != null) {
							fieldMap.add("index", getFieldFlags(fld));
						} else {
							// it is a non-stored field...
							fieldMap.add("index", "(unstored field)");
						}
					} catch (Exception ex) {
						log.warn("error reading field: " + fieldName);
					}
				}
			}
			fieldMap.add("docs", terms.getDocCount());

		}
		if (fields != null
				&& (fields.contains(fieldName) || fields.contains("*"))) {
			getDetailedFieldInfo(req, fieldName, fieldMap);
		}
		// Add the field
		vInfo.add(fieldName, fieldMap);
		aInfo.add(AlfrescoSolrDataModel.getInstance()
				.getAlfrescoPropertyFromSchemaField(fieldName), fieldMap);
	}

	SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<>();
	finfo.addAll(vInfo);
	// finfo.add("mimetype()", finfo.get("cm:content.mimetype"));
	// finfo.add("contentSize()", finfo.get("cm:content.size"));
	finfo.addAll(aInfo);
	return finfo;
}
 
Example 17
Source File: TermRecognitionRequestHandler.java    From jate with GNU Lesser General Public License v3.0
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    log.info("Term recognition request handler...");
    setTopInitArgsAsInvariants(req);

    final String jatePropertyFile = req.getParams().get(JATE_PROPERTY_FILE);
    final String algorithmName = req.getParams().get(TERM_RANKING_ALGORITHM);
    final Boolean isExtraction = req.getParams().getBool(CANDIDATE_EXTRACTION);
    final String outFilePath = req.getParams().get(AppParams.OUTPUT_FILE.getParamKey());
    final Boolean isIndexTerms = req.getParams().getBool(INDEX_TERM);
    final Boolean isBoosted = req.getParams().getBool(BOOSTING);

    final Algorithm algorithm = getAlgorithm(algorithmName);

    JATEProperties properties = App.getJateProperties(jatePropertyFile);

    final SolrIndexSearcher searcher = req.getSearcher();
    try {
        if (isExtraction) {
            log.info("start candidate extraction (i.e., re-index of whole corpus) ...");
            generalTRProcessor.candidateExtraction(searcher.getCore(), jatePropertyFile);
            log.info("complete candidate terms indexing.");
        }

        Map<String, String> trRunTimeParams = initialiseTRRunTimeParams(req);
        List<JATETerm> termList = generalTRProcessor.rankingAndFiltering(searcher.getCore(), jatePropertyFile,
                trRunTimeParams, algorithm);

        log.info(String.format("complete term recognition extraction! Finalized Term size [%s]", termList.size()));

        if (isExport(outFilePath)) {
            generalTRProcessor.export(termList);
        }

        if (isIndexTerms) {
            log.info("start to index filtered candidate terms ...");
            indexTerms(termList, properties, searcher, isBoosted, isExtraction);
            // trigger 'optimise' to build new index
            searcher.getCore().getUpdateHandler().commit(new CommitUpdateCommand(req, true));
            log.info("complete the indexing of candidate terms.");
        }
    } finally {
        searcher.close();
    }
}
 
Example 18
Source File: AutocompleteResponseWriter.java    From apache-solr-essentials with Apache License 2.0
/**
 * Here the writer creates its output.
 * 
 * @param writer the character stream writer.
 * @param request the current {@link SolrQueryRequest}
 * @param response the output response.
 * @throws IOException in case of I/O failure.
 */
@SuppressWarnings("rawtypes")
@Override
public void write(
		final Writer writer, 
		final SolrQueryRequest request, 
		final SolrQueryResponse response) throws IOException {
	
	// 1. Get a reference to the values that make up the current response
	final NamedList elements = response.getValues();
	
	// 2. Use a StringBuilder to build the output 
	final StringBuilder builder = new StringBuilder("{")
		.append("query:'")
		.append(request.getParams().get(CommonParams.Q))
		.append("',");
	
	// 3. Get a reference to the object which holds the query result
	final Object value = elements.getVal(1);		
	if (value instanceof ResultContext)
	{
		final ResultContext context = (ResultContext) value;
	
		// The ordered list (actually the page subset) of matched documents
		final DocList ids = context.docs;
		if (ids != null)
		{
			final SolrIndexSearcher searcher = request.getSearcher();
			final DocIterator iterator = ids.iterator();
			builder.append("suggestions:[");
			
			// 4. Iterate over documents
			for (int i = 0; i < ids.size(); i++)
			{
				// 5. For each document we need to get the corresponding "label" attribute
				final Document document = searcher.doc(iterator.nextDoc(), FIELDS);
				if (i > 0)  { builder.append(","); }
				
				// 6. Append the label value to writer output
				builder
					.append("'")
					.append(((String) document.get("label")).replaceAll("'", "\\\\'").replaceAll("\"", "\\\\\""))
					.append("'");
			}
			builder.append("]").append("}");
		}
	}
	
	// 7. Finally, write out the built character stream by means of the output writer.
	writer.write(builder.toString());
}
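
For reference, the payload this writer emits is a JavaScript-style object literal of roughly the following shape (values illustrative): {query:'ip',suggestions:['iPad','iPhone']}.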