Java Code Examples for org.apache.solr.util.RefCounted#decref()

The following examples show how to use org.apache.solr.util.RefCounted#decref(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: DirectUpdateHandler2.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Flushes pending changes and runs the first phase of a two-phase commit on
 * the underlying IndexWriter. Error metrics are updated if any step fails.
 */
public void prepareCommit(CommitUpdateCommand cmd) throws IOException {
  boolean failed = true;
  try {
    log.debug("start {}", cmd);

    RefCounted<IndexWriter> writerRef = solrCoreState.getIndexWriter(core);
    try {
      IndexWriter writer = writerRef.get();
      SolrIndexWriter.setCommitData(writer, cmd.getVersion());
      writer.prepareCommit();
    } finally {
      // Always release the writer reference, even on failure.
      writerRef.decref();
    }

    log.debug("end_prepareCommit");
    failed = false;
  }
  finally {
    if (failed) {
      numErrors.increment();
      numErrorsCumulative.mark();
    }
  }
}
 
Example 2
Source File: FacetTreeGenerator.java    From BioSolr with Apache License 2.0 6 votes vote down vote up
/**
 * Builds the facet tree(s) for the given facet values, optionally pruning
 * them, and converts the result to the response NamedList format.
 */
public List<SimpleOrderedMap<Object>> generateTree(ResponseBuilder rb, NamedList<Integer> facetValues) throws IOException {
	// Grab a reference to the searcher for the required collection.
	RefCounted<SolrIndexSearcher> searcherRef = getSearcherReference(rb);
	try {
		// Build the facet tree(s) from the extracted facet values.
		Collection<TreeFacetField> fTrees = treeBuilder.processFacetTree(searcherRef.get(), extractFacetValues(facetValues));
		LOGGER.debug("Extracted {} facet trees", fTrees.size());

		// Prune the trees, if a pruner is configured.
		if (pruner != null) {
			fTrees = pruner.prune(fTrees);
		}

		// Convert the trees into a SimpleOrderedMap and return directly.
		return convertTreeFacetFields(fTrees);
	} finally {
		// Make sure the searcher ref count is decreased.
		searcherRef.decref();
	}
}
 
Example 3
Source File: DirectUpdateHandler2.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/** Deletes a single document by id, updating trackers and the update log. */
@Override
public void delete(DeleteUpdateCommand cmd) throws IOException {
  TestInjection.injectDirectUpdateLatch();
  deleteByIdCommands.increment();
  deleteByIdCommandsCumulative.mark();

  // When the index writer is to be bypassed, only the update log sees the delete.
  if ((cmd.getFlags() & UpdateCommand.IGNORE_INDEXWRITER) != 0) {
    if (ulog != null) {
      ulog.delete(cmd);
    }
    return;
  }

  Term deleteTerm = getIdTerm(cmd.getIndexedId(), false);
  RefCounted<IndexWriter> writerRef = solrCoreState.getIndexWriter(core);
  try {
    writerRef.get().deleteDocuments(deleteTerm);
  } finally {
    writerRef.decref();
  }

  // Record in the tlog only after the index delete succeeded.
  if (ulog != null) {
    ulog.delete(cmd);
  }

  updateDeleteTrackers(cmd);
}
 
Example 4
Source File: DirectUpdateHandler2.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/** Applies a single add/update through the index writer, then records it in the update log. */
private void doNormalUpdate(AddUpdateCommand cmd) throws IOException {
  RefCounted<IndexWriter> iw = solrCoreState.getIndexWriter(core);
  try {
    IndexWriter writer = iw.get();

    updateDocOrDocValues(cmd, writer);

    // Add to the transaction log *after* successfully adding to the
    // index, if there was no error.
    // This ordering ensures that if we log it, it's definitely been
    // added to the index.
    // This also ensures that if a commit sneaks in-between, that we
    // know everything in a particular
    // log version was definitely committed.
    if (ulog != null) ulog.add(cmd);

  } finally {
    // Always release the writer reference, even if the update failed.
    iw.decref();
  }
}
 
Example 5
Source File: UpdateLog.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/** Clears all update-log bookkeeping after the index is emptied; currently for testing only. */
public void deleteAll() {
  synchronized (this) {

    // Open (and immediately release) a new realtime searcher so subsequent
    // reads see the emptied index rather than stale cached state.
    try {
      RefCounted<SolrIndexSearcher> holder = uhandler.core.openNewSearcher(true, true);
      holder.decref();
    } catch (Exception e) {
      // Message fixed: this is deleteAll (the original said "deleteByQuery",
      // a copy-paste from the sibling method, which misleads log readers).
      SolrException.log(log, "Error opening realtime searcher for deleteAll", e);
    }

    if (map != null) map.clear();
    if (prevMap != null) prevMap.clear();
    if (prevMap2 != null) prevMap2.clear();

    oldDeletes.clear();
    deleteByQueries.clear();
  }
}
 
Example 6
Source File: ScoreJoinQParserPlugin.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Builds the join weight, resolving the "from" side against the other core's
 * registered searcher. The from-core reference and searcher ref are always
 * released, even when query construction fails.
 */
@Override
public Weight createWeight(IndexSearcher searcher, org.apache.lucene.search.ScoreMode scoreMode, float boost) throws IOException {
  SolrRequestInfo info = SolrRequestInfo.getRequestInfo();

  CoreContainer container = info.getReq().getCore().getCoreContainer();

  final SolrCore fromCore = container.getCore(fromIndex);

  if (fromCore == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Cross-core join: no such core " + fromIndex);
  }
  RefCounted<SolrIndexSearcher> fromHolder = null;
  final Query joinQuery;
  try {
    // Acquire the searcher inside the try: the original called
    // getRegisteredSearcher() before it, leaking fromCore if that threw.
    fromHolder = fromCore.getRegisteredSearcher();
    joinQuery = JoinUtil.createJoinQuery(fromField, true,
        toField, fromQuery, fromHolder.get(), this.scoreMode);
  } finally {
    fromCore.close();
    // Null guard matches JoinQParserPlugin: getRegisteredSearcher() may
    // return null, and an unguarded decref would mask the real failure.
    if (fromHolder != null) fromHolder.decref();
  }
  return joinQuery.rewrite(searcher.getIndexReader()).createWeight(searcher, scoreMode, boost);
}
 
Example 7
Source File: RecoveryStrategy.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/** Logs this node's total doc count for {@code op} — debug-level diagnostics only. */
final private void cloudDebugLog(SolrCore core, String op) {
  if (!log.isDebugEnabled()) {
    return;
  }
  try {
    RefCounted<SolrIndexSearcher> holder = core.getNewestSearcher(false);
    SolrIndexSearcher searcher = holder.get();
    try {
      int totalHits = searcher.count(new MatchAllDocsQuery());
      String nodeName = core.getCoreContainer().getZkController().getNodeName();
      log.debug("[{}] {} [{} total hits]", nodeName, op, totalHits);
    } finally {
      holder.decref();
    }
  } catch (Exception e) {
    // Best-effort diagnostics: never let logging break recovery.
    log.debug("Error in solrcloud_debug block", e);
  }
}
 
Example 8
Source File: DirectUpdateHandler2.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/** Adds the command's document(s) without overwrite semantics, then logs to the tlog. */
private void allowDuplicateUpdate(AddUpdateCommand cmd) throws IOException {
  RefCounted<IndexWriter> writerRef = solrCoreState.getIndexWriter(core);
  try {
    IndexWriter writer = writerRef.get();
    // Nested documents must be indexed together as one block; otherwise a single doc.
    Iterable<Document> nestedDocs = cmd.getLuceneDocsIfNested();
    if (nestedDocs == null) {
      writer.addDocument(cmd.getLuceneDocument());
    } else {
      writer.addDocuments(nestedDocs);
    }
    // Record in the tlog only after the index accepted the document(s).
    if (ulog != null) {
      ulog.add(cmd);
    }
  } finally {
    writerRef.decref();
  }
}
 
Example 9
Source File: AbstractAlfrescoSolrIT.java    From SearchServices with GNU Lesser General Public License v3.0 5 votes vote down vote up
/**
 * Polls the index until {@code query} matches {@code expectedNumFound} docs,
 * or {@code waitMillis} has elapsed, in which case an Exception is thrown.
 */
public static void waitForDocCount(Query query, long expectedNumFound, long waitMillis)
        throws Exception
{
    long timeout = new Date().getTime() + waitMillis;

    int totalHits = 0;
    while (new Date().getTime() < timeout)
    {
        // Acquire the searcher ref per iteration, before the try. The original
        // declared `ref` outside the loop and assigned it inside the try, so a
        // failed acquisition caused an NPE (first iteration) or a double
        // decref of the previous iteration's already-released ref.
        RefCounted<SolrIndexSearcher> ref = getCore().getSearcher();
        try
        {
            SolrIndexSearcher searcher = ref.get();
            TopDocs topDocs = searcher.search(query, 10);
            totalHits = topDocs.totalHits;
            if (topDocs.totalHits == expectedNumFound)
            {
                LOG.warn("Query \"" + query + "\" returned " + totalHits + " as expected");
                return;
            }
            LOG.warn("Query \"" + query + "\" returned " + totalHits + ", expected " + expectedNumFound);
        }
        finally
        {
            ref.decref();
        }
        // Sleep *after* releasing the searcher so we don't pin it for 2s.
        Thread.sleep(2000);
    }
    throw new Exception("Wait error expected "+expectedNumFound+" found "+totalHits+" : "+query.toString());
}
 
Example 10
Source File: TestMergePolicyConfig.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Given a SolrCore, asserts that each segment in the (searchable) index
 * has a compound file status that matches the expected input.
 */
public static void assertCompoundSegments(SolrCore core, boolean compound) {
  final RefCounted<SolrIndexSearcher> holder = core.getRegisteredSearcher();
  try {
    // Delegate the per-segment checks to the reader-based overload.
    assertCompoundSegments(holder.get().getRawReader(), compound);
  } finally {
    holder.decref();
  }
}
 
Example 11
Source File: UpdateLog.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/** Seeds version buckets from the realtime searcher and returns the current max version. */
Long getMaxVersionFromIndex() {
  final RefCounted<SolrIndexSearcher> holder =
      (uhandler != null && uhandler.core != null) ? uhandler.core.getRealtimeSearcher() : null;
  if (holder == null) {
    throw new IllegalStateException("No searcher available to lookup max version from index!");
  }

  try {
    seedBucketsWithHighestVersion(holder.get());
    return getCurrentMaxVersion();
  } finally {
    holder.decref();
  }
}
 
Example 12
Source File: SolrCore.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/** Seeds the update log's version buckets from the realtime searcher, if one is available. */
public void seedVersionBuckets() {
  UpdateHandler uh = getUpdateHandler();
  // Nothing to seed without an update handler and its update log.
  if (uh == null || uh.getUpdateLog() == null) {
    return;
  }
  RefCounted<SolrIndexSearcher> holder = getRealtimeSearcher();
  if (holder == null) {
    log.warn("No searcher available! Cannot seed version buckets with max from index.");
    return;
  }
  try {
    uh.getUpdateLog().seedBucketsWithHighestVersion(holder.get());
  } finally {
    holder.decref();
  }
}
 
Example 13
Source File: DirectUpdateHandler2.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/** Removes every document from the index via the shared IndexWriter. */
private void deleteAll() throws IOException {
  if (log.isInfoEnabled()) {
    log.info("{} REMOVING ALL DOCUMENTS FROM INDEX", core.getLogId());
  }
  RefCounted<IndexWriter> writerRef = solrCoreState.getIndexWriter(core);
  try {
    writerRef.get().deleteAll();
  } finally {
    // Release the ref even if deleteAll() throws.
    writerRef.decref();
  }
}
 
Example 14
Source File: JoinQParserPlugin.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Parses the local params of a join query, resolving cross-core joins.
 * For a different "fromIndex", the from-query is parsed against that core's
 * request context and the registered searcher's open time is captured
 * (presumably for cache-staleness detection — confirm against callers);
 * otherwise the from-query is parsed as a filter sub-query on this core.
 *
 * @throws SyntaxError if the from-query cannot be parsed
 */
JoinParams parseJoin(QParser qparser) throws SyntaxError {
  final String fromField = qparser.getParam("from");
  final String fromIndex = qparser.getParam("fromIndex");
  final String toField = qparser.getParam("to");
  final String v = qparser.localParams.get(QueryParsing.V);
  final String coreName;

  Query fromQuery;
  long fromCoreOpenTime = 0;

  // Cross-core join only when fromIndex names a different core/collection.
  if (fromIndex != null && !fromIndex.equals(qparser.req.getCore().getCoreDescriptor().getName()) ) {
    CoreContainer container = qparser.req.getCore().getCoreContainer();

    // if in SolrCloud mode, fromIndex should be the name of a single-sharded collection
    coreName = ScoreJoinQParserPlugin.getCoreName(fromIndex, container);

    final SolrCore fromCore = container.getCore(coreName);
    if (fromCore == null) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Cross-core join: no such core " + coreName);
    }

    RefCounted<SolrIndexSearcher> fromHolder = null;
    LocalSolrQueryRequest otherReq = new LocalSolrQueryRequest(fromCore, qparser.params);
    try {
      QParser parser = QParser.getParser(v, otherReq);
      fromQuery = parser.getQuery();
      // getRegisteredSearcher() may return null, hence the guards here and in finally.
      fromHolder = fromCore.getRegisteredSearcher();
      if (fromHolder != null) fromCoreOpenTime = fromHolder.get().getOpenNanoTime();
    } finally {
      // Release in this exact order: request, core handle, then searcher ref.
      otherReq.close();
      fromCore.close();
      if (fromHolder != null) fromHolder.decref();
    }
  } else {
    // Same-core join: parse the from-query as a filter sub-query.
    coreName = null;
    QParser fromQueryParser = qparser.subQuery(v, null);
    fromQueryParser.setIsFilter(true);
    fromQuery = fromQueryParser.getQuery();
  }

  final String indexToUse = coreName == null ? fromIndex : coreName;
  return new JoinParams(fromField, indexToUse, fromQuery, fromCoreOpenTime, toField);
}
 
Example 15
Source File: RecoveryStrategy.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Replicates the index from the leader: commits on the leader, then fetches
 * the index synchronously via the core's replication handler. On success
 * (and at debug level) logs a summary of the replicated index state.
 */
final private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderprops)
    throws SolrServerException, IOException {

  final String leaderUrl = getReplicateLeaderUrl(leaderprops);

  log.info("Attempting to replicate from [{}].", leaderUrl);

  // send commit
  commitOnLeader(leaderUrl);

  // use rep handler directly, so we can do this sync rather than async
  SolrRequestHandler handler = core.getRequestHandler(ReplicationHandler.PATH);
  ReplicationHandler replicationHandler = (ReplicationHandler) handler;

  if (replicationHandler == null) {
    throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
        "Skipping recovery, no " + ReplicationHandler.PATH + " handler found");
  }

  ModifiableSolrParams solrParams = new ModifiableSolrParams();
  solrParams.set(ReplicationHandler.MASTER_URL, leaderUrl);
  solrParams.set(ReplicationHandler.SKIP_COMMIT_ON_MASTER_VERSION_ZERO, replicaType == Replica.Type.TLOG);
  // always download the tlogs from the leader when running with cdcr enabled. We need to have all the tlogs
  // to ensure leader failover doesn't cause missing docs on the target
  if (core.getUpdateHandler().getUpdateLog() != null
      && core.getUpdateHandler().getUpdateLog() instanceof CdcrUpdateLog) {
    solrParams.set(ReplicationHandler.TLOG_FILES, true);
  }

  if (isClosed()) return; // we check closed on return
  boolean success = replicationHandler.doFetch(solrParams, false).getSuccessful();

  if (!success) {
    throw new SolrException(ErrorCode.SERVER_ERROR, "Replication for recovery failed.");
  }

  // solrcloud_debug
  if (log.isDebugEnabled()) {
    try {
      RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
      try {
        SolrIndexSearcher searcher = searchHolder.get();
        // Acquire the Directory *inside* the searcher's try/finally: the
        // original acquired it between getNewestSearcher() and the try, so a
        // failure in DirectoryFactory.get() leaked the searcher reference.
        Directory dir = core.getDirectoryFactory().get(core.getIndexDir(), DirContext.META_DATA, null);
        try {
          final IndexCommit commit = core.getDeletionPolicy().getLatestCommit();
          log.debug("{} replicated {} from {} gen: {} data: {} index: {} newIndex: {} files: {}"
              , core.getCoreContainer().getZkController().getNodeName()
              , searcher.count(new MatchAllDocsQuery())
              , leaderUrl
              , (null == commit ? "null" : commit.getGeneration())
              , core.getDataDir()
              , core.getIndexDir()
              , core.getNewIndexDir()
              , Arrays.asList(dir.listAll()));
        } finally {
          core.getDirectoryFactory().release(dir);
        }
      } finally {
        searchHolder.decref();
      }
    } catch (Exception e) {
      // Diagnostics only — never let debug logging fail the recovery.
      log.debug("Error in solrcloud_debug block", e);
    }
  }

}
 
Example 16
Source File: TestHalfAndHalfDocValues.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Verifies that adding docValues to an existing field type mid-life produces
 * an index with a mix of segments with and without docValues, and that
 * sorting on the field still works across that mix.
 */
public void testHalfAndHalfDocValues() throws Exception {
  // Insert two docs without docvalues
  String fieldname = "string_add_dv_later";
  assertU(adoc("id", "3", fieldname, "c"));
  assertU(commit());
  assertU(adoc("id", "1", fieldname, "a"));
  assertU(commit());


  try (SolrCore core = h.getCoreInc()) {
    assertFalse(core.getLatestSchema().getField(fieldname).hasDocValues());
    // Add docvalues to the field type
    IndexSchema schema = core.getLatestSchema();
    SchemaField oldField = schema.getField(fieldname);
    int newProperties = oldField.getProperties() | SchemaField.DOC_VALUES;

    // Replace the schema field in-place with a docValues-enabled copy.
    SchemaField sf = new SchemaField(fieldname, oldField.getType(), newProperties, null);
    schema.getFields().put(fieldname, sf);

    // Insert a new doc with docvalues
    assertU(adoc("id", "2", fieldname, "b"));
    assertU(commit());


    // Check there are a mix of segments with and without docvalues
    final RefCounted<SolrIndexSearcher> searcherRef = core.openNewSearcher(true, true);
    final SolrIndexSearcher searcher = searcherRef.get();
    try {
      final DirectoryReader topReader = searcher.getRawReader();

      //Assert no merges

      // Each commit above produced exactly one segment, so 3 docs in 3 leaves.
      assertEquals(3, topReader.numDocs());
      assertEquals(3, topReader.leaves().size());

      final FieldInfos infos = FieldInfos.getMergedFieldInfos(topReader);
      //The global field type should have docValues because a document with dvs was added
      assertEquals(DocValuesType.SORTED, infos.fieldInfo(fieldname).getDocValuesType());

      for (LeafReaderContext ctx : topReader.leaves()) {
        LeafReader r = ctx.reader();
        //Make sure there were no merges
        assertEquals(1, r.numDocs());
        Document doc = r.document(0);
        String id = doc.getField("id").stringValue();

        // Docs 1 and 3 were indexed before docValues were enabled.
        if (id.equals("1") || id.equals("3")) {
          assertEquals(DocValuesType.NONE, r.getFieldInfos().fieldInfo(fieldname).getDocValuesType());
        } else {
          assertEquals(DocValuesType.SORTED, r.getFieldInfos().fieldInfo(fieldname).getDocValuesType());
        }

      }
    } finally {
      searcherRef.decref();
    }
  }

  // Assert sort order is correct
  assertQ(req("q", "string_add_dv_later:*", "sort", "string_add_dv_later asc"),
      "//*[@numFound='3']",
      "//result/doc[1]/str[@name='id'][.=1]",
      "//result/doc[2]/str[@name='id'][.=2]",
      "//result/doc[3]/str[@name='id'][.=3]"
  );
}
 
Example 17
Source File: IndexSizeEstimatorTest.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Exercises IndexSizeEstimator against a random core: full estimate,
 * sampled estimate (within 50% of full), and reader usability afterwards
 * (SOLR-13694).
 */
@Test
public void testEstimator() throws Exception {
  JettySolrRunner jetty = cluster.getRandomJetty(random());
  String randomCoreName = jetty.getCoreContainer().getAllCoreNames().iterator().next();
  SolrCore core = jetty.getCoreContainer().getCore(randomCoreName);
  try {
    // Acquire the searcher inside the outer try: the original called
    // core.getSearcher() before any try, so a failure there leaked the core
    // (core.close() was never reached).
    RefCounted<SolrIndexSearcher> searcherRef = core.getSearcher();
    try {
      SolrIndexSearcher searcher = searcherRef.get();
      // limit the max length
      IndexSizeEstimator estimator = new IndexSizeEstimator(searcher.getRawReader(), 20, 50, true, true);
      IndexSizeEstimator.Estimate estimate = estimator.estimate();
      Map<String, Long> fieldsBySize = estimate.getFieldsBySize();
      assertFalse("empty fieldsBySize", fieldsBySize.isEmpty());
      assertEquals(fieldsBySize.toString(), fields.size(), fieldsBySize.size());
      fieldsBySize.forEach((k, v) -> assertTrue("unexpected size of " + k + ": " + v, v > 0));
      Map<String, Long> typesBySize = estimate.getTypesBySize();
      assertFalse("empty typesBySize", typesBySize.isEmpty());
      assertTrue("expected at least 8 types: " + typesBySize.toString(), typesBySize.size() >= 8);
      typesBySize.forEach((k, v) -> assertTrue("unexpected size of " + k + ": " + v, v > 0));
      Map<String, Object> summary = estimate.getSummary();
      assertNotNull("summary", summary);
      assertFalse("empty summary", summary.isEmpty());
      assertEquals(summary.keySet().toString(), fields.size(), summary.keySet().size());
      Map<String, Object> details = estimate.getDetails();
      assertNotNull("details", details);
      assertFalse("empty details", details.isEmpty());
      // by type
      assertEquals(details.keySet().toString(), 6, details.keySet().size());

      // check sampling
      estimator.setSamplingThreshold(searcher.getRawReader().maxDoc() / 2);
      IndexSizeEstimator.Estimate sampledEstimate = estimator.estimate();
      Map<String, Long> sampledFieldsBySize = sampledEstimate.getFieldsBySize();
      assertFalse("empty fieldsBySize", sampledFieldsBySize.isEmpty());
      // verify that the sampled values are within 50% of the original values
      fieldsBySize.forEach((field, size) -> {
        Long sampledSize = sampledFieldsBySize.get(field);
        assertNotNull("sampled size for " + field + " is missing in " + sampledFieldsBySize, sampledSize);
        double delta = (double) size * 0.5;
        assertEquals("sampled size of " + field + " is wildly off", (double)size, (double)sampledSize, delta);
      });
      // verify the reader is still usable - SOLR-13694
      IndexReader reader = searcher.getRawReader();
      for (LeafReaderContext context : reader.leaves()) {
        LeafReader leafReader = context.reader();
        assertTrue("unexpected LeafReader class: " + leafReader.getClass().getName(), leafReader instanceof CodecReader);
        Bits liveDocs = leafReader.getLiveDocs();
        CodecReader codecReader = (CodecReader) leafReader;
        StoredFieldsReader storedFieldsReader = codecReader.getFieldsReader();
        StoredFieldVisitor visitor = new DocumentStoredFieldVisitor();
        assertNotNull(storedFieldsReader);
        for (int docId = 0; docId < leafReader.maxDoc(); docId++) {
          if (liveDocs != null && !liveDocs.get(docId)) {
            continue;
          }
          storedFieldsReader.visitDocument(docId, visitor);
        }
      }
    } finally {
      searcherRef.decref();
    }
  } finally {
    core.close();
  }
}
 
Example 18
Source File: RequestSyncShardOp.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Handles a core-admin request to sync this node's copy of a shard with its
 * replicas via SyncStrategy. Only valid in SolrCloud mode; throws if the
 * sync fails or the core cannot be found.
 */
@Override
public void execute(CallInfo it) throws Exception {
  final SolrParams params = it.req.getParams();

  log.info("I have been requested to sync up my shard");

  String cname = params.required().get(CoreAdminParams.CORE);

  ZkController zkController = it.handler.coreContainer.getZkController();
  if (zkController == null) {
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Only valid for SolrCloud");
  }

  SyncStrategy syncStrategy = null;
  // try-with-resources: the core handle is released automatically.
  try (SolrCore core = it.handler.coreContainer.getCore(cname)) {

    if (core != null) {
      syncStrategy = new SyncStrategy(core.getCoreContainer());

      Map<String, Object> props = new HashMap<>();
      props.put(ZkStateReader.BASE_URL_PROP, zkController.getBaseUrl());
      props.put(ZkStateReader.CORE_NAME_PROP, cname);
      props.put(ZkStateReader.NODE_NAME_PROP, zkController.getNodeName());

      boolean success = syncStrategy.sync(zkController, core, new ZkNodeProps(props), true).isSuccess();
      // solrcloud_debug: log post-sync doc count; failures here are swallowed
      // deliberately so diagnostics can never break the sync path.
      if (log.isDebugEnabled()) {
        try {
          RefCounted<SolrIndexSearcher> searchHolder = core
              .getNewestSearcher(false);
          SolrIndexSearcher searcher = searchHolder.get();
          try {
            if (log.isDebugEnabled()) {
              log.debug("{} synched {}", core.getCoreContainer().getZkController().getNodeName()
                  , searcher.count(new MatchAllDocsQuery()));
            }
          } finally {
            searchHolder.decref();
          }
        } catch (Exception e) {
          log.debug("Error in solrcloud_debug block", e);
        }
      }
      if (!success) {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Sync Failed");
      }
    } else {
      SolrException.log(log, "Could not find core to call sync:" + cname);
    }
  } finally {
    // no recoveryStrat close for now
    if (syncStrategy != null) {
      syncStrategy.close();
    }
  }
}
 
Example 19
Source File: TestSortableTextField.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * White-box check of the index reader: verifies FieldInfos, docValues type,
 * and terms presence for each SortableTextField variant after indexing one doc.
 */
public void testWhiteboxIndexReader() throws Exception {
  assertU(adoc("id","1",
               "whitespace_stxt", "how now brown cow ?",
               "whitespace_m_stxt", "xxx",
               "whitespace_m_stxt", "yyy",
               "whitespace_f_stxt", "aaa bbb",
               "keyword_stxt", "Blarggghhh!"));
  assertU(commit());

  // NOTE(review): searcher is null-checked in finally but searcher.get() below
  // is not — if getNewestSearcher(false) can return null this NPEs first; confirm.
  final RefCounted<SolrIndexSearcher> searcher = h.getCore().getNewestSearcher(false);
  try {
    final LeafReader r = searcher.get().getSlowAtomicReader();

    // common cases: single-valued fields indexed with terms + SORTED docValues
    for (String field : Arrays.asList("keyword_stxt", "keyword_dv_stxt",
                                      "whitespace_stxt", "whitespace_f_stxt", "whitespace_l_stxt")) {
      assertNotNull("FieldInfos: " + field, r.getFieldInfos().fieldInfo(field));
      assertEquals("DocValuesType: " + field,
                   DocValuesType.SORTED, r.getFieldInfos().fieldInfo(field).getDocValuesType());
      assertNotNull("DocValues: " + field, r.getSortedDocValues(field));
      assertNotNull("Terms: " + field, r.terms(field));

    }

    // special cases...
    // docValues disabled: terms present, no docValues
    assertNotNull(r.getFieldInfos().fieldInfo("whitespace_nodv_stxt"));
    assertEquals(DocValuesType.NONE,
                 r.getFieldInfos().fieldInfo("whitespace_nodv_stxt").getDocValuesType());
    assertNull(r.getSortedDocValues("whitespace_nodv_stxt"));
    assertNotNull(r.terms("whitespace_nodv_stxt"));
    // indexing disabled: docValues present, no terms
    assertNotNull(r.getFieldInfos().fieldInfo("whitespace_nois_stxt"));
    assertEquals(DocValuesType.SORTED,
                 r.getFieldInfos().fieldInfo("whitespace_nois_stxt").getDocValuesType());
    assertNotNull(r.getSortedDocValues("whitespace_nois_stxt"));
    assertNull(r.terms("whitespace_nois_stxt"));
    // multiValued: SORTED_SET docValues plus terms
    assertNotNull(r.getFieldInfos().fieldInfo("whitespace_m_stxt"));
    assertEquals(DocValuesType.SORTED_SET,
                 r.getFieldInfos().fieldInfo("whitespace_m_stxt").getDocValuesType());
    assertNotNull(r.getSortedSetDocValues("whitespace_m_stxt"));
    assertNotNull(r.terms("whitespace_m_stxt"));

  } finally {
    if (null != searcher) {
      searcher.decref();
    }
  }
}
 
Example 20
Source File: TestRetrieveFieldsOptimizer.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Registers one field (and identically-named field type) per test type and
 * per stored/docValues/multiValued combination, indexes a handful of docs,
 * then massages the expected values based on what docValues actually return.
 */
@SuppressWarnings({"unchecked"})
private static void setupAllFields() throws IOException {

  IndexSchema schema = h.getCore().getLatestSchema();

  // Add all the types before the fields.
  Map<String, Map<String, String>> fieldsToAdd = new HashMap<>();

  // We need our special id fields to find the docs later.
  typesHolder.addFieldType(schema, idNotStoredDv, RetrieveFieldType.TEST_TYPE.STRING);
  fieldsToAdd.put(idNotStoredDv, map("stored", "false", "docValues", "true", "multiValued", "false"));

  typesHolder.addFieldType(schema, idStoredNotDv, RetrieveFieldType.TEST_TYPE.STRING);
  fieldsToAdd.put(idStoredNotDv, map("stored", "true", "docValues", "false", "multiValued", "false"));

  // Data table replacing six near-identical copy-paste stanzas per type:
  // suffixes[i] pairs with flags[i] = {stored, docValues, multiValued}.
  final String[] suffixes = { storedNotDvSv, storedAndDvSv, notStoredDvSv,
                              storedNotDvMv, storedAndDvMv, notStoredDvMv };
  final String[][] flags = {
      {"true",  "false", "false"},
      {"true",  "true",  "false"},
      {"false", "true",  "false"},
      {"true",  "false", "true"},
      {"true",  "true",  "true"},
      {"false", "true",  "true"},
  };

  for (RetrieveFieldType.TEST_TYPE type : RetrieveFieldType.solrClassMap.keySet()) {
    // We happen to be naming the fields and types identically.
    for (int i = 0; i < suffixes.length; i++) {
      String myName = type.toString() + suffixes[i];
      typesHolder.addFieldType(schema, myName, type);
      // map(...) is called per field so each entry gets its own fresh Map,
      // exactly as the original per-field calls did.
      fieldsToAdd.put(myName, map("stored", flags[i][0], "docValues", flags[i][1], "multiValued", flags[i][2]));
    }
  }

  schema = typesHolder.addFieldTypes(schema);

  for (Map.Entry<String, Map<String, String>> ent : fieldsToAdd.entrySet()) {
    fieldsHolder.addField(schema, ent.getKey(), ent.getKey(), ent.getValue());
  }
  schema = fieldsHolder.addFields(schema);

  h.getCore().setLatestSchema(schema);

  // All that setup work and we're only going to add a very few docs!
  for (int idx = 0; idx < 10; ++idx) {
    addDocWithAllFields(idx);
  }
  assertU(commit());

  // Now we need to massage the expected values returned based on the docValues type 'cause it's weird.
  final RefCounted<SolrIndexSearcher> refCounted = h.getCore().getNewestSearcher(true);
  try {
    for (Map<String, List<String>> docFieldsEnt : allFieldValuesInput.values()) {
      for (Map.Entry<String, List<String>> oneField : docFieldsEnt.entrySet()) {
        RetrieveField field = fieldsHolder.getTestField(oneField.getKey());
        field.expectedValsAsStrings(refCounted.get().getSlowAtomicReader().getFieldInfos().fieldInfo(field.name),
            oneField.getValue());
      }
    }
  } finally {
    refCounted.decref();
  }
}