org.apache.solr.update.CommitUpdateCommand Java Examples

The following examples show how to use org.apache.solr.update.CommitUpdateCommand. They are drawn from open-source projects; the originating project, source file, and license are noted above each example.
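For orientation, the basic pattern shared by the examples below is: build a CommitUpdateCommand from a SolrQueryRequest, set the public flags that control the commit, and hand the command to an UpdateRequestProcessor or to the core's UpdateHandler. The sketch that follows is illustrative only (it is not taken from any of the projects below) and assumes an already-obtained SolrCore named core; imports are omitted, as in the examples.

// Minimal sketch: issue a hard commit that opens a new searcher.
SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
try {
  CommitUpdateCommand cmd = new CommitUpdateCommand(req, false); // second arg: true = optimize, false = plain commit
  cmd.softCommit = false;    // hard commit (flush segments to the index on disk)
  cmd.openSearcher = true;   // make committed documents visible to searches
  cmd.waitSearcher = true;   // block until the new searcher is registered
  core.getUpdateHandler().commit(cmd);
} finally {
  req.close();
}
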
Example #1
Source File: UpdateProcessorTestBase.java    From lucene-solr with Apache License 2.0
protected void processCommit(final String chain) throws IOException {
  SolrCore core = h.getCore();
  UpdateRequestProcessorChain pc = core.getUpdateProcessingChain(chain);
  assertNotNull("No Chain named: " + chain, pc);

  SolrQueryResponse rsp = new SolrQueryResponse();

  SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());

  CommitUpdateCommand cmd = new CommitUpdateCommand(req,false);
  UpdateRequestProcessor processor = pc.createProcessor(req, rsp);
  try {
    processor.processCommit(cmd);
  } finally {
    req.close();
  }
}
 
Example #2
Source File: IgnoreCommitOptimizeUpdateProcessorFactoryTest.java    From lucene-solr with Apache License 2.0
SolrQueryResponse processCommit(final String chain, boolean optimize, Boolean commitEndPoint) throws IOException {
  SolrCore core = h.getCore();
  UpdateRequestProcessorChain pc = core.getUpdateProcessingChain(chain);
  assertNotNull("No Chain named: " + chain, pc);

  SolrQueryResponse rsp = new SolrQueryResponse();
  SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());

  if (commitEndPoint != null) {
    ((ModifiableSolrParams)req.getParams()).set(
        DistributedUpdateProcessor.COMMIT_END_POINT, commitEndPoint.booleanValue());
  }

  try {
    SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req,rsp));
    CommitUpdateCommand cmd = new CommitUpdateCommand(req, false);
    cmd.optimize = optimize;
    UpdateRequestProcessor processor = pc.createProcessor(req, rsp);
    processor.processCommit(cmd);
  } finally {
    SolrRequestInfo.clearRequestInfo();
    req.close();
  }
  return rsp;
}
 
Example #3
Source File: DistributedUpdateProcessor.java    From lucene-solr with Apache License 2.0
protected void doLocalCommit(CommitUpdateCommand cmd) throws IOException {
  if (vinfo != null) {
    long commitVersion = vinfo.getNewClock();
    cmd.setVersion(commitVersion);
    vinfo.lockForUpdate();
  }
  try {

    if (ulog == null || ulog.getState() == UpdateLog.State.ACTIVE || (cmd.getFlags() & UpdateCommand.REPLAY) != 0) {
      super.processCommit(cmd);
    } else {
      if (log.isInfoEnabled()) {
        log.info("Ignoring commit while not ACTIVE - state: {} replay: {}"
            , ulog.getState(), ((cmd.getFlags() & UpdateCommand.REPLAY) != 0));
      }
    }

  } finally {
    if (vinfo != null) {
      vinfo.unlockForUpdate();
    }
  }
}
 
Example #4
Source File: SolrInformationServer.java    From SearchServices with GNU Lesser General Public License v3.0
@Override
public void hardCommit() throws IOException
{
    // avoid multiple commits and warming searchers
    commitAndRollbackLock.writeLock().lock();
    try
    {
        UpdateRequestProcessor processor = null;
        try (SolrQueryRequest request = newSolrQueryRequest())
        {
            processor = this.core.getUpdateProcessingChain(null).createProcessor(request, newSolrQueryResponse());
            CommitUpdateCommand commitUpdateCommand = new CommitUpdateCommand(request, false);
            commitUpdateCommand.openSearcher = false;
            commitUpdateCommand.softCommit = false;
            commitUpdateCommand.waitSearcher = false;
            processor.processCommit(commitUpdateCommand);
        }
        finally
        {
            if (processor != null)
            {
                processor.finish();
            }
        }
    }
    finally
    {
        commitAndRollbackLock.writeLock().unlock();
    }
}
 
Example #5
Source File: UpdateIndexAuthorizationProcessorTest.java    From incubator-sentry with Apache License 2.0
private void verifyAuthorized(String collection, String user) throws Exception {
  SolrQueryRequestBase req = new SolrQueryRequestBase(core, new MapSolrParams(new HashMap())) {};
  getProcessor(collection, user).processAdd(new AddUpdateCommand(req));
  getProcessor(collection, user).processDelete(new DeleteUpdateCommand(req));
  DeleteUpdateCommand deleteByQueryCommand = new DeleteUpdateCommand(req);
  deleteByQueryCommand.setQuery("*:*");
  getProcessor(collection, user).processDelete(deleteByQueryCommand);
  getProcessor(collection, user).processMergeIndexes(new MergeIndexesCommand(null, req));
  getProcessor(collection, user).processCommit(new CommitUpdateCommand(req, false));
  getProcessor(collection, user).processRollback(new RollbackUpdateCommand(req));
  getProcessor(collection, user).finish();
}
 
Example #6
Source File: AbstractIngestionHandler.java    From chronix.server with Apache License 2.0
@Override
@SuppressWarnings("PMD.SignatureDeclareThrowsException")
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    formatResponseAsJson(req);

    if (req.getContentStreams() == null) {
        LOGGER.warn("no content stream");
        rsp.add("error", "No content stream");
        return;
    }

    boolean commit = Boolean.parseBoolean(req.getParams().get("commit", "true"));

    InputStream stream = req.getContentStreams().iterator().next().getStream();
    stream = detectGzip(stream);

    MetricTimeSeriesConverter converter = new MetricTimeSeriesConverter();

    UpdateRequestProcessorChain processorChain = req.getCore().getUpdateProcessorChain(req.getParams());
    UpdateRequestProcessor processor = processorChain.createProcessor(req, rsp);
    try {
        for (MetricTimeSeries series : formatParser.parse(stream)) {
            SolrInputDocument document = new SolrInputDocument();
            converter.to(series).getFields().forEach(document::addField);
            storeDocument(document, processor, req);
        }

        if (commit) {
            LOGGER.debug("Committing transaction...");
            processor.processCommit(new CommitUpdateCommand(req, false));
            LOGGER.debug("Committed transaction");
        } else {
            LOGGER.debug("Only adding documents.");
        }
    } finally {
        processor.finish();
    }
}
 
Example #7
Source File: TestLazyCores.java    From lucene-solr with Apache License 2.0
private void checkSearch(SolrCore core) throws IOException {
  addLazy(core, "id", "0");
  addLazy(core, "id", "1", "v_t", "Hello Dude");
  addLazy(core, "id", "2", "v_t", "Hello Yonik");
  addLazy(core, "id", "3", "v_s", "{!literal}");
  addLazy(core, "id", "4", "v_s", "other stuff");
  addLazy(core, "id", "5", "v_f", "3.14159");
  addLazy(core, "id", "6", "v_f", "8983");

  SolrQueryRequest req = makeReq(core);
  CommitUpdateCommand cmtCmd = new CommitUpdateCommand(req, false);
  core.getUpdateHandler().commit(cmtCmd);

  // Just get a couple of searches to work!
  assertQ("test prefix query",
      makeReq(core, "q", "{!prefix f=v_t}hel", "wt", "xml")
      , "//result[@numFound='2']"
  );

  assertQ("test raw query",
      makeReq(core, "q", "{!raw f=v_t}hello", "wt", "xml")
      , "//result[@numFound='2']"
  );

  // no analysis is done, so these should match nothing
  assertQ("test raw query",
      makeReq(core, "q", "{!raw f=v_t}Hello", "wt", "xml")
      , "//result[@numFound='0']"
  );
  assertQ("test raw query",
      makeReq(core, "q", "{!raw f=v_f}1.5", "wt", "xml")
      , "//result[@numFound='0']"
  );
}
 
Example #8
Source File: TestMergePolicyConfig.java    From lucene-solr with Apache License 2.0
public void testNoMergePolicyFactoryConfig() throws Exception {
  initCore("solrconfig-nomergepolicyfactory.xml","schema-minimal.xml");
  IndexWriterConfig iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore());
  NoMergePolicy mergePolicy = assertAndCast(NoMergePolicy.class,
      iwc.getMergePolicy());

  assertCommitSomeNewDocs();

  assertCommitSomeNewDocs();
  assertNumSegments(h.getCore(), 2);

  assertU(optimize());
  assertNumSegments(h.getCore(), 2);
  deleteCore();
  initCore("solrconfig-nomergepolicyfactory.xml","schema-minimal.xml");
  iwc = solrConfig.indexConfig.toIndexWriterConfig(h.getCore());
  assertEquals(mergePolicy, iwc.getMergePolicy());

  UpdateHandler updater = h.getCore().getUpdateHandler();
  SolrQueryRequest req = req();
  CommitUpdateCommand cmtCmd = new CommitUpdateCommand(req, true);
  cmtCmd.maxOptimizeSegments = -1;
  expectThrows(IllegalArgumentException.class, () -> {
    updater.commit(cmtCmd);
  });

}
 
Example #9
Source File: ChangedSchemaMergeTest.java    From lucene-solr with Apache License 2.0
@Test
public void testOptimizeDiffSchemas() throws Exception {
  // load up a core (why not put it on disk?)
  CoreContainer cc = init();
  try (SolrCore changed = cc.getCore("changed")) {

    assertSimilarity(changed, simfac1);
                     
    // add some documents
    addDoc(changed, "id", "1", "which", "15", "text", "some stuff with which");
    addDoc(changed, "id", "2", "which", "15", "text", "some stuff with which");
    addDoc(changed, "id", "3", "which", "15", "text", "some stuff with which");
    addDoc(changed, "id", "4", "which", "15", "text", "some stuff with which");
    SolrQueryRequest req = new LocalSolrQueryRequest(changed, new NamedList<>());
    changed.getUpdateHandler().commit(new CommitUpdateCommand(req, false));

    // write the new schema out and make it current
    FileUtils.writeStringToFile(schemaFile, withoutWhich, StandardCharsets.UTF_8);

    IndexSchema iSchema = IndexSchemaFactory.buildIndexSchema("schema.xml", changed.getSolrConfig());
    changed.setLatestSchema(iSchema);
    
    assertSimilarity(changed, simfac2);
    // sanity check our sanity check
    assertFalse("test is broken: both simfacs are the same", simfac1.equals(simfac2)); 

    addDoc(changed, "id", "1", "text", "some stuff without which");
    addDoc(changed, "id", "5", "text", "some stuff without which");

    changed.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
    changed.getUpdateHandler().commit(new CommitUpdateCommand(req, true));
  } catch (Throwable e) {
    log.error("Test exception, logging so not swallowed if there is a (finally) shutdown exception: {}"
        , e.getMessage(), e);
    throw e;
  } finally {
    if (cc != null) cc.shutdown();
  }
}
 
Example #10
Source File: RecoveryStrategy.java    From lucene-solr with Apache License 2.0
final private Future<RecoveryInfo> replay(SolrCore core)
    throws InterruptedException, ExecutionException {
  if (testing_beforeReplayBufferingUpdates != null) {
    testing_beforeReplayBufferingUpdates.run();
  }
  if (replicaType == Replica.Type.TLOG) {
    // roll over all updates during buffering to new tlog, make RTG available
    SolrQueryRequest req = new LocalSolrQueryRequest(core,
        new ModifiableSolrParams());
    core.getUpdateHandler().getUpdateLog().copyOverBufferingUpdates(new CommitUpdateCommand(req, false));
    req.close();
    return null;
  }
  Future<RecoveryInfo> future = core.getUpdateHandler().getUpdateLog().applyBufferedUpdates();
  if (future == null) {
    // no replay needed
    log.info("No replay needed.");
  } else {
    log.info("Replaying buffered documents.");
    // wait for replay
    RecoveryInfo report = future.get();
    if (report.failed) {
      SolrException.log(log, "Replay failed");
      throw new SolrException(ErrorCode.SERVER_ERROR, "Replay failed");
    }
  }

  // the index may be ahead of the tlog's caches after recovery; calling this purges the tlog's caches
  core.getUpdateHandler().getUpdateLog().openRealtimeSearcher();

  // solrcloud_debug
  cloudDebugLog(core, "replayed");

  return future;
}
 
Example #11
Source File: BlobHandler.java    From lucene-solr with Apache License 2.0
public static void indexMap(SolrQueryRequest req, SolrQueryResponse rsp, Map<String, Object> doc) throws IOException {
  SolrInputDocument solrDoc = new SolrInputDocument();
  for (Map.Entry<String, Object> e : doc.entrySet()) solrDoc.addField(e.getKey(), e.getValue());
  UpdateRequestProcessorChain processorChain = req.getCore().getUpdateProcessorChain(req.getParams());
  try (UpdateRequestProcessor processor = processorChain.createProcessor(req, rsp)) {
    AddUpdateCommand cmd = new AddUpdateCommand(req);
    cmd.solrDoc = solrDoc;
    log.info("Adding doc: {}", doc);
    processor.processAdd(cmd);
    log.info("committing doc: {}", doc);
    processor.processCommit(new CommitUpdateCommand(req, false));
    processor.finish();
  }
}
 
Example #12
Source File: RequestHandlerUtils.java    From lucene-solr with Apache License 2.0
/**
 * Modify UpdateCommand based on request parameters
 */
public static void updateCommit(CommitUpdateCommand cmd, SolrParams params) {
  if( params == null ) return;

  cmd.openSearcher = params.getBool( UpdateParams.OPEN_SEARCHER, cmd.openSearcher );
  cmd.waitSearcher = params.getBool( UpdateParams.WAIT_SEARCHER, cmd.waitSearcher );
  cmd.softCommit = params.getBool( UpdateParams.SOFT_COMMIT, cmd.softCommit );
  cmd.expungeDeletes = params.getBool( UpdateParams.EXPUNGE_DELETES, cmd.expungeDeletes );
  cmd.maxOptimizeSegments = params.getInt( UpdateParams.MAX_OPTIMIZE_SEGMENTS, cmd.maxOptimizeSegments );
  cmd.prepareCommit = params.getBool( UpdateParams.PREPARE_COMMIT,   cmd.prepareCommit );
}
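A hedged usage sketch (not taken from the Solr sources) of how this helper is typically driven: request parameters named by the standard UpdateParams constants are copied onto the command, and anything left unset keeps the command's defaults. It assumes an existing SolrQueryRequest named req.

// Illustrative only: map request parameters onto a commit command.
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(UpdateParams.SOFT_COMMIT, true);    // softCommit=true
params.set(UpdateParams.OPEN_SEARCHER, false); // openSearcher=false

CommitUpdateCommand cmd = new CommitUpdateCommand(req, false);
RequestHandlerUtils.updateCommit(cmd, params);
// cmd.softCommit is now true and cmd.openSearcher is false; the other flags keep their defaults.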
 
Example #13
Source File: TolerantUpdateProcessor.java    From lucene-solr with Apache License 2.0
@Override
public void processCommit(CommitUpdateCommand cmd) throws IOException {
  try {
    super.processCommit(cmd);
  } catch (Throwable t) {
    // we're not tolerant of errors from this type of command, but we
    // do need to track it so we can annotate it with any other errors we were already tolerant of
    firstErrTracker.caught(t);
    throw t;
  }
}
 
Example #14
Source File: IgnoreCommitOptimizeUpdateProcessorFactory.java    From lucene-solr with Apache License 2.0
@Override
public void processCommit(CommitUpdateCommand cmd) throws IOException {

  if (ignoreOptimizeOnly && !cmd.optimize) {
    // we're set up to ignore only optimize requests, so it's OK to pass this commit on down the line
    if (next != null) next.processCommit(cmd);
    return;
  }

  if (cmd.getReq().getParams().getBool(DistributedUpdateProcessor.COMMIT_END_POINT, false)) {
    // this is a targeted commit from replica to leader needed for recovery, so can't be ignored
    if (next != null) next.processCommit(cmd);
    return;
  }

  final String cmdType = cmd.optimize ? "optimize" : "commit";
  if (errorCode != null) {
    IgnoreCommitOptimizeUpdateProcessorFactory.log.info(
        "{} from client application ignored with error code: {}", cmdType, errorCode.code);
    rsp.setException(new SolrException(errorCode, responseMsg));
  } else {
    // errorCode is null; treat as a success, with an optional message warning that the commit request was ignored
    IgnoreCommitOptimizeUpdateProcessorFactory.log.info(
        "{} from client application ignored with status code: 200", cmdType);
    if (responseMsg != null) {
      NamedList<Object> responseHeader = rsp.getResponseHeader();
      if (responseHeader != null) {
        responseHeader.add("msg", responseMsg);
      } else {
        responseHeader = new SimpleOrderedMap<Object>();
        responseHeader.add("msg", responseMsg);
        rsp.addResponseHeader(responseHeader);
      }
    }
  }
}
 
Example #15
Source File: LogUpdateProcessorFactory.java    From lucene-solr with Apache License 2.0
@Override
public void processCommit( CommitUpdateCommand cmd ) throws IOException {
  if (logDebug) {
    log.debug("PRE_UPDATE {} {}", cmd, req);
  }
  if (next != null) next.processCommit(cmd);


  final String msg = cmd.optimize ? "optimize" : "commit";
  toLog.add(msg, "");
}
 
Example #16
Source File: SolrInformationServer.java    From SearchServices with GNU Lesser General Public License v3.0
@Override
public void commit() throws IOException
{
    // avoid multiple commits and warming searchers
    commitAndRollbackLock.writeLock().lock();
    try
    {
        canUpdate();
        UpdateRequestProcessor processor = null;
        try (SolrQueryRequest request = newSolrQueryRequest())
        {
            processor = this.core.getUpdateProcessingChain(null).createProcessor(request, newSolrQueryResponse());
            processor.processCommit(new CommitUpdateCommand(request, false));
        }
        finally
        {
            if (processor != null)
            {
                processor.finish();
            }
        }
    }
    finally
    {
        commitAndRollbackLock.writeLock().unlock();
    }
}
 
Example #17
Source File: BufferingRequestProcessor.java    From lucene-solr with Apache License 2.0
@Override
public void processCommit(CommitUpdateCommand cmd) throws IOException {
  commitCommands.add( cmd );
}
 
Example #18
Source File: UpdateIndexAuthorizationProcessor.java    From incubator-sentry with Apache License 2.0
@Override
public void processCommit(CommitUpdateCommand cmd) throws IOException
{
  authorizeCollectionAction(cmd.name());
  super.processCommit(cmd);
}
 
Example #19
Source File: TermRecognitionRequestHandler.java    From jate with GNU Lesser General Public License v3.0
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws Exception {
    log.info("Term recognition request handler...");
    setTopInitArgsAsInvariants(req);

    final String jatePropertyFile = req.getParams().get(JATE_PROPERTY_FILE);
    final String algorithmName = req.getParams().get(TERM_RANKING_ALGORITHM);
    final Boolean isExtraction = req.getParams().getBool(CANDIDATE_EXTRACTION);
    final String outFilePath = req.getParams().get(AppParams.OUTPUT_FILE.getParamKey());
    final Boolean isIndexTerms = req.getParams().getBool(INDEX_TERM);
    final Boolean isBoosted = req.getParams().getBool(BOOSTING);

    final Algorithm algorithm = getAlgorithm(algorithmName);

    JATEProperties properties = App.getJateProperties(jatePropertyFile);

    final SolrIndexSearcher searcher = req.getSearcher();
    try {
        if (isExtraction) {
            log.info("start candidate extraction (i.e., re-index of whole corpus) ...");
            generalTRProcessor.candidateExtraction(searcher.getCore(), jatePropertyFile);
            log.info("complete candidate terms indexing.");
        }

        Map<String, String> trRunTimeParams = initialiseTRRunTimeParams(req);
        List<JATETerm> termList = generalTRProcessor.rankingAndFiltering(searcher.getCore(), jatePropertyFile,
                trRunTimeParams, algorithm);

        log.info(String.format("complete term recognition extraction! Finalized Term size [%s]", termList.size()));

        if (isExport(outFilePath)) {
            generalTRProcessor.export(termList);
        }

        if (isIndexTerms) {
            log.info("start to index filtered candidate terms ...");
            indexTerms(termList, properties, searcher, isBoosted, isExtraction);
            // trigger 'optimise' to build new index
            searcher.getCore().getUpdateHandler().commit(new CommitUpdateCommand(req, true));
            log.info("complete the indexing of candidate terms.");
        }
    } finally {
        searcher.close();
    }
}
 
Example #20
Source File: SolrInformationServer.java    From SearchServices with GNU Lesser General Public License v3.0
@Override
public boolean commit(boolean openSearcher) throws IOException
{
    canUpdate();

    UpdateRequestProcessor processor = null;
    boolean searcherOpened = false;
    try (SolrQueryRequest request = newSolrQueryRequest())
    {
        processor = this.core.getUpdateProcessingChain(null).createProcessor(request, newSolrQueryResponse());
        CommitUpdateCommand command = new CommitUpdateCommand(request, false);
        if (openSearcher)
        {
            RefCounted<SolrIndexSearcher> active = null;
            RefCounted<SolrIndexSearcher> newest = null;
            try
            {
                active = core.getSearcher();
                newest = core.getNewestSearcher(false);
                if (active.get() == newest.get())
                {
                    searcherOpened = command.openSearcher = true;
                    command.waitSearcher = false;
                }
                else
                {
                    searcherOpened = command.openSearcher = false;
                }
            }
            finally
            {
                ofNullable(active).ifPresent(RefCounted::decref);
                ofNullable(newest).ifPresent(RefCounted::decref);
            }
        }
        processor.processCommit(command);
    }
    finally
    {
        if (processor != null)
        {
            processor.finish();
        }
    }

    return searcherOpened;
}
 
Example #21
Source File: AuthDataLoad.java    From SearchServices with GNU Lesser General Public License v3.0
@BeforeClass
public static void setup() throws Exception
{
    // Start test harness
    initAlfrescoCore("schema.xml");
    // Root

    NodeRef rootNodeRef = new NodeRef(new StoreRef("workspace", "SpacesStore"), createGUID());
    addStoreRoot(getCore(), dataModel, rootNodeRef, 1, 1, 1, 1);
    //        rsp.add("StoreRootNode", 1);

    // Base

    HashMap<QName, PropertyValue> baseFolderProperties = new HashMap<QName, PropertyValue>();
    baseFolderProperties.put(ContentModel.PROP_NAME, new StringPropertyValue("Base Folder"));
    NodeRef baseFolderNodeRef = new NodeRef(new StoreRef("workspace", "SpacesStore"), createGUID());
    QName baseFolderQName = QName.createQName(NamespaceService.CONTENT_MODEL_1_0_URI, "baseFolder");
    ChildAssociationRef n01CAR = new ChildAssociationRef(ContentModel.ASSOC_CHILDREN, rootNodeRef,
                baseFolderQName, baseFolderNodeRef, true, 0);
    addNode(getCore(), dataModel, 1, 2, 1, ContentModel.TYPE_FOLDER, null, baseFolderProperties, null, "andy",
                new ChildAssociationRef[] { n01CAR }, new NodeRef[] { rootNodeRef }, new String[] { "/"
                            + baseFolderQName.toString() }, baseFolderNodeRef, true);

    // Folders

    HashMap<QName, PropertyValue> folder00Properties = new HashMap<QName, PropertyValue>();
    folder00Properties.put(ContentModel.PROP_NAME, new StringPropertyValue("Folder 0"));
    NodeRef folder00NodeRef = new NodeRef(new StoreRef("workspace", "SpacesStore"), createGUID());
    QName folder00QName = QName.createQName(NamespaceService.CONTENT_MODEL_1_0_URI, "Folder 0");
    ChildAssociationRef folder00CAR = new ChildAssociationRef(ContentModel.ASSOC_CONTAINS,
                baseFolderNodeRef, folder00QName, folder00NodeRef, true, 0);
    addNode(getCore(), dataModel, 1, 3, 1, ContentModel.TYPE_FOLDER, null, folder00Properties, null, "andy",
                new ChildAssociationRef[] { folder00CAR },
                new NodeRef[] { baseFolderNodeRef, rootNodeRef },
                new String[] { "/" + baseFolderQName.toString() + "/" + folder00QName.toString() },
                folder00NodeRef, true);

    for (long i = 0; i < count; i++)
    {
        addAcl(getCore(), dataModel, 10 + (int) i, 10 + (int) i, (int) (i % maxReader), (int) maxReader);

        HashMap<QName, PropertyValue> content00Properties = new HashMap<QName, PropertyValue>();
        MLTextPropertyValue desc00 = new MLTextPropertyValue();
        desc00.addValue(Locale.ENGLISH, "Doc " + i);
        desc00.addValue(Locale.US, "Doc " + i);
        content00Properties.put(ContentModel.PROP_DESCRIPTION, desc00);
        content00Properties.put(ContentModel.PROP_TITLE, desc00);
        content00Properties.put(ContentModel.PROP_CONTENT, new ContentPropertyValue(Locale.UK, 0l, "UTF-8",
                    "text/plain", null));
        content00Properties.put(ContentModel.PROP_NAME, new StringPropertyValue("Doc " + i));
        content00Properties.put(ContentModel.PROP_CREATOR, new StringPropertyValue("Test"));
        content00Properties.put(ContentModel.PROP_MODIFIER, new StringPropertyValue("Test"));
        content00Properties.put(ContentModel.PROP_VERSION_LABEL, new StringPropertyValue("1.0"));
        content00Properties.put(ContentModel.PROP_OWNER, new StringPropertyValue("Test"));
        Date date00 = new Date();
        content00Properties.put(ContentModel.PROP_CREATED, new StringPropertyValue(
                    DefaultTypeConverter.INSTANCE.convert(String.class, date00)));
        content00Properties.put(ContentModel.PROP_MODIFIED, new StringPropertyValue(
                    DefaultTypeConverter.INSTANCE.convert(String.class, date00)));
        HashMap<QName, String> content00Content = new HashMap<QName, String>();
        content00Content.put(ContentModel.PROP_CONTENT, "Test doc number " + i);
        NodeRef content00NodeRef = new NodeRef(new StoreRef("workspace", "SpacesStore"), createGUID());
        QName content00QName = QName.createQName(NamespaceService.CONTENT_MODEL_1_0_URI, "Doc-" + i);
        ChildAssociationRef content00CAR = new ChildAssociationRef(ContentModel.ASSOC_CONTAINS,
                    folder00NodeRef, content00QName, content00NodeRef, true, 0);
        addNode(getCore(), dataModel, 1, 10 + (int) i, 10 + (int) i, ContentModel.TYPE_CONTENT, new QName[] {
                    ContentModel.ASPECT_OWNABLE, ContentModel.ASPECT_TITLED }, content00Properties,
                    content00Content, "andy", new ChildAssociationRef[] { content00CAR }, new NodeRef[] {
                                baseFolderNodeRef, rootNodeRef, folder00NodeRef }, new String[] { "/"
                                + baseFolderQName.toString() + "/" + folder00QName.toString() + "/"
                                + content00QName.toString() }, content00NodeRef, false);
    }
    getCore().getUpdateHandler().commit(new CommitUpdateCommand(req(), false));

}
 
Example #22
Source File: AlfrescoSolrUtils.java    From SearchServices with GNU Lesser General Public License v3.0
/**
 * 
 * @param core
 * @param dataModel
 * @param txid
 * @param dbid
 * @param aclid
 * @param type
 * @param aspects
 * @param properties
 * @param content
 * @param owner
 * @param parentAssocs
 * @param ancestors
 * @param paths
 * @param nodeRef
 * @param commit
 * @return
 * @throws IOException
 */
public static NodeRef addNode(SolrCore core, 
                              AlfrescoSolrDataModel dataModel,
                              int txid,
                              int dbid,
                              int aclid,
                              QName type,
                              QName[] aspects,
                              Map<QName, PropertyValue> properties,
                              Map<QName, String> content, 
                              String owner,
                              ChildAssociationRef[] parentAssocs, 
                              NodeRef[] ancestors,
                              String[] paths,
                              NodeRef nodeRef,
                              boolean commit)
{
    SolrServletRequest solrQueryRequest = null;
    try
    {
        AlfrescoCoreAdminHandler admin = (AlfrescoCoreAdminHandler) core.getCoreContainer().getMultiCoreHandler();
        SolrInformationServer solrInformationServer = (SolrInformationServer) admin.getInformationServers().get(core.getName());

        solrQueryRequest = new SolrServletRequest(core, null);
        AddUpdateCommand addDocCmd = new AddUpdateCommand(solrQueryRequest);
        addDocCmd.overwrite = true;
        addDocCmd.solrDoc = createDocument(dataModel, new Long(txid), new Long(dbid), nodeRef, type, aspects,
              properties, content, new Long(aclid), paths, owner, parentAssocs, ancestors, solrInformationServer);
        core.getUpdateHandler().addDoc(addDocCmd);
        if (commit)
        {
            core.getUpdateHandler().commit(new CommitUpdateCommand(solrQueryRequest, false));
        }
    }
    catch (IOException exception)
    {
        throw new RuntimeException(exception);
    }
    finally
    {
        solrQueryRequest.close();
    }
    return nodeRef;
}
 
Example #23
Source File: AlfrescoSolrUtils.java    From SearchServices with GNU Lesser General Public License v3.0
/**
 * Add a store to root.  
 * @param core
 * @param dataModel
 * @param rootNodeRef
 * @param txid
 * @param dbid
 * @param acltxid
 * @param aclid
 * @throws IOException
 */
public static void addStoreRoot(SolrCore core,
                                  AlfrescoSolrDataModel dataModel,
                                  NodeRef rootNodeRef,
                                  int txid,
                                  int dbid,
                                  int acltxid,
                                  int aclid) throws IOException
  {
      SolrServletRequest solrQueryRequest = null;
      try
      {
          AlfrescoCoreAdminHandler admin = (AlfrescoCoreAdminHandler) core.getCoreContainer().getMultiCoreHandler();
          SolrInformationServer solrInformationServer = (SolrInformationServer) admin.getInformationServers().get(core.getName());

          solrQueryRequest = new SolrServletRequest(core, null);
          AddUpdateCommand addDocCmd = new AddUpdateCommand(solrQueryRequest);
          addDocCmd.overwrite = true;
          addDocCmd.solrDoc = createDocument(dataModel, new Long(txid), new Long(dbid), rootNodeRef,
                  ContentModel.TYPE_STOREROOT, new QName[]{ContentModel.ASPECT_ROOT}, null, null, new Long(aclid),
                  new String[]{"/"}, "system", null, null, solrInformationServer);
          core.getUpdateHandler().addDoc(addDocCmd);
          addAcl(solrQueryRequest, core, dataModel, acltxid, aclid, 0, 0);
          AddUpdateCommand txCmd = new AddUpdateCommand(solrQueryRequest);
          txCmd.overwrite = true;
          SolrInputDocument input = new SolrInputDocument();
          String id = AlfrescoSolrDataModel.getTransactionDocumentId(new Long(txid));
          input.addField(FIELD_SOLR4_ID, id);
          input.addField(FIELD_VERSION, "0");
          input.addField(FIELD_TXID, txid);
          input.addField(FIELD_INTXID, txid);
          input.addField(FIELD_TXCOMMITTIME, (new Date()).getTime());
          input.addField(FIELD_DOC_TYPE, SolrInformationServer.DOC_TYPE_TX);
          txCmd.solrDoc = input;
          core.getUpdateHandler().addDoc(txCmd);
          core.getUpdateHandler().commit(new CommitUpdateCommand(solrQueryRequest, false));
      }
      finally
      {
          solrQueryRequest.close();
      }
}
 
Example #24
Source File: JsonLoaderTest.java    From lucene-solr with Apache License 2.0
public void testParsing() throws Exception
{
  SolrQueryRequest req = req();
  SolrQueryResponse rsp = new SolrQueryResponse();
  BufferingRequestProcessor p = new BufferingRequestProcessor(null);
  JsonLoader loader = new JsonLoader();
  loader.load(req, rsp, new ContentStreamBase.StringStream(input), p);

  assertEquals( 2, p.addCommands.size() );
  
  AddUpdateCommand add = p.addCommands.get(0);
  assertEquals("SolrInputDocument(fields: [bool=true, f0=v0, array=[aaa, bbb]])", add.solrDoc.toString());

  // 
  add = p.addCommands.get(1);
  assertEquals("SolrInputDocument(fields: [f1=[v1, v2], f2=null])", add.solrDoc.toString());
  assertFalse(add.overwrite);

  // parse the commit commands
  assertEquals( 2, p.commitCommands.size() );
  CommitUpdateCommand commit = p.commitCommands.get( 0 );
  assertFalse( commit.optimize );
  assertTrue( commit.waitSearcher );
  assertTrue( commit.openSearcher );

  commit = p.commitCommands.get( 1 );
  assertTrue( commit.optimize );
  assertFalse( commit.waitSearcher );
  assertFalse( commit.openSearcher );


  // DELETE COMMANDS
  assertEquals( 4, p.deleteCommands.size() );
  DeleteUpdateCommand delete = p.deleteCommands.get( 0 );
  assertEquals( delete.id, "ID" );
  assertNull( delete.query );
  assertEquals( delete.commitWithin, -1);
  
  delete = p.deleteCommands.get( 1 );
  assertEquals( delete.id, "ID" );
  assertNull( delete.query );
  assertEquals( delete.commitWithin, 500);
  
  delete = p.deleteCommands.get( 2 );
  assertNull( delete.id );
  assertEquals( delete.query, "QUERY" );
  assertEquals( delete.commitWithin, -1);
  
  delete = p.deleteCommands.get( 3 );
  assertNull( delete.id );
  assertEquals( delete.query, "QUERY" );
  assertEquals( delete.commitWithin, 500);

  // ROLLBACK COMMANDS
  assertEquals( 1, p.rollbackCommands.size() );

  req.close();
}
 
Example #25
Source File: DistributedZkUpdateProcessor.java    From lucene-solr with Apache License 2.0
@Override
public void processCommit(CommitUpdateCommand cmd) throws IOException {
  clusterState = zkController.getClusterState();

  assert TestInjection.injectFailUpdateRequests();

  if (isReadOnly()) {
    throw new SolrException(ErrorCode.FORBIDDEN, "Collection " + collection + " is read-only.");
  }

  updateCommand = cmd;

  List<SolrCmdDistributor.Node> nodes = null;
  Replica leaderReplica = null;
  zkCheck();
  try {
    leaderReplica = zkController.getZkStateReader().getLeaderRetry(collection, cloudDesc.getShardId());
  } catch (InterruptedException e) {
    Thread.interrupted();
    throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE, "Exception finding leader for shard " + cloudDesc.getShardId(), e);
  }
  isLeader = leaderReplica.getName().equals(cloudDesc.getCoreNodeName());

  nodes = getCollectionUrls(collection, EnumSet.of(Replica.Type.TLOG,Replica.Type.NRT), true);
  if (nodes == null) {
    // This could happen if there are only pull replicas
    throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
        "Unable to distribute commit operation. No replicas available of types " + Replica.Type.TLOG + " or " + Replica.Type.NRT);
  }

  nodes.removeIf((node) -> node.getNodeProps().getNodeName().equals(zkController.getNodeName())
      && node.getNodeProps().getCoreName().equals(req.getCore().getName()));

  if (!isLeader && req.getParams().get(COMMIT_END_POINT, "").equals("replicas")) {
    if (replicaType == Replica.Type.PULL) {
      log.warn("Commit not supported on replicas of type {}", Replica.Type.PULL);
    } else if (replicaType == Replica.Type.NRT) {
      doLocalCommit(cmd);
    }
  } else {
    // zk
    ModifiableSolrParams params = new ModifiableSolrParams(filterParams(req.getParams()));

    List<SolrCmdDistributor.Node> useNodes = null;
    if (req.getParams().get(COMMIT_END_POINT) == null) {
      useNodes = nodes;
      params.set(DISTRIB_UPDATE_PARAM, DistribPhase.TOLEADER.toString());
      params.set(COMMIT_END_POINT, "leaders");
      if (useNodes != null) {
        params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(
            zkController.getBaseUrl(), req.getCore().getName()));
        cmdDistrib.distribCommit(cmd, useNodes, params);
        cmdDistrib.blockAndDoRetries();
      }
    }

    if (isLeader) {
      params.set(DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());

      params.set(COMMIT_END_POINT, "replicas");

      useNodes = getReplicaNodesForLeader(cloudDesc.getShardId(), leaderReplica);

      if (useNodes != null) {
        params.set(DISTRIB_FROM, ZkCoreNodeProps.getCoreUrl(
            zkController.getBaseUrl(), req.getCore().getName()));

        cmdDistrib.distribCommit(cmd, useNodes, params);
      }

      doLocalCommit(cmd);

      if (useNodes != null) {
        cmdDistrib.blockAndDoRetries();
      }
    }
  }
}
 
Example #26
Source File: RecordingUpdateProcessorFactory.java    From lucene-solr with Apache License 2.0
@Override
public void processCommit(CommitUpdateCommand cmd) throws IOException {
  record(cmd);
  super.processCommit(cmd);
}
 
Example #27
Source File: TrackingUpdateProcessorFactory.java    From lucene-solr with Apache License 2.0
@Override
public void processCommit(CommitUpdateCommand cmd) throws IOException {
  record(cmd);
  super.processCommit(cmd);
}
 
Example #28
Source File: DocExpirationUpdateProcessorFactoryTest.java    From lucene-solr with Apache License 2.0
public void testAutomaticDeletes() throws Exception {

    // get a handle on our recorder

    UpdateRequestProcessorChain chain = 
      h.getCore().getUpdateProcessingChain("scheduled-delete");

    assertNotNull(chain);

    List<UpdateRequestProcessorFactory> factories = chain.getProcessors();
    assertEquals("did number of processors configured in chain get changed?", 
                 5, factories.size());
    assertTrue("Expected [1] RecordingUpdateProcessorFactory: " + factories.get(1).getClass(),
               factories.get(1) instanceof RecordingUpdateProcessorFactory);
    RecordingUpdateProcessorFactory recorder = 
      (RecordingUpdateProcessorFactory) factories.get(1);

    // now start recording, and monitor for the expected commands

    try {
      recorder.startRecording();
      
      // more than one iter to verify it's recurring
      final int numItersToCheck = 1 + RANDOM_MULTIPLIER;
      
      for (int i = 0; i < numItersToCheck; i++) { 
        UpdateCommand tmp;
        
        // be generous in how long we wait, some jenkins machines are slooooow
        tmp = recorder.commandQueue.poll(30, TimeUnit.SECONDS);
        
        // we can be confident in the order because DocExpirationUpdateProcessorFactory
        // uses the same request for both the delete & the commit -- and both 
        // RecordingUpdateProcessorFactory's getInstance & startRecording methods are 
        // synchronized.  So it should not be possible to start recording in the 
        // middle of the two commands
        assertTrue("expected DeleteUpdateCommand: " + tmp.getClass(),
                   tmp instanceof DeleteUpdateCommand);
        
        DeleteUpdateCommand delete = (DeleteUpdateCommand) tmp;
        assertFalse(delete.isDeleteById());
        assertNotNull(delete.getQuery());
        assertTrue(delete.getQuery(), 
                   delete.getQuery().startsWith("{!cache=false}eXpField_tdt:[* TO "));
        
        // commit should be immediately after the delete
        tmp = recorder.commandQueue.poll(5, TimeUnit.SECONDS);
        assertTrue("expected CommitUpdateCommand: " + tmp.getClass(),
                   tmp instanceof CommitUpdateCommand);
        
        CommitUpdateCommand commit = (CommitUpdateCommand) tmp;
        assertTrue(commit.softCommit);
        assertTrue(commit.openSearcher);
      } 
    } finally {
      recorder.stopRecording();
    }
  }
 
Example #29
Source File: AbstractDataImportHandlerTestCase.java    From lucene-solr with Apache License 2.0
@Override
public void processCommit(CommitUpdateCommand cmd) throws IOException {
  processCommitCalled = true;
  super.processCommit(cmd);
}
 
Example #30
Source File: ReplicateFromLeader.java    From lucene-solr with Apache License 2.0
/**
 * Start a replication handler thread that will periodically pull indices from the shard leader
 * @param switchTransactionLog if true, ReplicationHandler will rotate the transaction log once
 * the replication is done
 */
public void startReplication(boolean switchTransactionLog) throws InterruptedException {
  try (SolrCore core = cc.getCore(coreName)) {
    if (core == null) {
      if (cc.isShutDown()) {
        return;
      } else {
        throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "SolrCore not found:" + coreName + " in " + cc.getLoadedCoreNames());
      }
    }
    SolrConfig.UpdateHandlerInfo uinfo = core.getSolrConfig().getUpdateHandlerInfo();
    String pollIntervalStr = "00:00:03";
    if (System.getProperty("jetty.testMode") != null) {
      pollIntervalStr = "00:00:01";
    }
    if (uinfo.autoCommmitMaxTime != -1) {
      pollIntervalStr = toPollIntervalStr(uinfo.autoCommmitMaxTime/2);
    } else if (uinfo.autoSoftCommmitMaxTime != -1) {
      pollIntervalStr = toPollIntervalStr(uinfo.autoSoftCommmitMaxTime/2);
    }
    log.info("Will start replication from leader with poll interval: {}", pollIntervalStr );

    NamedList<Object> slaveConfig = new NamedList<>();
    slaveConfig.add("fetchFromLeader", Boolean.TRUE);
    slaveConfig.add(ReplicationHandler.SKIP_COMMIT_ON_MASTER_VERSION_ZERO, switchTransactionLog);
    slaveConfig.add("pollInterval", pollIntervalStr);
    NamedList<Object> replicationConfig = new NamedList<>();
    replicationConfig.add("slave", slaveConfig);

    String lastCommitVersion = getCommitVersion(core);
    if (lastCommitVersion != null) {
      lastVersion = Long.parseLong(lastCommitVersion);
    }

    replicationProcess = new ReplicationHandler();
    if (switchTransactionLog) {
      replicationProcess.setPollListener((solrCore, fetchResult) -> {
        if (fetchResult == IndexFetcher.IndexFetchResult.INDEX_FETCH_SUCCESS) {
          String commitVersion = getCommitVersion(core);
          if (commitVersion == null) return;
          if (Long.parseLong(commitVersion) == lastVersion) return;
          UpdateLog updateLog = solrCore.getUpdateHandler().getUpdateLog();
          SolrQueryRequest req = new LocalSolrQueryRequest(core,
              new ModifiableSolrParams());
          CommitUpdateCommand cuc = new CommitUpdateCommand(req, false);
          cuc.setVersion(Long.parseLong(commitVersion));
          updateLog.commitAndSwitchToNewTlog(cuc);
          lastVersion = Long.parseLong(commitVersion);
        }
      });
    }
    replicationProcess.init(replicationConfig);
    replicationProcess.inform(core);
  }
}