Java Code Examples for org.apache.solr.client.solrj.impl.CloudSolrClient#add()

The following examples show how to use org.apache.solr.client.solrj.impl.CloudSolrClient#add(). Each example is taken from an open source project; the source file, project, and license are noted in the header above the code.
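Before the project examples, here is a minimal, self-contained sketch of the usual add/commit/query cycle with CloudSolrClient, written against the SolrJ 8.x API that the examples below use. The ZooKeeper address ("localhost:2181") and collection name ("myCollection") are placeholders, not values taken from any example.

import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrInputDocument;

public class CloudSolrClientAddSketch {
  public static void main(String[] args) throws Exception {
    // Build a client from a ZooKeeper ensemble address (placeholder value).
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList("localhost:2181"), Optional.empty()).build()) {
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("id", "example-1");

      // add() routes the document to the correct shard leader of the collection.
      client.add("myCollection", doc);
      // Documents become searchable only after a commit.
      client.commit("myCollection");

      QueryResponse rsp = client.query("myCollection", new SolrQuery("*:*"));
      System.out.println("numFound: " + rsp.getResults().getNumFound());
    }
  }
}

Passing the collection name to add(), commit(), and query() (rather than calling setDefaultCollection) lets one client serve several collections, which is the pattern most of the examples below follow.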
Example 1
Source File: AbstractCloudBackupRestoreTestCase.java    From lucene-solr with Apache License 2.0
private int indexDocs(String collectionName, boolean useUUID) throws Exception {
  Random random = new Random(docsSeed); // use a constant seed for the whole test run so that we can easily re-index.
  int numDocs = random.nextInt(100);
  if (numDocs == 0) {
    log.info("Indexing ZERO test docs");
    return 0;
  }

  List<SolrInputDocument> docs = new ArrayList<>(numDocs);
  for (int i=0; i<numDocs; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", ((useUUID == true) ? java.util.UUID.randomUUID().toString() : i));
    doc.addField("shard_s", "shard" + (1 + random.nextInt(NUM_SHARDS))); // for implicit router
    docs.add(doc);
  }

  CloudSolrClient client = cluster.getSolrClient();
  client.add(collectionName, docs); //batch
  client.commit(collectionName);

  log.info("Indexed {} docs to collection: {}", numDocs, collectionName);

  return numDocs;
}
 
Example 2
Source File: TestManagedSchemaAPI.java    From lucene-solr with Apache License 2.0
private void testModifyField(String collection) throws IOException, SolrServerException {
  CloudSolrClient cloudClient = cluster.getSolrClient();

  SolrInputDocument doc = new SolrInputDocument("id", "3");
  cloudClient.add(collection, doc);
  cloudClient.commit(collection);

  String fieldName = "id";
  SchemaRequest.Field getFieldRequest = new SchemaRequest.Field(fieldName);
  SchemaResponse.FieldResponse getFieldResponse = getFieldRequest.process(cloudClient, collection);
  Map<String, Object> field = getFieldResponse.getField();
  field.put("docValues", true);
  SchemaRequest.ReplaceField replaceRequest = new SchemaRequest.ReplaceField(field);
  SchemaResponse.UpdateResponse replaceResponse = replaceRequest.process(cloudClient, collection);
  assertNull(replaceResponse.getResponse().get("errors"));
  CollectionAdminRequest.Reload reloadRequest = CollectionAdminRequest.reloadCollection(collection);
  CollectionAdminResponse response = reloadRequest.process(cloudClient);
  assertEquals(0, response.getStatus());
  assertTrue(response.isSuccess());
}
 
Example 3
Source File: TestSolrCloudWithHadoopAuthPlugin.java    From lucene-solr with Apache License 2.0
protected void testCollectionCreateSearchDelete() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String collectionName = "testkerberoscollection";

  // create collection
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, "conf1",
      NUM_SHARDS, REPLICATION_FACTOR);
  create.process(solrClient);
  // The metrics counter for wrong credentials here really just means  
  assertAuthMetricsMinimums(6, 3, 0, 3, 0, 0);

  SolrInputDocument doc = new SolrInputDocument();
  doc.setField("id", "1");
  solrClient.add(collectionName, doc);
  solrClient.commit(collectionName);
  assertAuthMetricsMinimums(10, 5, 0, 5, 0, 0);

  SolrQuery query = new SolrQuery();
  query.setQuery("*:*");
  QueryResponse rsp = solrClient.query(collectionName, query);
  assertEquals(1, rsp.getResults().getNumFound());

  CollectionAdminRequest.Delete deleteReq = CollectionAdminRequest.deleteCollection(collectionName);
  deleteReq.process(solrClient);
  AbstractDistribZkTestBase.waitForCollectionToDisappear(collectionName,
      solrClient.getZkStateReader(), true, 330);
  assertAuthMetricsMinimums(14, 8, 0, 6, 0, 0);
}
 
Example 4
Source File: CdcrReplicationHandlerTest.java    From lucene-solr with Apache License 2.0
/**
 * Test the scenario where the slave is killed before receiving a commit. This creates a truncated tlog
 * file on the slave node. The replication strategy should detect this truncated file, and fetch the
 * non-truncated file from the leader.
 */
@Test
@ShardsFixed(num = 2)
public void testPartialReplicationWithTruncatedTlog() throws Exception {
  CloudSolrClient client = createCloudClient(SOURCE_COLLECTION);
  List<CloudJettyRunner> slaves = this.getShardToSlaveJetty(SOURCE_COLLECTION, SHARD1);

  try {
    for (int i = 0; i < 10; i++) {
      for (int j = i * 20; j < (i * 20) + 20; j++) {
        client.add(getDoc(id, Integer.toString(j)));

        // Stop the slave in the middle of a batch to create a truncated tlog on the slave
        if (j == 45) {
          slaves.get(0).jetty.stop();
        }

      }
      commit(SOURCE_COLLECTION);
    }
  } finally {
    client.close();
  }

  assertNumDocs(200, SOURCE_COLLECTION);

  // Restart the slave node to trigger Replication recovery
  this.restartServer(slaves.get(0));

  // at this stage, the slave should have replicated the 5 missing tlog files
  this.assertUpdateLogsEquals(SOURCE_COLLECTION, 10);
}
 
Example 5
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
protected void index(String collection, SolrInputDocument doc) throws IOException, SolrServerException {
  CloudSolrClient client = createCloudClient(collection);
  try {
    client.add(doc);
    client.commit(true, true);
  } finally {
    client.close();
  }
}
 
Example 6
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
protected void index(String collection, List<SolrInputDocument> docs) throws IOException, SolrServerException {
  CloudSolrClient client = createCloudClient(collection);
  try {
    client.add(docs);
    client.commit(true, true);
  } finally {
    client.close();
  }
}
 
Example 7
Source File: CdcrTestsUtil.java    From lucene-solr with Apache License 2.0
public static void index(MiniSolrCloudCluster cluster, String collection, SolrInputDocument doc, boolean doCommit) throws IOException, SolrServerException {
  CloudSolrClient client = createCloudClient(cluster, collection);
  try {
    client.add(doc);
    // a commit always runs; doCommit only controls whether it waits for a new searcher
    client.commit(true, doCommit);
  } finally {
    client.close();
  }
}
 
Example 8
Source File: SegmentTerminateEarlyTestState.java    From lucene-solr with Apache License 2.0
void addDocuments(CloudSolrClient cloudSolrClient,
    int numCommits, int numDocsPerCommit, boolean optimize) throws Exception {
  for (int cc = 1; cc <= numCommits; ++cc) {
    for (int nn = 1; nn <= numDocsPerCommit; ++nn) {
      ++numDocs;
      final Integer docKey = numDocs;
      SolrInputDocument doc = new SolrInputDocument();
      doc.setField(KEY_FIELD, ""+docKey);
      final int MM = rand.nextInt(60); // minutes
      if (minTimestampMM == null || MM <= minTimestampMM.intValue()) {
        if (minTimestampMM != null && MM < minTimestampMM.intValue()) {
          minTimestampDocKeys.clear();
        }
        minTimestampMM = MM;
        minTimestampDocKeys.add(docKey);
      }
      if (maxTimestampMM == null || maxTimestampMM.intValue() <= MM) {
        if (maxTimestampMM != null && maxTimestampMM.intValue() < MM) {
          maxTimestampDocKeys.clear();
        }
        maxTimestampMM = MM;
        maxTimestampDocKeys.add(docKey);
      }
      doc.setField(TIMESTAMP_FIELD, MM);
      doc.setField(ODD_FIELD, ""+(numDocs % 2));
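      // note: string concatenation below yields "01", "11", "21", or "31", not the number (numDocs % 4) + 1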
      doc.setField(QUAD_FIELD, ""+(numDocs % 4)+1);
      cloudSolrClient.add(doc);
    }
    cloudSolrClient.commit();
  }
  if (optimize) {
    cloudSolrClient.optimize();
  }
}
 
Example 9
Source File: QuerySolrIT.java    From nifi with Apache License 2.0
@BeforeClass
public static void setup() throws IOException, SolrServerException {
    CloudSolrClient solrClient = createSolrClient();
    Path currentDir = Paths.get(ZK_CONFIG_PATH);
    ZkClientClusterStateProvider stateProvider = new ZkClientClusterStateProvider(SOLR_LOCATION);
    stateProvider.uploadConfig(currentDir, ZK_CONFIG_NAME);
    solrClient.setDefaultCollection(SOLR_COLLECTION);

    if (!solrClient.getZkStateReader().getClusterState().hasCollection(SOLR_COLLECTION)) {
        CollectionAdminRequest.Create createCollection = CollectionAdminRequest.createCollection(SOLR_COLLECTION, ZK_CONFIG_NAME, 1, 1);
        createCollection.process(solrClient);
    } else {
        solrClient.deleteByQuery("*:*");
    }

    for (int i = 0; i < 10; i++) {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "doc" + i);
        Date date = new Date();
        doc.addField("created", DATE_FORMAT.format(date));
        doc.addField("string_single", "single" + i + ".1");
        doc.addField("string_multi", "multi" + i + ".1");
        doc.addField("string_multi", "multi" + i + ".2");
        doc.addField("integer_single", i);
        doc.addField("integer_multi", 1);
        doc.addField("integer_multi", 2);
        doc.addField("integer_multi", 3);
        doc.addField("double_single", 0.5 + i);

        solrClient.add(doc);
    }
    solrClient.commit();
}
 
Example 10
Source File: SolrJmxReporterCloudTest.java    From lucene-solr with Apache License 2.0
@Test
public void testJmxReporter() throws Exception {
  CollectionAdminRequest.reloadCollection(COLLECTION).processAndWait(cluster.getSolrClient(), 60);
  CloudSolrClient solrClient = cluster.getSolrClient();
  // index some docs
  for (int i = 0; i < 100; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", "id-" + i);
    solrClient.add(COLLECTION, doc);
  }
  solrClient.commit(COLLECTION);
  // make sure searcher is present
  solrClient.query(COLLECTION, params(CommonParams.Q, "*:*"));

  for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
    SolrMetricManager manager = runner.getCoreContainer().getMetricManager();
    for (String registry : manager.registryNames()) {
      Map<String, SolrMetricReporter> reporters = manager.getReporters(registry);
      long jmxReporters = reporters.entrySet().stream().filter(e -> e.getValue() instanceof SolrJmxReporter).count();
      reporters.forEach((k, v) -> {
        if (!(v instanceof SolrJmxReporter)) {
          return;
        }
        if (!((SolrJmxReporter)v).getDomain().startsWith("solr.core")) {
          return;
        }
        if (!((SolrJmxReporter)v).isActive()) {
          return;
        }
        QueryExp exp = Query.eq(Query.attr(JmxMetricsReporter.INSTANCE_TAG), Query.value(Integer.toHexString(v.hashCode())));
        Set<ObjectInstance> beans = mBeanServer.queryMBeans(null, exp);
        if (((SolrJmxReporter) v).isStarted() && beans.isEmpty() && jmxReporters < 2) {
          if (log.isInfoEnabled()) {
            log.info("DocCollection: {}", getCollectionState(COLLECTION));
          }
          fail("JMX reporter " + k + " for registry " + registry + " failed to register any beans!");
        } else {
          Set<String> categories = new HashSet<>();
          beans.forEach(bean -> {
            String cat = bean.getObjectName().getKeyProperty("category");
            if (cat != null) {
              categories.add(cat);
            }
          });
          log.info("Registered categories: {}", categories);
          assertTrue("Too few categories: " + categories, categories.size() > 5);
        }
      });
    }
  }
}
 
Example 11
Source File: TestCloudRecovery.java    From lucene-solr with Apache License 2.0
@Test
// commented 4-Sep-2018 @BadApple(bugUrl="https://issues.apache.org/jira/browse/SOLR-12028") // added 20-Jul-2018
public void leaderRecoverFromLogOnStartupTest() throws Exception {
  AtomicInteger countReplayLog = new AtomicInteger(0);
  TestInjection.skipIndexWriterCommitOnClose = true;
  UpdateLog.testing_logReplayFinishHook = countReplayLog::incrementAndGet;

  CloudSolrClient cloudClient = cluster.getSolrClient();
  cloudClient.add(COLLECTION, sdoc("id", "1"));
  cloudClient.add(COLLECTION, sdoc("id", "2"));
  cloudClient.add(COLLECTION, sdoc("id", "3"));
  cloudClient.add(COLLECTION, sdoc("id", "4"));

  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("q", "*:*");
  QueryResponse resp = cloudClient.query(COLLECTION, params);
  assertEquals(0, resp.getResults().getNumFound());

  ChaosMonkey.stop(cluster.getJettySolrRunners());

  
  for (JettySolrRunner jettySolrRunner : cluster.getJettySolrRunners()) {
    cluster.waitForJettyToStop(jettySolrRunner);
  }
  assertTrue("Timeout waiting for all not live", ClusterStateUtil.waitForAllReplicasNotLive(cloudClient.getZkStateReader(), 45000));
  ChaosMonkey.start(cluster.getJettySolrRunners());
  
  cluster.waitForAllNodes(30);
  
  assertTrue("Timeout waiting for all live and active", ClusterStateUtil.waitForAllActiveAndLiveReplicas(cloudClient.getZkStateReader(), COLLECTION, 120000));

  resp = cloudClient.query(COLLECTION, params);
  assertEquals(4, resp.getResults().getNumFound());
  // Make sure all nodes recovered from the tlog
  if (onlyLeaderIndexes) {
    // Leader election can be kicked off, so the 2 TLOG replicas will replay their tlogs before becoming the new leader
    assertTrue(countReplayLog.get() >= 2);
  } else {
    assertEquals(4, countReplayLog.get());
  }

  // check metrics
  int replicationCount = 0;
  int errorsCount = 0;
  int skippedCount = 0;
  for (JettySolrRunner jetty : cluster.getJettySolrRunners()) {
    SolrMetricManager manager = jetty.getCoreContainer().getMetricManager();
    List<String> registryNames = manager.registryNames().stream()
        .filter(s -> s.startsWith("solr.core.")).collect(Collectors.toList());
    for (String registry : registryNames) {
      Map<String, Metric> metrics = manager.registry(registry).getMetrics();
      Timer timer = (Timer)metrics.get("REPLICATION.peerSync.time");
      Counter counter = (Counter)metrics.get("REPLICATION.peerSync.errors");
      Counter skipped = (Counter)metrics.get("REPLICATION.peerSync.skipped");
      replicationCount += timer.getCount();
      errorsCount += counter.getCount();
      skippedCount += skipped.getCount();
    }
  }
  if (onlyLeaderIndexes) {
    assertTrue(replicationCount >= 2);
  } else {
    assertEquals(2, replicationCount);
  }
}
 
Example 12
Source File: TestLeaderElectionWithEmptyReplica.java    From lucene-solr with Apache License 2.0
@Test
public void test() throws Exception {
  CloudSolrClient solrClient = cluster.getSolrClient();
  solrClient.setDefaultCollection(COLLECTION_NAME);
  for (int i = 0; i < 10; i++) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", String.valueOf(i));
    solrClient.add(doc);
  }
  solrClient.commit();

  // find the leader node
  Replica replica = solrClient.getZkStateReader().getLeaderRetry(COLLECTION_NAME, "shard1");
  JettySolrRunner replicaJetty = null;
  List<JettySolrRunner> jettySolrRunners = cluster.getJettySolrRunners();
  for (JettySolrRunner jettySolrRunner : jettySolrRunners) {
    int port = jettySolrRunner.getBaseUrl().getPort();
    if (replica.getStr(BASE_URL_PROP).contains(":" + port))  {
      replicaJetty = jettySolrRunner;
      break;
    }
  }

  // kill the leader
  replicaJetty.stop();

  // add a replica (asynchronously)
  CollectionAdminRequest.AddReplica addReplica = CollectionAdminRequest.addReplicaToShard(COLLECTION_NAME, "shard1");
  String asyncId = addReplica.processAsync(solrClient);

  // wait a bit
  Thread.sleep(1000);

  // bring the old leader node back up
  replicaJetty.start();

  // wait until everyone is active
  solrClient.waitForState(COLLECTION_NAME, DEFAULT_TIMEOUT, TimeUnit.SECONDS,
      (n, c) -> DocCollection.isFullyActive(n, c, 1, 2));

  // now query each replica and check for consistency
  assertConsistentReplicas(solrClient, solrClient.getZkStateReader().getClusterState().getCollection(COLLECTION_NAME).getSlice("shard1"));

  // sanity check that documents still exist
  QueryResponse response = solrClient.query(new SolrQuery("*:*"));
  assertEquals("Indexed documents not found", 10, response.getResults().getNumFound());
}
 
Example 13
Source File: TestCloudSearcherWarming.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
@Test
public void testRepFactor1LeaderStartup() throws Exception {

  CloudSolrClient solrClient = cluster.getSolrClient();

  String collectionName = "testRepFactor1LeaderStartup";
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName, 1, 1)
      .setCreateNodeSet(cluster.getJettySolrRunner(0).getNodeName());
  create.process(solrClient);

  cluster.waitForActiveCollection(collectionName, 1, 1);

  solrClient.setDefaultCollection(collectionName);

  String addListenerCommand = "{" +
      "'add-listener' : {'name':'newSearcherListener','event':'newSearcher', 'class':'" + SleepingSolrEventListener.class.getName() + "'}" +
      "'add-listener' : {'name':'firstSearcherListener','event':'firstSearcher', 'class':'" + SleepingSolrEventListener.class.getName() + "'}" +
      "}";

  ConfigRequest request = new ConfigRequest(addListenerCommand);
  solrClient.request(request);

  solrClient.add(new SolrInputDocument("id", "1"));
  solrClient.commit();

  AtomicInteger expectedDocs = new AtomicInteger(1);
  AtomicReference<String> failingCoreNodeName = new AtomicReference<>();
  CollectionStateWatcher stateWatcher = createActiveReplicaSearcherWatcher(expectedDocs, failingCoreNodeName);

  JettySolrRunner runner = cluster.getJettySolrRunner(0);
  runner.stop();
  
  cluster.waitForJettyToStop(runner);
  // check waitForState only after we are sure the node has shutdown and have forced an update to liveNodes
  // ie: workaround SOLR-13490
  cluster.getSolrClient().getZkStateReader().updateLiveNodes();
  waitForState("jetty count:" + cluster.getJettySolrRunners().size(), collectionName, clusterShape(1, 0));
  
  // restart
  sleepTime.set(1000);
  runner.start();
  cluster.waitForAllNodes(30);
  cluster.getSolrClient().getZkStateReader().registerCollectionStateWatcher(collectionName, stateWatcher);
  cluster.waitForActiveCollection(collectionName, 1, 1);
  assertNull("No replica should have been active without registering a searcher, found: " + failingCoreNodeName.get(), failingCoreNodeName.get());
  cluster.getSolrClient().getZkStateReader().removeCollectionStateWatcher(collectionName, stateWatcher);
}
 
Example 14
Source File: MtasSolrTestDistributedSearchConsistency.java    From mtas with Apache License 2.0
/**
 * Creates the cloud.
 */
private static void createCloud() {
  Path dataPath = Paths.get("src" + File.separator + "test" + File.separator
      + "resources" + File.separator + "data");
  String solrxml = MiniSolrCloudCluster.DEFAULT_CLOUD_SOLR_XML;
  JettyConfig jettyConfig = JettyConfig.builder().setContext("/solr").build();
  File cloudBase = Files.createTempDir();
  cloudBaseDir = cloudBase.toPath();
  // create subdirectories
  Path clusterDir = cloudBaseDir.resolve("cluster");
  Path logDir = cloudBaseDir.resolve("log");
  if (clusterDir.toFile().mkdir() && logDir.toFile().mkdir()) {
    // set log directory
    System.setProperty("solr.log.dir", logDir.toAbsolutePath().toString());
    try {
      cloudCluster = new MiniSolrCloudCluster(1, clusterDir, solrxml,
          jettyConfig);
      CloudSolrClient client = cloudCluster.getSolrClient();
      client.connect();
      createCloudCollection(COLLECTION_ALL_OPTIMIZED, 1, 1,
          dataPath.resolve("conf"));
      createCloudCollection(COLLECTION_ALL_MULTIPLE_SEGMENTS, 1, 1,
          dataPath.resolve("conf"));
      createCloudCollection(COLLECTION_PART1_OPTIMIZED, 1, 1,
          dataPath.resolve("conf"));
      createCloudCollection(COLLECTION_PART2_MULTIPLE_SEGMENTS, 1, 1,
          dataPath.resolve("conf"));
      createCloudCollection(COLLECTION_DISTRIBUTED, 1, 1,
          dataPath.resolve("conf"));

      // collection1
      client.add(COLLECTION_ALL_OPTIMIZED, solrDocuments.get(1));
      client.add(COLLECTION_ALL_OPTIMIZED, solrDocuments.get(2));
      client.add(COLLECTION_ALL_OPTIMIZED, solrDocuments.get(3));
      client.commit(COLLECTION_ALL_OPTIMIZED);
      client.optimize(COLLECTION_ALL_OPTIMIZED);
      // collection2
      client.add(COLLECTION_ALL_MULTIPLE_SEGMENTS, solrDocuments.get(1));
      client.commit(COLLECTION_ALL_MULTIPLE_SEGMENTS);
      client.add(COLLECTION_ALL_MULTIPLE_SEGMENTS, solrDocuments.get(2));
      client.add(COLLECTION_ALL_MULTIPLE_SEGMENTS, solrDocuments.get(3));
      client.commit(COLLECTION_ALL_MULTIPLE_SEGMENTS);
      // collection3
      client.add(COLLECTION_PART1_OPTIMIZED, solrDocuments.get(1));
      client.commit(COLLECTION_PART1_OPTIMIZED);
      // collection4
      client.add(COLLECTION_PART2_MULTIPLE_SEGMENTS, solrDocuments.get(2));
      client.add(COLLECTION_PART2_MULTIPLE_SEGMENTS, solrDocuments.get(3));
      client.commit(COLLECTION_PART2_MULTIPLE_SEGMENTS);
    } catch (Exception e) {
      log.error("failed to set up cloud collections", e);
    }
  } else {
    log.error("couldn't create directories");
  }
}