Java Code Examples for org.apache.solr.client.solrj.impl.CloudSolrClient#close()

The following examples show how to use org.apache.solr.client.solrj.impl.CloudSolrClient#close(). Each example is drawn from an open source project; the source file, project, and license are noted above the example.
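CloudSolrClient implements java.io.Closeable, so calling close() releases the client's ZooKeeper and HTTP resources. Most of the examples below call close() in a finally block; the same effect can be achieved with try-with-resources. The following is a minimal sketch, not taken from any of the projects below (the zkHost value and collection name are hypothetical, and the SolrJ 7+ Builder API from Example 4 is assumed):

import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;

public class CloudSolrClientCloseSketch {
  public static void main(String[] args) throws Exception {
    String zkHost = "localhost:9983"; // hypothetical ZooKeeper address
    // try-with-resources invokes client.close() automatically, even if the query throws
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList(zkHost), Optional.empty()).build()) {
      client.setDefaultCollection("mycollection"); // hypothetical collection name
      long numFound = client.query(new SolrQuery("*:*")).getResults().getNumFound();
      System.out.println("numFound=" + numFound);
    }
  }
}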
Example 1
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
/**
 * Assert the number of documents in a given collection
 */
protected void assertNumDocs(int expectedNumDocs, String collection)
throws SolrServerException, IOException, InterruptedException {
  CloudSolrClient client = createCloudClient(collection);
  try {
    int cnt = 30; // timeout after 15 seconds
    AssertionError lastAssertionError = null;
    while (cnt > 0) {
      try {
        assertEquals(expectedNumDocs, client.query(new SolrQuery("*:*")).getResults().getNumFound());
        return;
      }
      catch (AssertionError e) {
        lastAssertionError = e;
        cnt--;
        Thread.sleep(500);
      }
    }
    throw new AssertionError("Timeout while trying to assert number of documents @ " + collection, lastAssertionError);
  } finally {
    client.close();
  }
}
 
Example 2
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
/**
 * Create a new collection through the Collection API. It enforces a maximum of one shard per node.
 * It defines the nodes to spread the new collection across by using the mapping {@link #collectionToNodeNames},
 * to ensure that a node will not host more than one core (which would create problems when trying to restart servers).
 */
private void createCollection(String name) throws Exception {
  CloudSolrClient client = createCloudClient(null);
  try {
    // Create the target collection
    Map<String, List<Integer>> collectionInfos = new HashMap<>();
    int maxShardsPerNode = 1;

    StringBuilder sb = new StringBuilder();
    for (String nodeName : collectionToNodeNames.get(name)) {
      sb.append(nodeName);
      sb.append(',');
    }
    sb.deleteCharAt(sb.length() - 1);

    createCollection(collectionInfos, name, shardCount, replicationFactor, maxShardsPerNode, client, sb.toString());
  } finally {
    client.close();
  }
}
 
Example 3
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
/**
 * Asserts that the collection has the correct number of shards and replicas
 */
protected void assertCollectionExpectations(String collectionName) throws Exception {
  CloudSolrClient client = this.createCloudClient(null);
  try {
    client.connect();
    ClusterState clusterState = client.getZkStateReader().getClusterState();

    assertTrue("Could not find new collection " + collectionName, clusterState.hasCollection(collectionName));
    Map<String, Slice> shards = clusterState.getCollection(collectionName).getSlicesMap();
    // did we find the expected number of shards?
    assertEquals("Found new collection " + collectionName + ", but mismatch on number of shards.", shardCount, shards.size());
    int totalShards = 0;
    for (String shardName : shards.keySet()) {
      totalShards += shards.get(shardName).getReplicas().size();
    }
    int expectedTotalShards = shardCount * replicationFactor;
    assertEquals("Found new collection " + collectionName + " with correct number of shards, but mismatch on number " +
        "of shards.", expectedTotalShards, totalShards);
  } finally {
    client.close();
  }
}
 
Example 4
Source File: SolrCLI.java    From lucene-solr with Apache License 2.0
protected void waitToSeeLiveNodes(int maxWaitSecs, String zkHost, int numNodes) {
  CloudSolrClient cloudClient = null;
  try {
    cloudClient = new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty())
        .build();
    cloudClient.connect();
    Set<String> liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
    int numLiveNodes = (liveNodes != null) ? liveNodes.size() : 0;
    long timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(maxWaitSecs, TimeUnit.SECONDS);
    while (System.nanoTime() < timeout && numLiveNodes < numNodes) {
      echo("\nWaiting up to "+maxWaitSecs+" seconds to see "+
          (numNodes-numLiveNodes)+" more nodes join the SolrCloud cluster ...");
      try {
        Thread.sleep(2000);
      } catch (InterruptedException ie) {
        Thread.interrupted();
      }
      liveNodes = cloudClient.getZkStateReader().getClusterState().getLiveNodes();
      numLiveNodes = (liveNodes != null) ? liveNodes.size() : 0;
    }
    if (numLiveNodes < numNodes) {
      echo("\nWARNING: Only "+numLiveNodes+" of "+numNodes+
          " are active in the cluster after "+maxWaitSecs+
          " seconds! Please check the solr.log for each node to look for errors.\n");
    }
  } catch (Exception exc) {
    CLIO.err("Failed to see if "+numNodes+" joined the SolrCloud cluster due to: "+exc);
  } finally {
    if (cloudClient != null) {
      try {
        cloudClient.close();
      } catch (Exception ignore) {}
    }
  }
}
 
Example 5
Source File: CdcrReplicationHandlerTest.java    From lucene-solr with Apache License 2.0
/**
 * Test the scenario where the slave is killed before receiving a commit. This creates a truncated tlog
 * file on the slave node. The replication strategy should detect this truncated file, and fetch the
 * non-truncated file from the leader.
 */
@Test
@ShardsFixed(num = 2)
public void testPartialReplicationWithTruncatedTlog() throws Exception {
  CloudSolrClient client = createCloudClient(SOURCE_COLLECTION);
  List<CloudJettyRunner> slaves = this.getShardToSlaveJetty(SOURCE_COLLECTION, SHARD1);

  try {
    for (int i = 0; i < 10; i++) {
      for (int j = i * 20; j < (i * 20) + 20; j++) {
        client.add(getDoc(id, Integer.toString(j)));

        // Stop the slave in the middle of a batch to create a truncated tlog on the slave
        if (j == 45) {
          slaves.get(0).jetty.stop();
        }

      }
      commit(SOURCE_COLLECTION);
    }
  } finally {
    client.close();
  }

  assertNumDocs(200, SOURCE_COLLECTION);

  // Restart the slave node to trigger Replication recovery
  this.restartServer(slaves.get(0));

  // at this stage, the slave should have replicated the 5 missing tlog files
  this.assertUpdateLogsEquals(SOURCE_COLLECTION, 10);
}
 
Example 6
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
protected void index(String collection, SolrInputDocument doc) throws IOException, SolrServerException {
  CloudSolrClient client = createCloudClient(collection);
  try {
    client.add(doc);
    client.commit(true, true);
  } finally {
    client.close();
  }
}
 
Example 7
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
protected void index(String collection, List<SolrInputDocument> docs) throws IOException, SolrServerException {
  CloudSolrClient client = createCloudClient(collection);
  try {
    client.add(docs);
    client.commit(true, true);
  } finally {
    client.close();
  }
}
 
Example 8
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
protected void deleteById(String collection, List<String> ids) throws IOException, SolrServerException {
  CloudSolrClient client = createCloudClient(collection);
  try {
    client.deleteById(ids);
    client.commit(true, true);
  } finally {
    client.close();
  }
}
 
Example 9
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
protected void deleteByQuery(String collection, String q) throws IOException, SolrServerException {
  CloudSolrClient client = createCloudClient(collection);
  try {
    client.deleteByQuery(q);
    client.commit(true, true);
  } finally {
    client.close();
  }
}
 
Example 10
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
/**
 * Invokes a commit on the given collection.
 */
protected void commit(String collection) throws IOException, SolrServerException {
  CloudSolrClient client = createCloudClient(collection);
  try {
    client.commit(true, true);
  } finally {
    client.close();
  }
}
 
Example 11
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
private void waitForCollectionToDisappear(String collection) throws Exception {
  CloudSolrClient client = this.createCloudClient(null);
  try {
    client.connect();
    ZkStateReader zkStateReader = client.getZkStateReader();
    AbstractDistribZkTestBase.waitForCollectionToDisappear(collection, zkStateReader, true, 15);
  } finally {
    client.close();
  }
}
 
Example 12
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
private void waitForRecoveriesToFinish(String collection, boolean verbose) throws Exception {
  CloudSolrClient client = this.createCloudClient(null);
  try {
    client.connect();
    ZkStateReader zkStateReader = client.getZkStateReader();
    super.waitForRecoveriesToFinish(collection, zkStateReader, verbose);
  } finally {
    client.close();
  }
}
 
Example 13
Source File: CdcrTestsUtil.java    From lucene-solr with Apache License 2.0
public static void index(MiniSolrCloudCluster cluster, String collection, SolrInputDocument doc, boolean doCommit) throws IOException, SolrServerException {
  CloudSolrClient client = createCloudClient(cluster, collection);
  try {
    client.add(doc);
    if (doCommit) {
      client.commit(true, true);
    } else {
      client.commit(true, false);
    }
  } finally {
    client.close();
  }
}
 
Example 14
Source File: QuerySolrIT.java    From nifi with Apache License 2.0
@AfterClass
public static void teardown() {
    try {
        CloudSolrClient solrClient = createSolrClient();
        CollectionAdminRequest.Delete deleteCollection = CollectionAdminRequest.deleteCollection(SOLR_COLLECTION);
        deleteCollection.process(solrClient);
        solrClient.close();
    } catch (Exception e) {
    }
}
 
Example 15
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
/**
 * Updates the mappings between the Jetty instances and the ZooKeeper cluster state.
 */
protected void updateMappingsFromZk(String collection) throws Exception {
  List<CloudJettyRunner> cloudJettys = new ArrayList<>();
  Map<String, List<CloudJettyRunner>> shardToJetty = new HashMap<>();
  Map<String, CloudJettyRunner> shardToLeaderJetty = new HashMap<>();

  CloudSolrClient cloudClient = this.createCloudClient(null);
  try {
    cloudClient.connect();
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection coll = clusterState.getCollection(collection);

    for (JettySolrRunner jetty : jettys) {
      int port = jetty.getLocalPort();
      if (port == -1) {
        throw new RuntimeException("Cannot find the port for jetty");
      }

      nextJetty:
      for (Slice shard : coll.getSlices()) {
        Set<Map.Entry<String, Replica>> entries = shard.getReplicasMap().entrySet();
        for (Map.Entry<String, Replica> entry : entries) {
          Replica replica = entry.getValue();
          if (replica.getStr(ZkStateReader.BASE_URL_PROP).contains(":" + port)) {
            if (!shardToJetty.containsKey(shard.getName())) {
              shardToJetty.put(shard.getName(), new ArrayList<CloudJettyRunner>());
            }
            boolean isLeader = shard.getLeader() == replica;
            CloudJettyRunner cjr = new CloudJettyRunner(jetty, replica, collection, shard.getName(), entry.getKey());
            shardToJetty.get(shard.getName()).add(cjr);
            if (isLeader) {
              shardToLeaderJetty.put(shard.getName(), cjr);
            }
            cloudJettys.add(cjr);
            break nextJetty;
          }
        }
      }
    }

    List<CloudJettyRunner> oldRunners = this.cloudJettys.putIfAbsent(collection, cloudJettys);
    if (oldRunners != null)  {
      // must close resources for the old entries
      for (CloudJettyRunner oldRunner : oldRunners) {
        IOUtils.closeQuietly(oldRunner.client);
      }
    }

    this.cloudJettys.put(collection, cloudJettys);
    this.shardToJetty.put(collection, shardToJetty);
    this.shardToLeaderJetty.put(collection, shardToLeaderJetty);
  } finally {
    cloudClient.close();
  }
}
 
Example 16
Source File: ChronixSolrCloudStorage.java    From chronix.spark with Apache License 2.0
/**
 * Returns the list of shard replica URLs for the given collection (one active replica per shard).
 *
 * @param zkHost            ZooKeeper URL
 * @param chronixCollection Solr collection name for Chronix time series data
 * @return the list of shard replica URLs, one per shard of the collection
 */
public List<String> getShardList(String zkHost, String chronixCollection) throws IOException {

    CloudSolrClient cloudSolrClient = new CloudSolrClient(zkHost);
    List<String> shards = new ArrayList<>();

    try {
        cloudSolrClient.connect();

        ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();

        ClusterState clusterState = zkStateReader.getClusterState();

        String[] collections;
        if (clusterState.hasCollection(chronixCollection)) {
            collections = new String[]{chronixCollection};
        } else {
            // might be a collection alias?
            Aliases aliases = zkStateReader.getAliases();
            String aliasedCollections = aliases.getCollectionAlias(chronixCollection);
            if (aliasedCollections == null)
                throw new IllegalArgumentException("Collection " + chronixCollection + " not found!");
            collections = aliasedCollections.split(",");
        }

        Set<String> liveNodes = clusterState.getLiveNodes();
        Random random = new Random(5150);

        for (String coll : collections) {
            for (Slice slice : clusterState.getSlices(coll)) {
                List<String> replicas = new ArrayList<>();
                for (Replica r : slice.getReplicas()) {
                    if (r.getState().equals(Replica.State.ACTIVE)) {
                        ZkCoreNodeProps replicaCoreProps = new ZkCoreNodeProps(r);
                        if (liveNodes.contains(replicaCoreProps.getNodeName()))
                            replicas.add(replicaCoreProps.getCoreUrl());
                    }
                }
                int numReplicas = replicas.size();
                if (numReplicas == 0)
                    throw new IllegalStateException("Shard " + slice.getName() + " in collection " +
                            coll + " does not have any active replicas!");

                String replicaUrl = (numReplicas == 1) ? replicas.get(0) : replicas.get(random.nextInt(replicas.size()));
                shards.add(replicaUrl);
            }
        }
    } finally {
        cloudSolrClient.close();
    }

    return shards;
}