org.apache.solr.client.solrj.impl.CloudSolrClient Java Examples

The following examples show how to use org.apache.solr.client.solrj.impl.CloudSolrClient. Each example is taken from an open-source project; the source file and project are noted above each snippet.
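
To show the basic pattern the project examples build on, here is a minimal, self-contained sketch of constructing a CloudSolrClient from a ZooKeeper address and issuing a query. The zkHost address, collection name, and class name below are placeholders rather than values from any of the projects; the builder-style construction follows the SolrJ 8.x API used in most of the examples that follow.

import java.util.Collections;
import java.util.Optional;

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.client.solrj.response.QueryResponse;

public class CloudSolrClientQuickstart {
  public static void main(String[] args) throws Exception {
    // Placeholder ZooKeeper address and collection name; adjust for your cluster.
    String zkHost = "localhost:2181";
    String collection = "my_collection";

    // Build the client from the ZooKeeper ensemble (no chroot) and connect to the cluster.
    try (CloudSolrClient client = new CloudSolrClient.Builder(
        Collections.singletonList(zkHost), Optional.empty()).build()) {
      client.connect();
      client.setDefaultCollection(collection);

      // Run a match-all query against the default collection and print the hit count.
      QueryResponse rsp = client.query(new SolrQuery("*:*"));
      System.out.println("numFound=" + rsp.getResults().getNumFound());
    }
  }
}
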
Example #1
Source File: AbstractFullDistribZkTestBase.java    From lucene-solr with Apache License 2.0
protected void createCollection(String collName,
                                CloudSolrClient client,
                                int replicationFactor,
                                int numShards) throws Exception {
  int maxShardsPerNode = (((numShards + 1) * replicationFactor) / getCommonCloudSolrClient()
      .getZkStateReader().getClusterState().getLiveNodes().size()) + 1;
  int numNrtReplicas = useTlogReplicas() ? 0 : replicationFactor;
  int numTlogReplicas = useTlogReplicas() ? replicationFactor : 0;
  Map<String, Object> props = makeMap(
      ZkStateReader.MAX_SHARDS_PER_NODE, maxShardsPerNode,
      ZkStateReader.NRT_REPLICAS, numNrtReplicas,
      ZkStateReader.TLOG_REPLICAS, numTlogReplicas,
      ZkStateReader.PULL_REPLICAS, getPullReplicaCount(),
      OverseerCollectionMessageHandler.NUM_SLICES, numShards);
  Map<String,List<Integer>> collectionInfos = new HashMap<>();
  createCollection(collectionInfos, collName, props, client);
}
 
Example #2
Source File: SimScenario.java    From lucene-solr with Apache License 2.0
@Override
public void execute(SimScenario scenario) throws Exception {
  String path = params.get("path");
  SnapshotCloudManager snapshotCloudManager;
  if (path == null) {
    String zkHost = params.get("zkHost");
    if (zkHost == null) {
      throw new IOException(SimAction.LOAD_SNAPSHOT + " must specify 'path' or 'zkHost'");
    } else {
      try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty()).build()) {
        cloudSolrClient.connect();
        try (SolrClientCloudManager realCloudManager = new SolrClientCloudManager(NoopDistributedQueueFactory.INSTANCE, cloudSolrClient)) {
          snapshotCloudManager = new SnapshotCloudManager(realCloudManager, null);
        }
      }
    }
  } else {
    snapshotCloudManager = SnapshotCloudManager.readSnapshot(new File(path));
  }
  scenario.cluster = SimCloudManager.createCluster(snapshotCloudManager, null, snapshotCloudManager.getTimeSource());
  scenario.config = scenario.cluster.getDistribStateManager().getAutoScalingConfig();
}
 
Example #3
Source File: TestLeaderElectionWithEmptyReplica.java    From lucene-solr with Apache License 2.0
private static int assertConsistentReplicas(CloudSolrClient cloudClient, Slice shard) throws SolrServerException, IOException {
  long numFound = Long.MIN_VALUE;
  int count = 0;
  for (Replica replica : shard.getReplicas()) {
    // close each per-replica client when done to avoid leaking connections
    try (HttpSolrClient client = new HttpSolrClient.Builder(replica.getCoreUrl())
        .withHttpClient(cloudClient.getLbClient().getHttpClient()).build()) {
      QueryResponse response = client.query(new SolrQuery("q", "*:*", "distrib", "false"));
//      log.info("Found numFound={} on replica: {}", response.getResults().getNumFound(), replica.getCoreUrl());
      if (numFound == Long.MIN_VALUE)  {
        numFound = response.getResults().getNumFound();
      } else  {
        assertEquals("Shard " + shard.getName() + " replicas do not have same number of documents", numFound, response.getResults().getNumFound());
      }
      count++;
    }
  }
  return count;
}
 
Example #4
Source File: FullThrottleStoppableIndexingThread.java    From lucene-solr with Apache License 2.0
public FullThrottleStoppableIndexingThread(HttpClient httpClient, SolrClient controlClient, CloudSolrClient cloudClient, List<SolrClient> clients,
                                           String id, boolean doDeletes, int clientSoTimeout) {
  super(controlClient, cloudClient, id, doDeletes);
  setName("FullThrottleStopableIndexingThread");
  setDaemon(true);
  this.clients = clients;
  this.httpClient = httpClient;

  cusc = new ErrorLoggingConcurrentUpdateSolrClient.Builder(((HttpSolrClient) clients.get(0)).getBaseURL())
      .withHttpClient(httpClient)
      .withQueueSize(8)
      .withThreadCount(2)
      .withConnectionTimeout(10000)
      .withSocketTimeout(clientSoTimeout)
      .build();
}
 
Example #5
Source File: CollectionManagementService.java    From vind with Apache License 2.0
public CollectionManagementService(List<String> zkHost, String ... repositories) throws IOException {
    this(repositories);
    this.zkHosts = zkHost;

    try (CloudSolrClient client = new CloudSolrClient.Builder(zkHost, Optional.empty()).build()) {
        NamedList result = client.request(new CollectionAdminRequest.ClusterStatus());

        if (((NamedList) ((NamedList) result.get("cluster")).get("collections")).get(".system") == null) {
            logger.warn("Blob store '.system' for runtime libs is not yet created. Will create one");

            try {
                Create create = CollectionAdminRequest
                        .createCollection(".system", BLOB_STORE_SHARDS, BLOB_STORE_REPLICAS);
                create.process(client);
                logger.info("Blob store has been created successfully");
            } catch (SolrServerException e1) {
                throw new IOException("Blob store is not available and cannot be created");
            }
        }

    } catch (SolrServerException | IOException e) {
        logger.error("Error in collection management service: {}", e.getMessage(), e);
        throw new IOException("Error in collection management service: " + e.getMessage(), e);
    }
}
 
Example #6
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
/**
 * Asserts that the collection has the correct number of shards and replicas
 */
protected void assertCollectionExpectations(String collectionName) throws Exception {
  CloudSolrClient client = this.createCloudClient(null);
  try {
    client.connect();
    ClusterState clusterState = client.getZkStateReader().getClusterState();

    assertTrue("Could not find new collection " + collectionName, clusterState.hasCollection(collectionName));
    Map<String, Slice> shards = clusterState.getCollection(collectionName).getSlicesMap();
  // did we find the expected number of shards?
    assertEquals("Found new collection " + collectionName + ", but mismatch on number of shards.", shardCount, shards.size());
    int totalShards = 0;
    for (String shardName : shards.keySet()) {
      totalShards += shards.get(shardName).getReplicas().size();
    }
    int expectedTotalShards = shardCount * replicationFactor;
    assertEquals("Found new collection " + collectionName + " with correct number of shards, but mismatch on number " +
        "of shards.", expectedTotalShards, totalShards);
  } finally {
    client.close();
  }
}
 
Example #7
Source File: TestCollectionAPI.java    From lucene-solr with Apache License 2.0
private void testCollectionCreationShardNameValidation() throws Exception {
  try (CloudSolrClient client = createCloudClient(null)) {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionParams.CollectionAction.CREATE.toString());
    params.set("name", "valid_collection_name");
    params.set("router.name", "implicit");
    params.set("numShards", "1");
    params.set("shards", "invalid@name#with$weird%characters");
    @SuppressWarnings({"rawtypes"})
    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");

    try {
      client.request(request);
      fail();
    } catch (BaseHttpSolrClient.RemoteSolrException e) {
      final String errorMessage = e.getMessage();
      assertTrue(errorMessage.contains("Invalid shard"));
      assertTrue(errorMessage.contains("invalid@name#with$weird%characters"));
      assertTrue(errorMessage.contains("shard names must consist entirely of"));
    }
  }
}
 
Example #8
Source File: CollectionManagementService.java    From vind with Apache License 2.0
/**
 * 1. Check whether the config set is already deployed on the Solr server; if not, download it from the repository and upload it to ZooKeeper
 * 2. Create the collection
 * 3. Check whether the dependencies (runtime libs) are installed; if not, download and install them (named group:artifact:version)
 * 4. Add/update the collection runtime libs
 *
 * @param collectionName {@link String} name of the collection to create.
 * @param configName should be either the name of an already defined configuration in the Solr cloud or the full
 *                   name of an artifact accessible in one of the default repositories.
 * @param numOfShards integer number of shards
 * @param numOfReplicas integer number of replicas
 * @param autoAddReplicas {@link Boolean} flag enabling Solr's automatic replica placement; may be null to keep the default.
 * @throws IOException thrown if it is not possible to create the collection.
 */
public void createCollection(String collectionName, String configName, int numOfShards, int numOfReplicas, Boolean autoAddReplicas) throws IOException {
    checkAndInstallConfiguration(configName);

    try (CloudSolrClient client = createCloudSolrClient()) {
        Create create = CollectionAdminRequest.
                createCollection(collectionName, configName, numOfShards, numOfReplicas);
        if(Objects.nonNull(autoAddReplicas)) {
            create.setAutoAddReplicas(autoAddReplicas);
        }
        create.process(client);
        logger.info("Collection '{}' created", collectionName);
    } catch (IOException | SolrServerException e) {
        throw new IOException("Cannot create collection", e);
    }

    Map<String,Long> runtimeDependencies = checkAndInstallRuntimeDependencies(collectionName);

    addOrUpdateRuntimeDependencies(runtimeDependencies, collectionName);
}
 
Example #9
Source File: CreateCollectionCleanupTest.java    From lucene-solr with Apache License 2.0
@Test
public void testAsyncCreateCollectionCleanup() throws Exception {
  final CloudSolrClient cloudClient = cluster.getSolrClient();
  String collectionName = "foo2";
  assertThat(CollectionAdminRequest.listCollections(cloudClient), not(hasItem(collectionName)));
  
  // Create a collection that would fail
  CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,"conf1",1,1);

  Properties properties = new Properties();
  Path tmpDir = createTempDir();
  tmpDir = tmpDir.resolve("foo");
  Files.createFile(tmpDir);
  properties.put(CoreAdminParams.DATA_DIR, tmpDir.toString());
  create.setProperties(properties);
  create.setAsyncId("testAsyncCreateCollectionCleanup");
  create.process(cloudClient);
  RequestStatusState state = AbstractFullDistribZkTestBase.getRequestStateAfterCompletion("testAsyncCreateCollectionCleanup", 30, cloudClient);
  assertThat(state.getKey(), is("failed"));

  // Confirm using LIST that the collection does not exist
  assertThat("Failed collection is still in the clusterstate: " + cluster.getSolrClient().getClusterStateProvider().getClusterState().getCollectionOrNull(collectionName), 
      CollectionAdminRequest.listCollections(cloudClient), not(hasItem(collectionName)));

}
 
Example #10
Source File: TestDocCollectionWatcher.java    From lucene-solr with Apache License 2.0
@Test
public void testWatcherIsRemovedAfterTimeout() throws Exception {
  CloudSolrClient client = cluster.getSolrClient();
  assertTrue("There should be no watchers for a non-existent collection!",
             client.getZkStateReader().getStateWatchers("no-such-collection").isEmpty());

  expectThrows(TimeoutException.class, () -> {
    client.waitForState("no-such-collection", 10, TimeUnit.MILLISECONDS,
                        (c) -> (false));
  });

  waitFor("Watchers for collection should be removed after timeout",
          MAX_WAIT_TIMEOUT, TimeUnit.SECONDS,
          () -> client.getZkStateReader().getStateWatchers("no-such-collection").isEmpty());

}
 
Example #11
Source File: TestCollectionAPI.java    From lucene-solr with Apache License 2.0
private void listCollection() throws IOException, SolrServerException {
  try (CloudSolrClient client = createCloudClient(null)) {
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("action", CollectionParams.CollectionAction.LIST.toString());
    @SuppressWarnings({"rawtypes"})
    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/collections");

    NamedList<Object> rsp = client.request(request);
    @SuppressWarnings({"unchecked"})
    List<String> collections = (List<String>) rsp.get("collections");
    assertTrue("control_collection was not found in list", collections.contains("control_collection"));
    assertTrue(DEFAULT_COLLECTION + " was not found in list", collections.contains(DEFAULT_COLLECTION));
    assertTrue(COLLECTION_NAME + " was not found in list", collections.contains(COLLECTION_NAME));
    assertTrue(COLLECTION_NAME1 + " was not found in list", collections.contains(COLLECTION_NAME1));
  }

}
 
Example #12
Source File: SolRDF.java    From SolRDF with Apache License 2.0
/**
 * Builds a new SolRDF proxy instance.
 * 
 * @return a new SolRDF proxy instance.
 * @throws UnableToBuildSolRDFClientException in case of build failure.
 */
public SolRDF build() throws UnableToBuildSolRDFClientException {
	if (endpoints.isEmpty()) {
		endpoints.add(DEFAULT_ENDPOINT);
	}

	// FIXME: for DatasetAccessor and (HTTP) query execution service we also need something like LBHttpSolrServer
	final String firstEndpointAddress = endpoints.iterator().next();
	try {
		return new SolRDF(
				DatasetAccessorFactory.createHTTP(
						firstEndpointAddress +
						graphStoreProtocolEndpointPath),
				firstEndpointAddress + sparqlEndpointPath,		
				zkHost != null
					? new CloudSolrClient(zkHost)
					: (endpoints.size() == 1)
						? new HttpSolrClient(endpoints.iterator().next(), httpClient)
						: new LBHttpSolrClient(httpClient, endpoints.toArray(new String[endpoints.size()])));
	} catch (final Exception exception) {
		throw new UnableToBuildSolRDFClientException(exception);
	}	
}
 
Example #13
Source File: CollectionManagementService.java    From vind with Apache License 2.0
/**
 * Adds or updates runtime dependencies for a collection
 * @param runtimeDependencies {@link Map} of {@link String} dependency name and its {@link Long} version number.
 * @param collectionName {@link String} name of the collection to update.
 */
protected void addOrUpdateRuntimeDependencies(Map<String, Long> runtimeDependencies, String collectionName) {
    logger.info("Adding runtime-dependencies for {}", collectionName);
    for(String blobName : runtimeDependencies.keySet()) {
        RuntimeLibRequest request = new RuntimeLibRequest(RuntimeLibRequestType.add, blobName, runtimeDependencies.get(blobName));
        try (CloudSolrClient client = createCloudSolrClient()) {
            client.request(request, collectionName);
            logger.debug("Added {} (v{})", request.blobName, request.version);
        } catch (SolrServerException | IOException e) {
            logger.warn("Cannot add runtime dependency {} (v{}) to collection {}", blobName, runtimeDependencies.get(blobName), collectionName); //TODO (minor) parse result
            logger.info("Try to update dependency");
            request.setType(RuntimeLibRequestType.update);

            try (CloudSolrClient client = createCloudSolrClient()) {
                client.request(request, collectionName);
            } catch (SolrServerException | IOException e1) {
                logger.warn("Cannot update runtime dependency {} (v{}) to collection {}", blobName, runtimeDependencies.get(blobName), collectionName); //TODO (minor) parse result
            }
        }
    }
}
 
Example #14
Source File: TestSolrCloudWithKerberosAlt.java    From lucene-solr with Apache License 2.0
private void testCollectionCreateSearchDelete() throws Exception {
  CloudSolrClient client = cluster.getSolrClient();
  CollectionAdminRequest.createCollection(collectionName, configName, numShards, numReplicas)
      .setMaxShardsPerNode(maxShardsPerNode)
      .process(client);

  cluster.waitForActiveCollection(collectionName, numShards, numShards * numReplicas);

  // modify/query collection
  new UpdateRequest().add("id", "1").commit(client, collectionName);
  QueryResponse rsp = client.query(collectionName, new SolrQuery("*:*"));
  assertEquals(1, rsp.getResults().getNumFound());
      
  // delete the collection we created earlier
  CollectionAdminRequest.deleteCollection(collectionName).process(client);
      
  AbstractDistribZkTestBase.waitForCollectionToDisappear
      (collectionName, client.getZkStateReader(), true, 330);
}
 
Example #15
Source File: TestCollectionAPI.java    From lucene-solr with Apache License 2.0
private void clusterStatusNoCollection() throws Exception {

    try (CloudSolrClient client = createCloudClient(null)) {
      ModifiableSolrParams params = new ModifiableSolrParams();
      params.set("action", CollectionParams.CollectionAction.CLUSTERSTATUS.toString());
      @SuppressWarnings({"rawtypes"})
      SolrRequest request = new QueryRequest(params);
      request.setPath("/admin/collections");

      NamedList<Object> rsp = client.request(request);
      @SuppressWarnings({"unchecked"})
      NamedList<Object> cluster = (NamedList<Object>) rsp.get("cluster");
      assertNotNull("Cluster state should not be null", cluster);
      @SuppressWarnings({"unchecked"})
      NamedList<Object> collections = (NamedList<Object>) cluster.get("collections");
      assertNotNull("Collections should not be null in cluster state", collections);
      assertNotNull(collections.get(COLLECTION_NAME1));
      assertEquals(4, collections.size());

      @SuppressWarnings({"unchecked"})
      List<String> liveNodes = (List<String>) cluster.get("live_nodes");
      assertNotNull("Live nodes should not be null", liveNodes);
      assertFalse(liveNodes.isEmpty());
    }

  }
 
Example #16
Source File: CollectionsAPIAsyncDistributedZkTest.java    From lucene-solr with Apache License 2.0
@Test
public void testSolrJAPICalls() throws Exception {

  final CloudSolrClient client = cluster.getSolrClient();

  RequestStatusState state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
      .processAndWait(client, MAX_TIMEOUT_SECONDS);
  assertSame("CreateCollection task did not complete!", RequestStatusState.COMPLETED, state);

  state = CollectionAdminRequest.createCollection("testasynccollectioncreation","conf1",1,1)
      .processAndWait(client, MAX_TIMEOUT_SECONDS);
  assertSame("Recreating a collection with the same should have failed.", RequestStatusState.FAILED, state);

  state = CollectionAdminRequest.addReplicaToShard("testasynccollectioncreation", "shard1")
    .processAndWait(client, MAX_TIMEOUT_SECONDS);
  assertSame("Add replica did not complete", RequestStatusState.COMPLETED, state);

  state = CollectionAdminRequest.splitShard("testasynccollectioncreation")
      .setShardName("shard1")
      .processAndWait(client, MAX_TIMEOUT_SECONDS * 2);
  assertEquals("Shard split did not complete. Last recorded state: " + state, RequestStatusState.COMPLETED, state);

}
 
Example #17
Source File: AutoScalingHandlerTest.java    From lucene-solr with Apache License 2.0
public void testUpdatePolicy() throws IOException, SolrServerException {
  CloudSolrClient solrClient = cluster.getSolrClient();
  String setPropertiesCommand = "{'set-cluster-policy': [" +
      "{'cores': '<4','node': '#ANY'}]}";
  solrClient.request(AutoScalingRequest.create(SolrRequest.METHOD.POST, setPropertiesCommand));
  @SuppressWarnings({"rawtypes"})
  SolrRequest req = AutoScalingRequest.create(SolrRequest.METHOD.GET, null);
  NamedList<Object> response = solrClient.request(req);
  assertEquals("<4", response._get("cluster-policy[0]/cores", null));
  assertEquals("#ANY", response._get("cluster-policy[0]/node", null));
  setPropertiesCommand = "{'set-cluster-policy': [" +
      "{'cores': '<3','node': '#ANY'}]}";
  solrClient.request(AutoScalingRequest.create(SolrRequest.METHOD.POST, setPropertiesCommand));
  req = AutoScalingRequest.create(SolrRequest.METHOD.GET, null);
  response = solrClient.request(req);
  assertEquals("<3", response._get("cluster-policy[0]/cores", null));
  assertEquals("#ANY", response._get("cluster-policy[0]/node", null));

}
 
Example #18
Source File: TestSolrConfigHandlerConcurrent.java    From lucene-solr with Apache License 2.0
@SuppressWarnings({"rawtypes"})
public static LinkedHashMapWriter getAsMap(String uri, CloudSolrClient cloudClient) throws Exception {
  HttpGet get = new HttpGet(uri);
  HttpEntity entity = null;
  try {
    entity = cloudClient.getLbClient().getHttpClient().execute(get).getEntity();
    String response = EntityUtils.toString(entity, StandardCharsets.UTF_8);
    try {
      return (LinkedHashMapWriter) Utils.MAPWRITEROBJBUILDER.apply(new JSONParser(new StringReader(response))).getVal();
    } catch (JSONParser.ParseException e) {
      log.error(response,e);
      throw e;
    }
  } finally {
    EntityUtils.consumeQuietly(entity);
    get.releaseConnection();
  }
}
 
Example #19
Source File: SolrCLI.java    From lucene-solr with Apache License 2.0
/**
 * Get the base URL of a live Solr instance from either the solrUrl command-line option or from ZooKeeper.
 */
public static String resolveSolrUrl(CommandLine cli) throws Exception {
  String solrUrl = cli.getOptionValue("solrUrl");
  if (solrUrl == null) {
    String zkHost = cli.getOptionValue("zkHost");
    if (zkHost == null)
      throw new IllegalStateException("Must provide either the '-solrUrl' or '-zkHost' parameters!");

    try (CloudSolrClient cloudSolrClient = new CloudSolrClient.Builder(Collections.singletonList(zkHost), Optional.empty()).build()) {
      cloudSolrClient.connect();
      Set<String> liveNodes = cloudSolrClient.getZkStateReader().getClusterState().getLiveNodes();
      if (liveNodes.isEmpty())
        throw new IllegalStateException("No live nodes found! Cannot determine 'solrUrl' from ZooKeeper: "+zkHost);

      String firstLiveNode = liveNodes.iterator().next();
      solrUrl = cloudSolrClient.getZkStateReader().getBaseUrlForNodeName(firstLiveNode);
    }
  }
  return solrUrl;
}
 
Example #20
Source File: SolrTestCaseJ4.java    From lucene-solr with Apache License 2.0
/**
 * This method <i>may</i> randomize unspecified aspects of the resulting SolrClient.
 * Tests that do not wish to have any randomized behavior should use the 
 * {@link org.apache.solr.client.solrj.impl.CloudSolrClient.Builder} class directly
 */ 
public static CloudSolrClient getCloudSolrClient(String zkHost, boolean shardLeadersOnly, HttpClient httpClient,
    int connectionTimeoutMillis, int socketTimeoutMillis) {
  if (shardLeadersOnly) {
    return new CloudSolrClientBuilder(Collections.singletonList(zkHost), Optional.empty())
        .withHttpClient(httpClient)
        .sendUpdatesOnlyToShardLeaders()
        .withConnectionTimeout(connectionTimeoutMillis)
        .withSocketTimeout(socketTimeoutMillis)
        .build();
  }
  return new CloudSolrClientBuilder(Collections.singletonList(zkHost), Optional.empty())
      .withHttpClient(httpClient)
      .sendUpdatesToAllReplicasInShard()
      .withConnectionTimeout(connectionTimeoutMillis)
      .withSocketTimeout(socketTimeoutMillis)
      .build();
}
 
Example #21
Source File: DocValuesNotIndexedTest.java    From lucene-solr with Apache License 2.0
private void checkSortOrder(CloudSolrClient client, List<FieldProps> props, String sortDir, String[] order, String[] orderBool) throws IOException, SolrServerException {
  for (FieldProps prop : props) {
    final SolrQuery solrQuery = new SolrQuery("q", "*:*", "rows", "100");
    solrQuery.setSort(prop.getName(), "asc".equals(sortDir) ? SolrQuery.ORDER.asc : SolrQuery.ORDER.desc);
    solrQuery.addSort("id", SolrQuery.ORDER.asc);
    final QueryResponse rsp = client.query(COLLECTION, solrQuery);
    SolrDocumentList res = rsp.getResults();
    assertEquals("Should have exactly " + order.length + " documents returned", order.length, res.getNumFound());
    String expected;
    for (int idx = 0; idx < res.size(); ++idx) {
      if (prop.getName().startsWith("bool")) expected = orderBool[idx];
      else expected = order[idx];
      assertEquals("Documents in wrong order for field: " + prop.getName(),
          expected, res.get(idx).get("id"));
    }
  }
}
 
Example #22
Source File: SolrCloudTestCase.java    From lucene-solr with Apache License 2.0
/**
 * Configure, run and return the {@link MiniSolrCloudCluster}
 *
 * @throws Exception if an error occurs on startup
 */
public MiniSolrCloudCluster build() throws Exception {
  JettyConfig jettyConfig = jettyConfigBuilder.build();
  MiniSolrCloudCluster cluster = new MiniSolrCloudCluster(nodeCount, baseDir, solrxml, jettyConfig,
      null, securityJson, trackJettyMetrics);
  CloudSolrClient client = cluster.getSolrClient();
  for (Config config : configs) {
    ((ZkClientClusterStateProvider)client.getClusterStateProvider()).uploadConfig(config.path, config.name);
  }

  if (clusterProperties.size() > 0) {
    ClusterProperties props = new ClusterProperties(cluster.getSolrClient().getZkStateReader().getZkClient());
    for (Map.Entry<String, Object> entry : clusterProperties.entrySet()) {
      props.setClusterProperty(entry.getKey(), entry.getValue());
    }
  }
  return cluster;
}
 
Example #23
Source File: HBaseIndexerMapper.java    From hbase-indexer with Apache License 2.0
private DirectSolrInputDocumentWriter createCloudSolrWriter(Context context, Map<String, String> indexConnectionParams)
        throws IOException {
    String indexZkHost = indexConnectionParams.get(SolrConnectionParams.ZOOKEEPER);
    String collectionName = indexConnectionParams.get(SolrConnectionParams.COLLECTION);

    if (indexZkHost == null) {
        throw new IllegalStateException("No index ZK host defined");
    }

    if (collectionName == null) {
        throw new IllegalStateException("No collection name defined");
    }
    CloudSolrClient solrServer = new CloudSolrClient.Builder().withZkHost(indexZkHost).build();
    int zkSessionTimeout = HBaseIndexerConfiguration.getSessionTimeout(context.getConfiguration());
    solrServer.setZkClientTimeout(zkSessionTimeout);
    solrServer.setZkConnectTimeout(zkSessionTimeout);      
    solrServer.setDefaultCollection(collectionName);

    return new DirectSolrInputDocumentWriter(context.getConfiguration().get(INDEX_NAME_CONF_KEY), solrServer);
}
 
Example #24
Source File: SolrSchema.java    From lucene-solr with Apache License 2.0
@Override
protected Map<String, Table> getTableMap() {
  String zk = this.properties.getProperty("zk");
  CloudSolrClient cloudSolrClient = solrClientCache.getCloudSolrClient(zk);
  ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
  ClusterState clusterState = zkStateReader.getClusterState();

  final ImmutableMap.Builder<String, Table> builder = ImmutableMap.builder();

  Set<String> collections = clusterState.getCollectionsMap().keySet();
  for (String collection : collections) {
    builder.put(collection, new SolrTable(this, collection));
  }

  Aliases aliases = zkStateReader.getAliases();
  for (String alias : aliases.getCollectionAliasListMap().keySet()) {
    // don't create duplicate entries
    if (!collections.contains(alias)) {
      builder.put(alias, new SolrTable(this, alias));
    }
  }

  return builder.build();
}
 
Example #25
Source File: AbstractFullDistribZkTestBase.java    From lucene-solr with Apache License 2.0
@SuppressWarnings("rawtypes")
protected static int sendDocsWithRetry(CloudSolrClient cloudClient, String collection, List<SolrInputDocument> batch, int minRf, int maxRetries, int waitBeforeRetry) throws Exception {
  UpdateRequest up = new UpdateRequest();
  up.add(batch);
  NamedList resp = null;
  int numRetries = 0;
  while(true) {
    try {
      resp = cloudClient.request(up, collection);
      return cloudClient.getMinAchievedReplicationFactor(cloudClient.getDefaultCollection(), resp);
    } catch (Exception exc) {
      Throwable rootCause = SolrException.getRootCause(exc);
      if (++numRetries <= maxRetries) {
        log.warn("ERROR: {} ... Sleeping for {} seconds before re-try ...", rootCause, waitBeforeRetry);
        Thread.sleep(waitBeforeRetry * 1000L);
      } else {
        log.error("No more retries available! Add batch failed due to: {}", rootCause);
        throw exc;
      }
    }
  }
}
 
Example #26
Source File: BaseCdcrDistributedZkTest.java    From lucene-solr with Apache License 2.0
protected void deleteById(String collection, List<String> ids) throws IOException, SolrServerException {
  CloudSolrClient client = createCloudClient(collection);
  try {
    client.deleteById(ids);
    client.commit(true, true);
  } finally {
    client.close();
  }
}
 
Example #27
Source File: CloudSolrSinkTask.java    From kafka-connect-solr with Apache License 2.0
@Override
public void start(Map<String, String> settings) {
  super.start(settings);
  CloudSolrClient.Builder builder = new CloudSolrClient.Builder();
  builder.withZkHost(this.config.zookeeperHosts);
  builder.withZkChroot(this.config.zookeeperChroot);
  builder.withConnectionTimeout(this.config.solrConnectTimeoutMs);
  builder.withSocketTimeout(this.config.solrSocketTimeoutMs);
  this.client = builder.build();
  this.client.setZkConnectTimeout(this.config.zookeeperConnectTimeoutMs);
  this.client.setZkClientTimeout(this.config.zookeeperClientTimeoutMs);
  this.client.setRetryExpiryTime((int) TimeUnit.SECONDS.convert(this.config.zookeeperRetryExpiryTimeMs, TimeUnit.MILLISECONDS));
}
 
Example #28
Source File: AbstractFullDistribZkTestBase.java    From lucene-solr with Apache License 2.0
protected CloudSolrClient getCommonCloudSolrClient() {
  synchronized (this) {
    if (commonCloudSolrClient == null) {
      commonCloudSolrClient = getCloudSolrClient(zkServer.getZkAddress(),
          random().nextBoolean(), 5000, 120000);
      commonCloudSolrClient.setDefaultCollection(DEFAULT_COLLECTION);
      commonCloudSolrClient.connect();
      if (log.isInfoEnabled()) {
        log.info("Created commonCloudSolrClient with updatesToLeaders={} and parallelUpdates={}",
            commonCloudSolrClient.isUpdatesToLeaders(), commonCloudSolrClient.isParallelUpdates());
      }
    }
  }
  return commonCloudSolrClient;
}
 
Example #29
Source File: SplitShardTest.java    From lucene-solr with Apache License 2.0
CloudSolrClient createCollection(String collectionName, int repFactor) throws Exception {

  CollectionAdminRequest
      .createCollection(collectionName, "conf", 1, repFactor)
      .setMaxShardsPerNode(100)
      .process(cluster.getSolrClient());

  cluster.waitForActiveCollection(collectionName, 1, repFactor);

  CloudSolrClient client = cluster.getSolrClient();
  client.setDefaultCollection(collectionName);
  return client;
}
 
Example #30
Source File: QuerySolrIT.java    From nifi with Apache License 2.0
@BeforeClass
public static void setup() throws IOException, SolrServerException {
    CloudSolrClient solrClient = createSolrClient();
    Path currentDir = Paths.get(ZK_CONFIG_PATH);
    ZkClientClusterStateProvider stateProvider = new ZkClientClusterStateProvider(SOLR_LOCATION);
    stateProvider.uploadConfig(currentDir, ZK_CONFIG_NAME);
    solrClient.setDefaultCollection(SOLR_COLLECTION);

    if (!solrClient.getZkStateReader().getClusterState().hasCollection(SOLR_COLLECTION)) {
        CollectionAdminRequest.Create createCollection = CollectionAdminRequest.createCollection(SOLR_COLLECTION, ZK_CONFIG_NAME, 1, 1);
        createCollection.process(solrClient);
    } else {
        solrClient.deleteByQuery("*:*");
    }

    for (int i = 0; i < 10; i++) {
        SolrInputDocument doc = new SolrInputDocument();
        doc.addField("id", "doc" + i);
        Date date = new Date();
        doc.addField("created", DATE_FORMAT.format(date));
        doc.addField("string_single", "single" + i + ".1");
        doc.addField("string_multi", "multi" + i + ".1");
        doc.addField("string_multi", "multi" + i + ".2");
        doc.addField("integer_single", i);
        doc.addField("integer_multi", 1);
        doc.addField("integer_multi", 2);
        doc.addField("integer_multi", 3);
        doc.addField("double_single", 0.5 + i);

        solrClient.add(doc);
    }
    solrClient.commit();
}