Java Code Examples for org.apache.solr.common.cloud.ZkStateReader#getClusterState()

The following examples show how to use org.apache.solr.common.cloud.ZkStateReader#getClusterState(). Each example is taken from an open source project; the source file, project, and license are noted above the code.
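All of the examples follow the same basic pattern: obtain a ZkStateReader (usually from a CloudSolrClient or a ZkController), call getClusterState(), and then inspect collections, slices, replicas, and live nodes. The following is a minimal sketch of that pattern, assuming a Solr 8.x-style CloudSolrClient.Builder; the ZooKeeper address ("localhost:9983") and collection name ("mycollection") are placeholders, and imports are omitted as in the examples below.

private void printActiveReplicas() throws IOException {
  // Placeholder ZooKeeper address; adjust for your cluster.
  try (CloudSolrClient client = new CloudSolrClient.Builder(
      Collections.singletonList("localhost:9983"), Optional.empty()).build()) {
    client.connect(); // make sure the ZkStateReader has been initialized
    ZkStateReader zkStateReader = client.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    Set<String> liveNodes = clusterState.getLiveNodes();
    DocCollection collection = clusterState.getCollectionOrNull("mycollection");
    if (collection == null) {
      return; // collection does not exist (or the name refers to an alias)
    }
    for (Slice slice : collection.getActiveSlices()) {
      for (Replica replica : slice.getReplicas()) {
        // A replica is only usable if it reports ACTIVE and its node is live.
        if (replica.getState() == Replica.State.ACTIVE
            && liveNodes.contains(replica.getNodeName())) {
          System.out.println(slice.getName() + " -> " + replica.getCoreUrl());
        }
      }
    }
  }
}
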
Example 1
Source File: SyncSliceTest.java    From lucene-solr with Apache License 2.0
private void waitTillAllNodesActive() throws Exception {
  for (int i = 0; i < 60; i++) { 
    Thread.sleep(3000);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection1 = clusterState.getCollection("collection1");
    Slice slice = collection1.getSlice("shard1");
    Collection<Replica> replicas = slice.getReplicas();
    boolean allActive = true;
    for (Replica replica : replicas) {
      if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
        allActive = false;
        break;
      }
    }
    if (allActive) {
      return;
    }
  }
  printLayout();
  fail("timeout waiting to see all nodes active");
}
 
Example 2
Source File: CreateAliasCmd.java    From lucene-solr with Apache License 2.0
private void validateAllCollectionsExistAndNoDuplicates(List<String> collectionList, ZkStateReader zkStateReader) {
  final String collectionStr = StrUtils.join(collectionList, ',');

  if (new HashSet<>(collectionList).size() != collectionList.size()) {
    throw new SolrException(BAD_REQUEST,
        String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', since it contains duplicates", collectionStr));
  }
  ClusterState clusterState = zkStateReader.getClusterState();
  Set<String> aliasNames = zkStateReader.getAliases().getCollectionAliasListMap().keySet();
  for (String collection : collectionList) {
    if (clusterState.getCollectionOrNull(collection) == null && !aliasNames.contains(collection)) {
      throw new SolrException(BAD_REQUEST,
          String.format(Locale.ROOT,  "Can't create collection alias for collections='%s', '%s' is not an existing collection or alias", collectionStr, collection));
    }
  }
}
 
Example 3
Source File: SolrSchema.java    From lucene-solr with Apache License 2.0
@Override
protected Map<String, Table> getTableMap() {
  String zk = this.properties.getProperty("zk");
  CloudSolrClient cloudSolrClient = solrClientCache.getCloudSolrClient(zk);
  ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
  ClusterState clusterState = zkStateReader.getClusterState();

  final ImmutableMap.Builder<String, Table> builder = ImmutableMap.builder();

  Set<String> collections = clusterState.getCollectionsMap().keySet();
  for (String collection : collections) {
    builder.put(collection, new SolrTable(this, collection));
  }

  Aliases aliases = zkStateReader.getAliases();
  for (String alias : aliases.getCollectionAliasListMap().keySet()) {
    // don't create duplicate entries
    if (!collections.contains(alias)) {
      builder.put(alias, new SolrTable(this, alias));
    }
  }

  return builder.build();
}
 
Example 4
Source File: OverseerTest.java    From lucene-solr with Apache License 2.0
private void waitForCollections(ZkStateReader stateReader, String... collections) throws InterruptedException, KeeperException, TimeoutException {
  int maxIterations = 100;
  while (0 < maxIterations--) {

    final ClusterState state = stateReader.getClusterState();
    Set<String> availableCollections = state.getCollectionsMap().keySet();
    int availableCount = 0;
    for(String requiredCollection: collections) {
      stateReader.waitForState(requiredCollection, 30000, TimeUnit.MILLISECONDS, (liveNodes, collectionState) ->  collectionState != null);
      if(availableCollections.contains(requiredCollection)) {
        availableCount++;
      }
      if(availableCount == collections.length) return;

    }
  }
  log.warn("Timeout waiting for collections: {} state: {}"
      , Arrays.asList(collections), stateReader.getClusterState());
}
 
Example 5
Source File: ChaosMonkeyShardSplitTest.java    From lucene-solr with Apache License 2.0
private void waitTillRecovered() throws Exception {
  for (int i = 0; i < 30; i++) {
    Thread.sleep(3000);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    zkStateReader.forceUpdateCollection("collection1");
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection1 = clusterState.getCollection("collection1");
    Slice slice = collection1.getSlice("shard1");
    Collection<Replica> replicas = slice.getReplicas();
    boolean allActive = true;
    for (Replica replica : replicas) {
      if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
        allActive = false;
        break;
      }
    }
    if (allActive) {
      return;
    }
  }
  printLayout();
  fail("timeout waiting to see recovered node");
}
 
Example 6
Source File: Solr6Index.java    From atlas with Apache License 2.0
/**
 * Checks if the collection has already been created in Solr.
 */
private static boolean checkIfCollectionExists(CloudSolrClient server, String collection) throws KeeperException, InterruptedException {
    final ZkStateReader zkStateReader = server.getZkStateReader();
    zkStateReader.forceUpdateCollection(collection);
    final ClusterState clusterState = zkStateReader.getClusterState();
    return clusterState.getCollectionOrNull(collection) != null;
}
 
Example 7
Source File: BlobRepository.java    From lucene-solr with Apache License 2.0
private Replica getSystemCollReplica() {
  ZkStateReader zkStateReader = this.coreContainer.getZkController().getZkStateReader();
  ClusterState cs = zkStateReader.getClusterState();
  DocCollection coll = cs.getCollectionOrNull(CollectionAdminParams.SYSTEM_COLL);
  if (coll == null)
    throw new SolrException(SERVICE_UNAVAILABLE, CollectionAdminParams.SYSTEM_COLL + " collection not available");
  ArrayList<Slice> slices = new ArrayList<>(coll.getActiveSlices());
  if (slices.isEmpty())
    throw new SolrException(SERVICE_UNAVAILABLE, "No active slices for " + CollectionAdminParams.SYSTEM_COLL + " collection");
  Collections.shuffle(slices, RANDOM); //do load balancing

  Replica replica = null;
  for (Slice slice : slices) {
    List<Replica> replicas = new ArrayList<>(slice.getReplicasMap().values());
    Collections.shuffle(replicas, RANDOM);
    for (Replica r : replicas) {
      if (r.getState() == Replica.State.ACTIVE) {
        if (zkStateReader.getClusterState().getLiveNodes().contains(r.get(ZkStateReader.NODE_NAME_PROP))) {
          replica = r;
          break;
        } else {
          if (log.isInfoEnabled()) {
            log.info("replica {} says it is active but not a member of live nodes", r.get(ZkStateReader.NODE_NAME_PROP));
          }
        }
      }
    }
  }
  if (replica == null) {
    throw new SolrException(SERVICE_UNAVAILABLE, "No active replica available for " + CollectionAdminParams.SYSTEM_COLL + " collection");
  }
  return replica;
}
 
Example 8
Source File: Solr5Index.java    From incubator-atlas with Apache License 2.0
/**
 * Wait for all the collection shards to be ready.
 */
private static void waitForRecoveriesToFinish(CloudSolrClient server, String collection) throws KeeperException, InterruptedException {
    ZkStateReader zkStateReader = server.getZkStateReader();
    try {
        boolean cont = true;

        while (cont) {
            boolean sawLiveRecovering = false;
            zkStateReader.updateClusterState();
            ClusterState clusterState = zkStateReader.getClusterState();
            Map<String, Slice> slices = clusterState.getSlicesMap(collection);
            Preconditions.checkNotNull("Could not find collection:" + collection, slices);

            for (Map.Entry<String, Slice> entry : slices.entrySet()) {
                Map<String, Replica> shards = entry.getValue().getReplicasMap();
                for (Map.Entry<String, Replica> shard : shards.entrySet()) {
                    String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
                    if ((state.equals(Replica.State.RECOVERING.toString())
                            || state.equals(Replica.State.DOWN.toString()))
                            && clusterState.liveNodesContain(shard.getValue().getStr(
                            ZkStateReader.NODE_NAME_PROP))) {
                        sawLiveRecovering = true;
                    }
                }
            }
            if (!sawLiveRecovering) {
                cont = false;
            } else {
                Thread.sleep(1000);
            }
        }
    } finally {
        logger.info("Exiting solr wait");
    }
}
 
Example 9
Source File: LeaderFailureAfterFreshStartTest.java    From lucene-solr with Apache License 2.0
private void waitTillNodesActive() throws Exception {
  for (int i = 0; i < 60; i++) {
    Thread.sleep(3000);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection1 = clusterState.getCollection("collection1");
    Slice slice = collection1.getSlice("shard1");
    Collection<Replica> replicas = slice.getReplicas();
    boolean allActive = true;

    Collection<String> nodesDownNames = nodesDown.stream()
        .map(n -> n.coreNodeName)
        .collect(Collectors.toList());
    
    Collection<Replica> replicasToCheck = replicas.stream()
        .filter(r -> !nodesDownNames.contains(r.getName()))
        .collect(Collectors.toList());

    for (Replica replica : replicasToCheck) {
      if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
        allActive = false;
        break;
      }
    }
    if (allActive) {
      return;
    }
  }
  printLayout();
  fail("timeout waiting to see all nodes active");
}
 
Example 10
Source File: PeerSyncReplicationTest.java    From lucene-solr with Apache License 2.0
private void waitTillNodesActive() throws Exception {
  for (int i = 0; i < 60; i++) {
    Thread.sleep(3000);
    ZkStateReader zkStateReader = cloudClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    DocCollection collection1 = clusterState.getCollection("collection1");
    Slice slice = collection1.getSlice("shard1");
    Collection<Replica> replicas = slice.getReplicas();
    boolean allActive = true;

    Collection<String> nodesDownNames =
        nodesDown.stream()
            .map(n -> n.coreNodeName)
            .collect(Collectors.toList());

    Collection<Replica> replicasToCheck =
        replicas.stream()
            .filter(r -> !nodesDownNames.contains(r.getName()))
            .collect(Collectors.toList());

    for (Replica replica : replicasToCheck) {
      if (!clusterState.liveNodesContain(replica.getNodeName()) || replica.getState() != Replica.State.ACTIVE) {
        allActive = false;
        break;
      }
    }
    if (allActive) {
      return;
    }
  }
  printLayout();
  fail("timeout waiting to see all nodes active");
}
 
Example 11
Source File: AnalyticsShardRequestManager.java    From lucene-solr with Apache License 2.0
/**
 * Pick one replica from each shard to send the shard requests to.
 *
 * @param collection that is being queried
 * @throws IOException if an exception occurs while finding replicas
 */
protected void pickShards(String collection) throws IOException {
  try {

    ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
    ClusterState clusterState = zkStateReader.getClusterState();
    Set<String> liveNodes = clusterState.getLiveNodes();

    Slice[] slices = clusterState.getCollection(collection).getActiveSlicesArr();

    for(Slice slice : slices) {
      Collection<Replica> replicas = slice.getReplicas();
      List<Replica> shuffler = new ArrayList<>();
      for(Replica replica : replicas) {
        if(replica.getState() == Replica.State.ACTIVE && liveNodes.contains(replica.getNodeName())) {
          shuffler.add(replica);
        }
      }

      Collections.shuffle(shuffler, new Random());
      Replica rep = shuffler.get(0);
      ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep);
      String url = zkProps.getCoreUrl();
      replicaUrls.add(url);
    }
  } catch (Exception e) {
    throw new IOException(e);
  }
}
 
Example 12
Source File: TopicStream.java    From lucene-solr with Apache License 2.0
private void getPersistedCheckpoints() throws IOException {
  ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
  Slice[] slices = CloudSolrStream.getSlices(checkpointCollection, zkStateReader, false);

  ClusterState clusterState = zkStateReader.getClusterState();
  Set<String> liveNodes = clusterState.getLiveNodes();

  OUTER:
  for(Slice slice : slices) {
    Collection<Replica> replicas = slice.getReplicas();
    for(Replica replica : replicas) {
      if(replica.getState() == Replica.State.ACTIVE && liveNodes.contains(replica.getNodeName())){
        HttpSolrClient httpClient = streamContext.getSolrClientCache().getHttpSolrClient(replica.getCoreUrl());
        try {
          SolrDocument doc = httpClient.getById(id);
          if(doc != null) {
            @SuppressWarnings({"unchecked"})
            List<String> checkpoints = (List<String>)doc.getFieldValue("checkpoint_ss");
            for (String checkpoint : checkpoints) {
              String[] pair = checkpoint.split("~");
              this.checkpoints.put(pair[0], Long.parseLong(pair[1]));
            }
          }
        } catch (Exception e) {
          throw new IOException(e);
        }
        break OUTER;
      }
    }
  }
}
 
Example 13
Source File: DeepRandomStream.java    From lucene-solr with Apache License 2.0
public static Slice[] getSlices(String collectionName, ZkStateReader zkStateReader, boolean checkAlias) throws IOException {
  ClusterState clusterState = zkStateReader.getClusterState();

  Map<String, DocCollection> collectionsMap = clusterState.getCollectionsMap();

  //TODO we should probably split collection by comma to query more than one
  //  which is something already supported in other parts of Solr

  // check for alias or collection

  List<String> allCollections = new ArrayList<>();
  String[] collectionNames = collectionName.split(",");
  for(String col : collectionNames) {
    List<String> collections = checkAlias
        ? zkStateReader.getAliases().resolveAliases(col)  // if not an alias, returns collectionName
        : Collections.singletonList(collectionName);
    allCollections.addAll(collections);
  }

  // Lookup all actives slices for these collections
  List<Slice> slices = allCollections.stream()
      .map(collectionsMap::get)
      .filter(Objects::nonNull)
      .flatMap(docCol -> Arrays.stream(docCol.getActiveSlicesArr()))
      .collect(Collectors.toList());
  if (!slices.isEmpty()) {
    return slices.toArray(new Slice[slices.size()]);
  }

  // Check collection case insensitive
  for(Entry<String, DocCollection> entry : collectionsMap.entrySet()) {
    if(entry.getKey().equalsIgnoreCase(collectionName)) {
      return entry.getValue().getActiveSlicesArr();
    }
  }

  throw new IOException("Slices not found for " + collectionName);
}
 
Example 14
Source File: Solr6Index.java    From atlas with Apache License 2.0
/**
 * Wait for all the collection shards to be ready.
 */
private static void waitForRecoveriesToFinish(CloudSolrClient server, String collection) throws KeeperException, InterruptedException {
    final ZkStateReader zkStateReader = server.getZkStateReader();
    try {
        boolean cont = true;

        while (cont) {
            boolean sawLiveRecovering = false;
            zkStateReader.forceUpdateCollection(collection);
            final ClusterState clusterState = zkStateReader.getClusterState();
            final Map<String, Slice> slices = clusterState.getCollection(collection).getSlicesMap();
            Preconditions.checkNotNull(slices, "Could not find collection:" + collection);

            // change paths for Replica.State per Solr refactoring
            // remove SYNC state per: http://tinyurl.com/pag6rwt
            for (final Map.Entry<String, Slice> entry : slices.entrySet()) {
                final Map<String, Replica> shards = entry.getValue().getReplicasMap();
                for (final Map.Entry<String, Replica> shard : shards.entrySet()) {
                    final String state = shard.getValue().getStr(ZkStateReader.STATE_PROP).toUpperCase();
                    if ((Replica.State.RECOVERING.name().equals(state) || Replica.State.DOWN.name().equals(state))
                            && clusterState.liveNodesContain(shard.getValue().getStr(
                            ZkStateReader.NODE_NAME_PROP))) {
                        sawLiveRecovering = true;
                    }
                }
            }


            if (!sawLiveRecovering) {
                cont = false;
            } else {
                Thread.sleep(1000);
            }
        }
    } finally {
        logger.info("Exiting solr wait");
    }
}
 
Example 15
Source File: TestSolrCloudClusterSupport.java    From storm-solr with Apache License 2.0
protected static void ensureAllReplicasAreActive(String testCollectionName, int shards, int rf, int maxWaitSecs)
    throws Exception {
  long startMs = System.currentTimeMillis();

  ZkStateReader zkr = cloudSolrClient.getZkStateReader();
  zkr.updateClusterState(); // force the state to be fresh

  ClusterState cs = zkr.getClusterState();
  Collection<Slice> slices = cs.getActiveSlices(testCollectionName);
  assertTrue(slices.size() == shards);
  boolean allReplicasUp = false;
  long waitMs = 0L;
  long maxWaitMs = maxWaitSecs * 1000L;
  Replica leader = null;
  while (waitMs < maxWaitMs && !allReplicasUp) {
    // refresh state every 2 secs
    if (waitMs % 2000 == 0) {
      log.info("Updating ClusterState");
      cloudSolrClient.getZkStateReader().updateClusterState();
    }

    cs = cloudSolrClient.getZkStateReader().getClusterState();
    assertNotNull(cs);
    allReplicasUp = true; // assume true
    for (Slice shard : cs.getActiveSlices(testCollectionName)) {
      String shardId = shard.getName();
      assertNotNull("No Slice for " + shardId, shard);
      Collection<Replica> replicas = shard.getReplicas();
      assertTrue(replicas.size() == rf);
      leader = shard.getLeader();
      assertNotNull(leader);
      log.info("Found " + replicas.size() + " replicas and leader on " +
          leader.getNodeName() + " for " + shardId + " in " + testCollectionName);

      // ensure all replicas are "active"
      for (Replica replica : replicas) {
        String replicaState = replica.getStr(ZkStateReader.STATE_PROP);
        if (!"active".equals(replicaState)) {
          log.info("Replica " + replica.getName() + " for shard " + shardId + " is currently " + replicaState);
          allReplicasUp = false;
        }
      }
    }

    if (!allReplicasUp) {
      try {
        Thread.sleep(500L);
      } catch (Exception ignoreMe) {
      }
      waitMs += 500L;
    }
  } // end while

  if (!allReplicasUp)
    fail("Didn't see all replicas for " + testCollectionName +
        " come up within " + maxWaitMs + " ms! ClusterState: " + printClusterStateInfo(testCollectionName));

  long diffMs = (System.currentTimeMillis() - startMs);
  log.info("Took " + diffMs + " ms to see all replicas become active for " + testCollectionName);
}
 
Example 16
Source File: SharedFSAutoReplicaFailoverTest.java    From lucene-solr with Apache License 2.0
private boolean waitingForReplicasNotLive(ZkStateReader zkStateReader, int timeoutInMs, List<JettySolrRunner> jetties) {
  Set<String> nodeNames = jetties.stream()
      .filter(jetty -> jetty.getCoreContainer() != null)
      .map(JettySolrRunner::getNodeName)
      .collect(Collectors.toSet());
  long timeout = System.nanoTime()
      + TimeUnit.NANOSECONDS.convert(timeoutInMs, TimeUnit.MILLISECONDS);
  boolean success = false;
  while (!success && System.nanoTime() < timeout) {
    success = true;
    ClusterState clusterState = zkStateReader.getClusterState();
    if (clusterState != null) {
      Map<String, DocCollection> collections = clusterState.getCollectionsMap();
      for (Map.Entry<String, DocCollection> entry : collections.entrySet()) {
        DocCollection docCollection = entry.getValue();
        Collection<Slice> slices = docCollection.getSlices();
        for (Slice slice : slices) {
          // only look at active shards
          if (slice.getState() == Slice.State.ACTIVE) {
            Collection<Replica> replicas = slice.getReplicas();
            for (Replica replica : replicas) {
              if (nodeNames.contains(replica.getNodeName())) {
                boolean live = clusterState.liveNodesContain(replica
                    .getNodeName());
                if (live) {
                  success = false;
                }
              }
            }
          }
        }
      }
      if (!success) {
        try {
          Thread.sleep(500);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Interrupted");
        }
      }
    }
  }

  return success;
}
 
Example 17
Source File: TopicStream.java    From lucene-solr with Apache License 2.0
protected void constructStreams() throws IOException {
  try {
    ZkStateReader zkStateReader = cloudSolrClient.getZkStateReader();
    Slice[] slices = CloudSolrStream.getSlices(this.collection, zkStateReader, false);

    ModifiableSolrParams mParams = new ModifiableSolrParams(params);
    mParams.set(DISTRIB, "false"); // We are the aggregator.
    String fl = mParams.get("fl");
    mParams.set(SORT, "_version_ asc");
    if(!fl.contains(VERSION_FIELD)) {
      fl += ",_version_";
    }
    mParams.set("fl", fl);

    Random random = new Random();

    ClusterState clusterState = zkStateReader.getClusterState();
    Set<String> liveNodes = clusterState.getLiveNodes();

    for(Slice slice : slices) {
      ModifiableSolrParams localParams = new ModifiableSolrParams(mParams);
      long checkpoint = checkpoints.get(slice.getName());

      Collection<Replica> replicas = slice.getReplicas();
      List<Replica> shuffler = new ArrayList<>();
      for(Replica replica : replicas) {
        if(replica.getState() == Replica.State.ACTIVE && liveNodes.contains(replica.getNodeName()))
          shuffler.add(replica);
      }

      Replica rep = shuffler.get(random.nextInt(shuffler.size()));
      ZkCoreNodeProps zkProps = new ZkCoreNodeProps(rep);
      String url = zkProps.getCoreUrl();
      SolrStream solrStream = new SolrStream(url, localParams);
      solrStream.setSlice(slice.getName());
      solrStream.setCheckpoint(checkpoint);
      solrStream.setTrace(true);
      if(streamContext != null) {
        solrStream.setStreamContext(streamContext);
      }
      solrStreams.add(solrStream);
    }
  } catch (Exception e) {
    throw new IOException(e);
  }
}
 
Example 18
Source File: LeaderElectionContextKeyTest.java    From lucene-solr with Apache License 2.0
@Test
public void test() throws KeeperException, InterruptedException, IOException, SolrServerException {
  ZkStateReader stateReader = cluster.getSolrClient().getZkStateReader();
  stateReader.forceUpdateCollection(TEST_COLLECTION_1);
  ClusterState clusterState = stateReader.getClusterState();
  // The test assumes that TEST_COLLECTION_1 and TEST_COLLECTION_2 have an identical layout
  // (the same replica names on every shard)
  for (int i = 1; i <= 2; i++) {
    String coll1ShardiLeader = clusterState.getCollection(TEST_COLLECTION_1).getLeader("shard"+i).getName();
    String coll2ShardiLeader = clusterState.getCollection(TEST_COLLECTION_2).getLeader("shard"+i).getName();
    String assertMss = String.format(Locale.ROOT, "Expect %s and %s each have a replica with same name on shard %s",
        coll1ShardiLeader, coll2ShardiLeader, "shard"+i);
    assertEquals(
        assertMss,
        coll1ShardiLeader,
        coll2ShardiLeader
    );
  }

  String shard = "shard" + String.valueOf(random().nextInt(2) + 1);
  Replica replica = clusterState.getCollection(TEST_COLLECTION_1).getLeader(shard);
  assertNotNull(replica);

  try (SolrClient shardLeaderClient = new HttpSolrClient.Builder(replica.get("base_url").toString()).build()) {
    assertEquals(1L, getElectionNodes(TEST_COLLECTION_1, shard, stateReader.getZkClient()).size());
    List<String> collection2Shard1Nodes = getElectionNodes(TEST_COLLECTION_2, "shard1", stateReader.getZkClient());
    List<String> collection2Shard2Nodes = getElectionNodes(TEST_COLLECTION_2, "shard2", stateReader.getZkClient());
    CoreAdminRequest.unloadCore(replica.getCoreName(), shardLeaderClient);
    // Wait for the leader election to be kicked off
    long timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(60, TimeUnit.SECONDS);
    boolean found = false;
    while (System.nanoTime() < timeout) {
      try {
        found = getElectionNodes(TEST_COLLECTION_1, shard, stateReader.getZkClient()).size() == 0;
        break;
      } catch (KeeperException.NoNodeException nne) {
        // ignore
      }
    }
    assertTrue(found);
    // No leader election was kicked off on testCollection2
    assertThat(collection2Shard1Nodes, CoreMatchers.is(getElectionNodes(TEST_COLLECTION_2, "shard1", stateReader.getZkClient())));
    assertThat(collection2Shard2Nodes, CoreMatchers.is(getElectionNodes(TEST_COLLECTION_2, "shard2", stateReader.getZkClient())));
  }
}
 
Example 19
Source File: AbstractSolrSentryTestBase.java    From incubator-sentry with Apache License 2.0
protected static void waitForRecoveriesToFinish(String collection,
                                                CloudSolrServer solrServer,
                                                boolean verbose,
                                                boolean failOnTimeout,
                                                int timeoutSeconds) throws Exception {
  LOG.info("Entering solr wait with timeout " + timeoutSeconds);
  ZkStateReader zkStateReader = solrServer.getZkStateReader();
  try {
    boolean cont = true;
    int cnt = 0;

    while (cont) {
      if (verbose) {
        LOG.debug("-");
      }
      boolean sawLiveRecovering = false;
      zkStateReader.updateClusterState(true);
      ClusterState clusterState = zkStateReader.getClusterState();
      Map<String, Slice> slices = clusterState.getSlicesMap(collection);
      assertNotNull("Could not find collection:" + collection, slices);
      for (Map.Entry<String, Slice> entry : slices.entrySet()) {
        Map<String, Replica> shards = entry.getValue().getReplicasMap();
        for (Map.Entry<String, Replica> shard : shards.entrySet()) {
          if (verbose) {
            LOG.debug("rstate:"
              + shard.getValue().getStr(ZkStateReader.STATE_PROP) + " live:"
              + clusterState.liveNodesContain(shard.getValue().getNodeName()));
          }
          String state = shard.getValue().getStr(ZkStateReader.STATE_PROP);
          if ((state.equals(ZkStateReader.RECOVERING)
              || state.equals(ZkStateReader.SYNC) || state
              .equals(ZkStateReader.DOWN))
              && clusterState.liveNodesContain(shard.getValue().getStr(
              ZkStateReader.NODE_NAME_PROP))) {
            sawLiveRecovering = true;
          }
        }
      }
      if (!sawLiveRecovering || cnt == timeoutSeconds) {
        if (!sawLiveRecovering) {
          if (verbose) {
            LOG.debug("no one is recovering");
          }
        } else {
          if (verbose) {
            LOG.debug("Gave up waiting for recovery to finish..");
          }
          if (failOnTimeout) {
            fail("There are still nodes recovering - waited for "
                + timeoutSeconds + " seconds");
            // won't get here
            return;
          }
        }
        cont = false;
      } else {
        Thread.sleep(1000);
      }
      cnt++;
    }
  } finally {
    LOG.info("Exiting solr wait");
  }
}
 
Example 20
Source File: DistribJoinFromCollectionTest.java    From lucene-solr with Apache License 2.0
@BeforeClass
public static void setupCluster() throws Exception {
  final Path configDir = Paths.get(TEST_HOME(), "collection1", "conf");

  String configName = "solrCloudCollectionConfig";
  int nodeCount = 5;
  configureCluster(nodeCount)
     .addConfig(configName, configDir)
     .configure();
  
  
  Map<String, String> collectionProperties = new HashMap<>();
  collectionProperties.put("config", "solrconfig-tlog.xml" );
  collectionProperties.put("schema", "schema.xml"); 
  
  // create a collection holding data for the "to" side of the JOIN
  
  int shards = 2;
  int replicas = 2;
  CollectionAdminRequest.createCollection(toColl, configName, shards, replicas)
      .setProperties(collectionProperties)
      .process(cluster.getSolrClient());

  // get the set of nodes where replicas for the "to" collection exist
  Set<String> nodeSet = new HashSet<>();
  ZkStateReader zkStateReader = cluster.getSolrClient().getZkStateReader();
  ClusterState cs = zkStateReader.getClusterState();
  for (Slice slice : cs.getCollection(toColl).getActiveSlices())
    for (Replica replica : slice.getReplicas())
      nodeSet.add(replica.getNodeName());
  assertTrue(nodeSet.size() > 0);

  // deploy the "from" collection to all nodes where the "to" collection exists
  CollectionAdminRequest.createCollection(fromColl, configName, 1, 4)
      .setCreateNodeSet(String.join(",", nodeSet))
      .setProperties(collectionProperties)
      .process(cluster.getSolrClient());

  toDocId = indexDoc(toColl, 1001, "a", null, "b");
  indexDoc(fromColl, 2001, "a", "c", null);

  Thread.sleep(1000); // so the commits fire

}