org.apache.solr.common.cloud.DocRouter Java Examples

The following examples show how to use org.apache.solr.common.cloud.DocRouter. The examples are extracted from open source projects; each one lists the source file and the project it was taken from.
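Before looking at the individual examples, a minimal, self-contained sketch of the DocRouter API may help. This snippet is not taken from any of the projects below; it only combines calls that also appear in the examples (DocRouter.getDocRouter, DocRouter.Range, partitionRange, and CompositeIdRouter.getSearchRangeSingle), and the class name DocRouterSketch and the shard key "tenantA!" are invented for illustration.

import java.util.List;

import org.apache.solr.common.cloud.CompositeIdRouter;
import org.apache.solr.common.cloud.DocRouter;

public class DocRouterSketch {
  public static void main(String[] args) {
    // The default router is the composite-id router ("compositeId").
    DocRouter router = DocRouter.getDocRouter(CompositeIdRouter.NAME);

    // Partition the full 32-bit hash space into four contiguous sub-ranges
    // (SPLITSHARD applies the same operation to a parent shard's range).
    DocRouter.Range full = new DocRouter.Range(Integer.MIN_VALUE, Integer.MAX_VALUE);
    List<DocRouter.Range> ranges = router.partitionRange(4, full);
    for (DocRouter.Range range : ranges) {
      System.out.println(range); // ranges print as hex, e.g. 80000000-bfffffff
    }

    // A composite-id prefix such as "tenantA!" maps to a hash range of its own;
    // getSearchRangeSingle reports that range (compare Example #4 below).
    CompositeIdRouter compositeIdRouter = new CompositeIdRouter();
    DocRouter.Range prefixRange = compositeIdRouter.getSearchRangeSingle("tenantA!", null, null);
    System.out.println(prefixRange.min + " .. " + prefixRange.max);
  }
}

Only the solr-solrj artifact should be needed on the classpath, since both DocRouter and CompositeIdRouter live in org.apache.solr.common.cloud.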
Example #1
Source File: SliceStateTest.java    From lucene-solr with Apache License 2.0
@Test
public void testDefaultSliceState() {
  Map<String, DocCollection> collectionStates = new HashMap<>();
  Set<String> liveNodes = new HashSet<>();
  liveNodes.add("node1");

  Map<String, Slice> slices = new HashMap<>();
  Map<String, Replica> sliceToProps = new HashMap<>();
  Map<String, Object> props = new HashMap<>();
  props.put("node_name", "127.0.0.1:10000_solr");
  props.put("core", "core1");

  Replica replica = new Replica("node1", props, "collection1", "shard1");
  sliceToProps.put("node1", replica);
  Slice slice = new Slice("shard1", sliceToProps, null, "collection1");
  assertSame("Default state not set to active", Slice.State.ACTIVE, slice.getState());
  slices.put("shard1", slice);
  collectionStates.put("collection1", new DocCollection("collection1", slices, null, DocRouter.DEFAULT));

  ClusterState clusterState = new ClusterState(liveNodes, collectionStates);
  byte[] bytes = Utils.toJSON(clusterState);
  ClusterState loadedClusterState = ClusterState.createFromJson(-1, bytes, liveNodes);

  assertSame("Default state not set to active", Slice.State.ACTIVE, loadedClusterState.getCollection("collection1").getSlice("shard1").getState());
}
 
Example #2
Source File: AssignTest.java    From lucene-solr with Apache License 2.0
@Test
public void testBuildCoreName() throws Exception {
  Path zkDir = createTempDir("zkData");
  ZkTestServer server = new ZkTestServer(zkDir);
  server.run();
  try (SolrZkClient zkClient = new SolrZkClient(server.getZkAddress(), 10000)) {
    // TODO: fix this to be independent of ZK
    ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
    Map<String, Slice> slices = new HashMap<>();
    slices.put("shard1", new Slice("shard1", new HashMap<>(), null,"collection1"));
    slices.put("shard2", new Slice("shard2", new HashMap<>(), null,"collection1"));

    DocCollection docCollection = new DocCollection("collection1", slices, null, DocRouter.DEFAULT);
    assertEquals("Core name pattern changed", "collection1_shard1_replica_n1", Assign.buildSolrCoreName(stateManager, docCollection, "shard1", Replica.Type.NRT));
    assertEquals("Core name pattern changed", "collection1_shard2_replica_p2", Assign.buildSolrCoreName(stateManager, docCollection, "shard2", Replica.Type.PULL));
  } finally {
    server.shutdown();
  }
}
 
Example #3
Source File: AssignTest.java    From lucene-solr with Apache License 2.0
@Test
public void testAssignNode() throws Exception {
  assumeWorkingMockito();
  
  SolrZkClient zkClient = mock(SolrZkClient.class);
  Map<String, byte[]> zkClientData = new HashMap<>();
  when(zkClient.setData(anyString(), any(), anyInt(), anyBoolean())).then(invocation -> {
      zkClientData.put(invocation.getArgument(0), invocation.getArgument(1));
      return null;
    }
  );
  when(zkClient.getData(anyString(), any(), any(), anyBoolean())).then(invocation ->
      zkClientData.get(invocation.getArgument(0)));
  // TODO: fix this to be independent of ZK
  ZkDistribStateManager stateManager = new ZkDistribStateManager(zkClient);
  String nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
  assertEquals("core_node1", nodeName);
  nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection2", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
  assertEquals("core_node1", nodeName);
  nodeName = Assign.assignCoreNodeName(stateManager, new DocCollection("collection1", new HashMap<>(), new HashMap<>(), DocRouter.DEFAULT));
  assertEquals("core_node2", nodeName);
}
 
Example #4
Source File: SplitByPrefixTest.java    From lucene-solr with Apache License 2.0
/**
 * find prefixes (shard keys) matching certain criteria
 */
public static List<Prefix> findPrefixes(int numToFind, int lowerBound, int upperBound) {
  CompositeIdRouter router = new CompositeIdRouter();

  ArrayList<Prefix> prefixes = new ArrayList<>();
  int maxTries = 1000000;
  int numFound = 0;
  for (int i=0; i<maxTries; i++) {
    String shardKey = Integer.toHexString(i)+"!";
    DocRouter.Range range = router.getSearchRangeSingle(shardKey, null, null);
    int lower = range.min;
    if (lower >= lowerBound && lower <= upperBound) {
      Prefix prefix = new Prefix();
      prefix.key = shardKey;
      prefix.range = range;
      prefixes.add(prefix);
      if (++numFound >= numToFind) break;
    }
  }

  Collections.sort(prefixes);

  return prefixes;
}
 
Example #5
Source File: SplitOp.java    From lucene-solr with Apache License 2.0
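/** Joins a collection of hash ranges into a single comma-separated string; returns null for null input. */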
static String toSplitString(Collection<DocRouter.Range> splits) throws Exception {
  if (splits == null) {
    return null;
  }

  StringBuilder sb = new StringBuilder();
  for (DocRouter.Range range : splits) {
    if (sb.length() > 0) {
      sb.append(",");
    }
    sb.append(range);
  }

  return sb.toString();
}
 
Example #6
Source File: SolrIndexSplitterTest.java    From lucene-solr with Apache License 2.0
private List<DocRouter.Range> getRanges(String id1, String id2) throws UnsupportedEncodingException {
  // find minHash/maxHash hash ranges
  byte[] bytes = id1.getBytes(StandardCharsets.UTF_8);
  int minHash = Hash.murmurhash3_x86_32(bytes, 0, bytes.length, 0);
  bytes = id2.getBytes(StandardCharsets.UTF_8);
  int maxHash = Hash.murmurhash3_x86_32(bytes, 0, bytes.length, 0);

  if (minHash > maxHash)  {
    int temp = maxHash;
    maxHash = minHash;
    minHash = temp;
  }

  PlainIdRouter router = new PlainIdRouter();
  DocRouter.Range fullRange = new DocRouter.Range(minHash, maxHash);
  return router.partitionRange(2, fullRange);
}
 
Example #7
Source File: SplitHandlerTest.java    From lucene-solr with Apache License 2.0
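/** Asserts that the produced ranges are contiguous and together cover exactly the current range. */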
void verifyContiguous(Collection<DocRouter.Range> results, DocRouter.Range currentRange) {
  if (results == null) return;

  assertTrue(results.size() > 1);

  DocRouter.Range prev = null;
  for (DocRouter.Range range : results) {
    if (prev == null) {
      // first range
      assertEquals(range.min, currentRange.min);
    } else {
      // make sure produced ranges are contiguous
      assertEquals(range.min, prev.max + 1);
    }
    prev = range;
  }
  assertEquals(prev.max, currentRange.max);
}
 
Example #8
Source File: CrossCollectionJoinQuery.java    From lucene-solr with Apache License 2.0
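/** Builds a {!hash_range} filter query limiting the join to keys that hash into this core's shard range, or returns null if the collection is not routed by the join key. */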
private String createHashRangeFq() {
  if (routedByJoinKey) {
    ClusterState clusterState = searcher.getCore().getCoreContainer().getZkController().getClusterState();
    CloudDescriptor desc = searcher.getCore().getCoreDescriptor().getCloudDescriptor();
    Slice slice = clusterState.getCollection(desc.getCollectionName()).getSlicesMap().get(desc.getShardId());
    DocRouter.Range range = slice.getRange();

    // In CompositeIdRouter, the routing prefix only affects the top 16 bits
    int min = range.min & 0xffff0000;
    int max = range.max | 0x0000ffff;

    return String.format(Locale.ROOT, "{!hash_range f=%s l=%d u=%d}", fromField, min, max);
  } else {
    return null;
  }
}
 
Example #9
Source File: ClusterStateMockUtilTest.java    From lucene-solr with Apache License 2.0
@Test
public void testBuildClusterState_Simple() {
  try (ZkStateReader zkStateReader = ClusterStateMockUtil.buildClusterState("csr", "baseUrl1_")) {
    ClusterState clusterState = zkStateReader.getClusterState();
    assertNotNull(clusterState);
    assertEquals(1, clusterState.getCollectionStates().size());
    DocCollection collection1 = clusterState.getCollectionOrNull("collection1");
    assertNotNull(collection1);
    assertEquals(DocRouter.DEFAULT, collection1.getRouter());
    assertEquals(1, collection1.getActiveSlices().size());
    assertEquals(1, collection1.getSlices().size());
    Slice slice1 = collection1.getSlice("slice1");
    assertNotNull(slice1);
    assertEquals(1, slice1.getReplicas().size());
    Replica replica1 = slice1.getReplica("replica1");
    assertNotNull(replica1);
    assertEquals("baseUrl1_", replica1.getNodeName());
    assertEquals("slice1_replica1", replica1.getCoreName());
    assertEquals("http://baseUrl1", replica1.getBaseUrl());
    assertEquals("http://baseUrl1/slice1_replica1/", replica1.getCoreUrl());
    assertEquals(Replica.State.ACTIVE, replica1.getState());
    assertEquals(Replica.Type.NRT, replica1.getType());
  }
}
 
Example #10
Source File: DistributedZkUpdateProcessor.java    From lucene-solr with Apache License 2.0
/** For {@link org.apache.solr.common.params.CollectionParams.CollectionAction#SPLITSHARD} */
protected List<SolrCmdDistributor.Node> getSubShardLeaders(DocCollection coll, String shardId, String docId, SolrInputDocument doc) {
  Collection<Slice> allSlices = coll.getSlices();
  List<SolrCmdDistributor.Node> nodes = null;
  for (Slice aslice : allSlices) {
    final Slice.State state = aslice.getState();
    if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY)  {
      DocRouter.Range myRange = coll.getSlice(shardId).getRange();
      if (myRange == null) myRange = new DocRouter.Range(Integer.MIN_VALUE, Integer.MAX_VALUE);
      boolean isSubset = aslice.getRange() != null && aslice.getRange().isSubsetOf(myRange);
      if (isSubset &&
          (docId == null // in case of deletes
              || coll.getRouter().isTargetSlice(docId, doc, req.getParams(), aslice.getName(), coll))) {
        Replica sliceLeader = aslice.getLeader();
        // slice leader can be null because the node/shard is created in ZK before leader election
        if (sliceLeader != null && clusterState.liveNodesContain(sliceLeader.getNodeName()))  {
          if (nodes == null) nodes = new ArrayList<>();
          ZkCoreNodeProps nodeProps = new ZkCoreNodeProps(sliceLeader);
          nodes.add(new SolrCmdDistributor.StdNode(nodeProps, coll.getName(), aslice.getName()));
        }
      }
    }
  }
  return nodes;
}
 
Example #11
Source File: DistributedZkUpdateProcessor.java    From lucene-solr with Apache License 2.0
/** For {@link org.apache.solr.common.params.CollectionParams.CollectionAction#SPLITSHARD} */
protected boolean amISubShardLeader(DocCollection coll, Slice parentSlice, String id, SolrInputDocument doc) throws InterruptedException {
  // Am I the leader of a shard in "construction/recovery" state?
  String myShardId = cloudDesc.getShardId();
  Slice mySlice = coll.getSlice(myShardId);
  final Slice.State state = mySlice.getState();
  if (state == Slice.State.CONSTRUCTION || state == Slice.State.RECOVERY) {
    Replica myLeader = zkController.getZkStateReader().getLeaderRetry(collection, myShardId);
    boolean amILeader = myLeader.getName().equals(cloudDesc.getCoreNodeName());
    if (amILeader) {
      // Does the document belong to my hash range as well?
      DocRouter.Range myRange = mySlice.getRange();
      if (myRange == null) myRange = new DocRouter.Range(Integer.MIN_VALUE, Integer.MAX_VALUE);
      if (parentSlice != null)  {
        boolean isSubset = parentSlice.getRange() != null && myRange.isSubsetOf(parentSlice.getRange());
        return isSubset && coll.getRouter().isTargetSlice(id, doc, req.getParams(), myShardId, coll);
      } else  {
        // delete by query case -- as long as I am a sub shard leader we're fine
        return true;
      }
    }
  }
  return false;
}
 
Example #12
Source File: ClusterStateMockUtilTest.java    From lucene-solr with Apache License 2.0
@Test
public void testBuildClusterState_ReplicaTypes() {
  try (ZkStateReader zkStateReader = ClusterStateMockUtil.buildClusterState("csntp", "baseUrl1_")) {
    ClusterState clusterState = zkStateReader.getClusterState();
    assertNotNull(clusterState);
    assertEquals(1, clusterState.getCollectionStates().size());
    DocCollection collection1 = clusterState.getCollectionOrNull("collection1");
    assertNotNull(collection1);
    assertEquals(DocRouter.DEFAULT, collection1.getRouter());
    assertEquals(1, collection1.getActiveSlices().size());
    assertEquals(1, collection1.getSlices().size());
    Slice slice1 = collection1.getSlice("slice1");
    assertNotNull(slice1);
    assertEquals(3, slice1.getReplicas().size());
    assertEquals(1, slice1.getReplicas(replica -> replica.getType() == Replica.Type.NRT).size());
    assertEquals(1, slice1.getReplicas(replica -> replica.getType() == Replica.Type.TLOG).size());
    assertEquals(1, slice1.getReplicas(replica -> replica.getType() == Replica.Type.PULL).size());
  }
}
 
Example #13
Source File: ClusterStateMockUtilTest.java    From lucene-solr with Apache License 2.0
@Test
public void testBuildClusterState_ReplicaStateAndType() {
  try (ZkStateReader zkStateReader = ClusterStateMockUtil.buildClusterState("csrStRpDnF", "baseUrl1_")) {
    ClusterState clusterState = zkStateReader.getClusterState();
    assertNotNull(clusterState);
    assertEquals(1, clusterState.getCollectionStates().size());
    DocCollection collection1 = clusterState.getCollectionOrNull("collection1");
    assertNotNull(collection1);
    assertEquals(DocRouter.DEFAULT, collection1.getRouter());
    assertEquals(1, collection1.getActiveSlices().size());
    assertEquals(1, collection1.getSlices().size());
    Slice slice1 = collection1.getSlice("slice1");
    assertNotNull(slice1);
    assertEquals(4, slice1.getReplicas().size());
    assertEquals(1, slice1.getReplicas(replica -> replica.getType() == Replica.Type.NRT && replica.getState() == Replica.State.ACTIVE).size());
    assertEquals(1, slice1.getReplicas(replica -> replica.getType() == Replica.Type.NRT && replica.getState() == Replica.State.RECOVERY_FAILED).size());
    assertEquals(1, slice1.getReplicas(replica -> replica.getType() == Replica.Type.TLOG && replica.getState() == Replica.State.RECOVERING).size());
    assertEquals(1, slice1.getReplicas(replica -> replica.getType() == Replica.Type.PULL && replica.getState() == Replica.State.DOWN).size());
  }
}
 
Example #14
Source File: TestHashPartitioner.java    From lucene-solr with Apache License 2.0
public void testCompositeHashCodes() throws Exception {
  DocRouter router = DocRouter.getDocRouter(CompositeIdRouter.NAME);
  assertTrue(router instanceof CompositeIdRouter);
  router = DocRouter.DEFAULT;
  assertTrue(router instanceof CompositeIdRouter);

  DocCollection coll = createCollection(4, router);
  doNormalIdHashing(coll);

  // ensure that the shard hashed to is only dependent on the first part of the compound key
  doId(coll, "b!foo", "shard1");
  doId(coll, "c!bar", "shard2");
  doId(coll, "d!baz", "shard3");
  doId(coll, "e!qux", "shard4");

  // syntax to specify bits.
  // Anything over 2 bits should give the same results as above (since only top 2 bits
  // affect our 4 slice collection).
  doId(coll, "b/2!foo", "shard1");
  doId(coll, "c/2!bar", "shard2");
  doId(coll, "d/2!baz", "shard3");
  doId(coll, "e/2!qux", "shard4");

  doId(coll, "b/32!foo", "shard1");
  doId(coll, "c/32!bar", "shard2");
  doId(coll, "d/32!baz", "shard3");
  doId(coll, "e/32!qux", "shard4");

  // no bits allocated to the first part (odd that anyone would do this, but it is allowed)
  doIndex(coll, "foo/0!b", "shard1");
  doIndex(coll, "foo/0!c", "shard2");
  doIndex(coll, "foo/0!d", "shard3");
  doIndex(coll, "foo/0!e", "shard4");

  // means cover whole range on the query side
  doQuery(coll, "foo/0!", "shard1,shard2,shard3,shard4");

  doQuery(coll, "b/1!", "shard1,shard2");   // top bit of hash(b)==1, so shard1 and shard2
  doQuery(coll, "d/1!", "shard3,shard4");   // top bit of hash(b)==0, so shard3 and shard4
}
 
Example #15
Source File: TestHashPartitioner.java    From lucene-solr with Apache License 2.0
/** Make sure CompositeIdRouter doesn't throw exceptions for non-conforming IDs */
public void testNonConformingCompositeIds() throws Exception {
  DocRouter router = DocRouter.getDocRouter(CompositeIdRouter.NAME);
  DocCollection coll = createCollection(4, router);
  String[] ids = { "A!B!C!D", "!!!!!!", "A!!!!B", "A!!B!!C", "A/59!B", "A/8/!B/19/", 
                   "A!B/-5", "!/130!", "!!A/1000", "A//8!B///10!C////" };
  for (int i = 0 ; i < ids.length ; ++i) {
    try {
      Slice targetSlice = coll.getRouter().getTargetSlice(ids[i], null, null, null, coll);
      assertNotNull(targetSlice);
    } catch (Exception e) {
      throw new Exception("Exception routing id '" + ids[i] + "'", e);
    }
  }
}
 
Example #16
Source File: TestHashPartitioner.java    From lucene-solr with Apache License 2.0
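/** Verifies that the slices selected for a query on the given id match the expected comma-separated list of shard names. */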
public void doQuery(DocCollection coll, String id, String expectedShards) {
  DocRouter router = coll.getRouter();
  Collection<Slice> slices = router.getSearchSlices(id, null, coll);

  List<String> expectedShardStr = StrUtils.splitSmart(expectedShards, ",", true);

  HashSet<String> expectedSet = new HashSet<>(expectedShardStr);
  HashSet<String> obtainedSet = new HashSet<>();
  for (Slice slice : slices) {
    obtainedSet.add(slice.getName());
  }

  assertEquals(slices.size(), obtainedSet.size());  // make sure no repeated slices
  assertEquals(expectedSet, obtainedSet);
}
 
Example #17
Source File: TestHashPartitioner.java    From lucene-solr with Apache License 2.0
/** Make sure CompositeIdRouter can route random IDs without throwing exceptions */
public void testRandomCompositeIds() throws Exception {
  DocRouter router = DocRouter.getDocRouter(CompositeIdRouter.NAME);
  DocCollection coll = createCollection(TestUtil.nextInt(random(), 1, 10), router);
  StringBuilder idBuilder = new StringBuilder();
  for (int i = 0 ; i < 10000 ; ++i) {
    idBuilder.setLength(0);
    int numParts = TestUtil.nextInt(random(), 1, 30);
    for (int part = 0; part < numParts; ++part) {
      switch (random().nextInt(5)) {
        case 0: idBuilder.append('!'); break;
        case 1: idBuilder.append('/'); break;
        case 2: idBuilder.append(TestUtil.nextInt(random(),-100, 1000)); break;
        default: {
          int length = TestUtil.nextInt(random(), 1, 10);
          char[] str = new char[length];
          TestUtil.randomFixedLengthUnicodeString(random(), str, 0, length);
          idBuilder.append(str);
          break;
        } 
      }
    }
    String id = idBuilder.toString();
    try {
      Slice targetSlice = router.getTargetSlice(id, null, null, null, coll);
      assertNotNull(targetSlice);
    } catch (Exception e) {
      throw new Exception("Exception routing id '" + id + "'", e);
    }
  }
}
 
Example #18
Source File: SolrIndexSplitterTest.java    From lucene-solr with Apache License 2.0
private void doTestSplitByPaths(SolrIndexSplitter.SplitMethod splitMethod) throws Exception {
  LocalSolrQueryRequest request = null;
  try {
    // add two docs
    String id1 = "dorothy";
    assertU(adoc("id", id1));
    String id2 = "kansas";
    assertU(adoc("id", id2));
    assertU(commit());
    assertJQ(req("q", "*:*"), "/response/numFound==2");

    // find minHash/maxHash hash ranges
    List<DocRouter.Range> ranges = getRanges(id1, id2);

    request = lrf.makeRequest("q", "dummy");
    SolrQueryResponse rsp = new SolrQueryResponse();
    SplitIndexCommand command = new SplitIndexCommand(request, rsp,
        Lists.newArrayList(indexDir1.getAbsolutePath(), indexDir2.getAbsolutePath()), null, ranges, new PlainIdRouter(), null, null, splitMethod);
    doSplit(command);

    Directory directory = h.getCore().getDirectoryFactory().get(indexDir1.getAbsolutePath(),
        DirectoryFactory.DirContext.DEFAULT, h.getCore().getSolrConfig().indexConfig.lockType);
    DirectoryReader reader = DirectoryReader.open(directory);
    assertEquals("id:dorothy should be present in split index1", 1, reader.docFreq(new Term("id", "dorothy")));
    assertEquals("id:kansas should not be present in split index1", 0, reader.docFreq(new Term("id", "kansas")));
    assertEquals("split index1 should have only one document", 1, reader.numDocs());
    reader.close();
    h.getCore().getDirectoryFactory().release(directory);
    directory = h.getCore().getDirectoryFactory().get(indexDir2.getAbsolutePath(),
        DirectoryFactory.DirContext.DEFAULT, h.getCore().getSolrConfig().indexConfig.lockType);
    reader = DirectoryReader.open(directory);
    assertEquals("id:dorothy should not be present in split index2", 0, reader.docFreq(new Term("id", "dorothy")));
    assertEquals("id:kansas should be present in split index2", 1, reader.docFreq(new Term("id", "kansas")));
    assertEquals("split index2 should have only one document", 1, reader.numDocs());
    reader.close();
    h.getCore().getDirectoryFactory().release(directory);
  } finally {
    if (request != null) request.close(); // decrefs the searcher
  }
}
 
Example #19
Source File: SolrCloudPartitioner.java    From examples with Apache License 2.0
@Override
public void setConf(Configuration conf) {
  this.conf = conf;
  this.shards = conf.getInt(SHARDS, -1);
  if (shards <= 0) {
    throw new IllegalArgumentException("Illegal shards: " + shards);
  }
  String zkHost = conf.get(ZKHOST);
  if (zkHost == null) {
    throw new IllegalArgumentException("zkHost must not be null");
  }
  String collection = conf.get(COLLECTION);
  if (collection == null) {
    throw new IllegalArgumentException("collection must not be null");
  }
  LOG.info("Using SolrCloud zkHost: {}, collection: {}", zkHost, collection);
  docCollection = new ZooKeeperInspector().extractDocCollection(zkHost, collection);
  if (docCollection == null) {
    throw new IllegalArgumentException("docCollection must not be null");
  }
  if (docCollection.getSlicesMap().size() != shards) {
    throw new IllegalArgumentException("Incompatible shards: + " + shards
        + " for docCollection: " + docCollection);
  }
  List<Slice> slices = new ZooKeeperInspector().getSortedSlices(docCollection.getSlices());
  if (slices.size() != shards) {
    throw new IllegalStateException("Incompatible sorted shards: + " + shards
        + " for docCollection: " + docCollection);
  }
  shardNumbers = new HashMap<>(10 * slices.size()); // sparse for performance
  for (int i = 0; i < slices.size(); i++) {
    shardNumbers.put(slices.get(i).getName(), i);
  }
  LOG.debug("Using SolrCloud docCollection: {}", docCollection);
  DocRouter docRouter = docCollection.getRouter();
  if (docRouter == null) {
    throw new IllegalArgumentException("docRouter must not be null");
  }
  LOG.info("Using SolrCloud docRouterClass: {}", docRouter.getClass());
}
 
Example #20
Source File: SplitShardCmd.java    From lucene-solr with Apache License 2.0
public static Slice getParentSlice(ClusterState clusterState, String collectionName, AtomicReference<String> slice, String splitKey) {
  DocCollection collection = clusterState.getCollection(collectionName);
  DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;

  Slice parentSlice;

  if (slice.get() == null) {
    if (router instanceof CompositeIdRouter) {
      Collection<Slice> searchSlices = router.getSearchSlicesSingle(splitKey, new ModifiableSolrParams(), collection);
      if (searchSlices.isEmpty()) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to find an active shard for split.key: " + splitKey);
      }
      if (searchSlices.size() > 1) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
            "Splitting a split.key: " + splitKey + " which spans multiple shards is not supported");
      }
      parentSlice = searchSlices.iterator().next();
      slice.set(parentSlice.getName());
      log.info("Split by route.key: {}, parent shard is: {} ", splitKey, slice);
    } else {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
          "Split by route key can only be used with CompositeIdRouter or subclass. Found router: "
              + router.getClass().getName());
    }
  } else {
    parentSlice = collection.getSlice(slice.get());
  }

  if (parentSlice == null) {
    // no chance of the collection being null because ClusterState#getCollection(String) would have thrown
    // an exception already
    throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No shard with the specified name exists: " + slice);
  }
  return parentSlice;
}
 
Example #21
Source File: OverseerCollectionMessageHandler.java    From lucene-solr with Apache License 2.0
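/** Returns the intersection of two hash ranges, or null if they do not overlap. */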
DocRouter.Range intersect(DocRouter.Range a, DocRouter.Range b) {
  if (a == null || b == null || !a.overlaps(b)) {
    return null;
  } else if (a.isSubsetOf(b))
    return a;
  else if (b.isSubsetOf(a))
    return b;
  else if (b.includes(a.max)) {
    return new DocRouter.Range(b.min, a.max);
  } else  {
    return new DocRouter.Range(a.min, b.max);
  }
}
 
Example #22
Source File: ShardSplitTest.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
protected void splitShard(String collection, String shardId, List<DocRouter.Range> subRanges, String splitKey, boolean offline) throws SolrServerException, IOException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString());
  params.set("timing", "true");
  params.set("offline", String.valueOf(offline));
  params.set("collection", collection);
  if (shardId != null)  {
    params.set("shard", shardId);
  }
  if (subRanges != null)  {
    StringBuilder ranges = new StringBuilder();
    for (int i = 0; i < subRanges.size(); i++) {
      DocRouter.Range subRange = subRanges.get(i);
      ranges.append(subRange.toString());
      if (i < subRanges.size() - 1)
        ranges.append(",");
    }
    params.set("ranges", ranges.toString());
  }
  if (splitKey != null) {
    params.set("split.key", splitKey);
  }
  @SuppressWarnings({"rawtypes"})
  SolrRequest request = new QueryRequest(params);
  request.setPath("/admin/collections");

  String baseUrl = ((HttpSolrClient) shardToJetty.get(SHARD1).get(0).client.getSolrClient()).getBaseURL();
  baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());

  try (HttpSolrClient baseServer = getHttpSolrClient(baseUrl, 30000, 60000 * 5)) {
    NamedList<Object> rsp = baseServer.request(request);
    if (log.isInfoEnabled()) {
      log.info("Shard split response: {}", Utils.toJSONString(rsp));
    }
  }
}
 
Example #23
Source File: ShardSplitTest.java    From lucene-solr with Apache License 2.0
protected void indexAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id, int n, Set<String> documentIds) throws Exception {
  index("id", id, "n_ti", n);

  int idx = getHashRangeIdx(router, ranges, id);
  if (idx != -1)  {
    docCounts[idx]++;
    documentIds.add(String.valueOf(id));
  }
}
 
Example #24
Source File: ShardSplitTest.java    From lucene-solr with Apache License 2.0
protected void deleteAndUpdateCount(DocRouter router, List<DocRouter.Range> ranges, int[] docCounts, String id) throws Exception {
  controlClient.deleteById(id);
  cloudClient.deleteById(id);

  int idx = getHashRangeIdx(router, ranges, id);
  if (idx != -1)  {
    docCounts[idx]--;
  }
}
 
Example #25
Source File: ShardSplitTest.java    From lucene-solr with Apache License 2.0
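/** Returns the index of the range that contains the hash of the given id, or -1 if no range matches. */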
public static int getHashRangeIdx(DocRouter router, List<DocRouter.Range> ranges, String id) {
  int hash = 0;
  if (router instanceof HashBasedRouter) {
    HashBasedRouter hashBasedRouter = (HashBasedRouter) router;
    hash = hashBasedRouter.sliceHash(id, null, null, null);
  }
  for (int i = 0; i < ranges.size(); i++) {
    DocRouter.Range range = ranges.get(i);
    if (range.includes(hash))
      return i;
  }
  return -1;
}
 
Example #26
Source File: SolrIndexSplitter.java    From lucene-solr with Apache License 2.0
SplittingQuery(int partition, SchemaField field, DocRouter.Range[] rangesArr, HashBasedRouter hashRouter, String splitKey,
               Map<IndexReader.CacheKey, FixedBitSet[]> docsToDelete, AtomicInteger currentPartition) {
  this.partition = partition;
  this.field = field;
  this.rangesArr = rangesArr;
  this.hashRouter = hashRouter;
  this.splitKey = splitKey;
  this.docsToDelete = docsToDelete;
  this.currentPartition = currentPartition;
}
 
Example #27
Source File: SolrIndexSplitter.java    From lucene-solr with Apache License 2.0
public SolrIndexSplitter(SplitIndexCommand cmd) {
  searcher = cmd.getReq().getSearcher();
  ranges = cmd.ranges;
  paths = cmd.paths;
  cores = cmd.cores;
  hashRouter = cmd.router instanceof HashBasedRouter ? (HashBasedRouter)cmd.router : null;

  if (ranges == null) {
    numPieces =  paths != null ? paths.size() : cores.size();
  } else  {
    numPieces = ranges.size();
    rangesArr = ranges.toArray(new DocRouter.Range[ranges.size()]);
  }
  routeFieldName = cmd.routeFieldName;
  if (routeFieldName == null) {
    field = searcher.getSchema().getUniqueKeyField();
  } else  {
    field = searcher.getSchema().getField(routeFieldName);
  }
  if (cmd.splitKey != null) {
    splitKey = getRouteKey(cmd.splitKey);
  }
  if (cores == null) {
    this.splitMethod = SplitMethod.REWRITE;
  } else {
    this.splitMethod = cmd.splitMethod;
  }
}
 
Example #28
Source File: SplitIndexCommand.java    From lucene-solr with Apache License 2.0
public SplitIndexCommand(SolrQueryRequest req, SolrQueryResponse rsp, List<String> paths, List<SolrCore> cores, List<DocRouter.Range> ranges,
                         DocRouter router, String routeFieldName, String splitKey, SolrIndexSplitter.SplitMethod splitMethod) {
  super(req);
  this.rsp = rsp;
  this.paths = paths;
  this.cores = cores;
  this.ranges = ranges;
  this.router = router;
  this.routeFieldName = routeFieldName;
  this.splitKey = splitKey;
  this.splitMethod = splitMethod;
}
 
Example #29
Source File: ReplicationFactorTest.java    From lucene-solr with Apache License 2.0
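/** Returns 1 if any of the given ids routes to the named shard, otherwise 2. */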
private int calcByIdRf(Set<Integer> byIDs, String testCollectionName, String shardWithOne) {
  ZkController zkController = jettys.get(0).getCoreContainer().getZkController();
  DocCollection coll = zkController.getClusterState().getCollection(testCollectionName);
  int retval = 2;
  for (int id : byIDs) {
    DocRouter router = coll.getRouter();
    if (shardWithOne.equals(router.getTargetSlice(Integer.toString(id), null, null, null, coll).getName())) {
      retval = 1;
    }
  }
  return retval;
}
 
Example #30
Source File: OverseerCollectionConfigSetProcessorTest.java    From lucene-solr with Apache License 2.0
private void handleCreateCollMessage(byte[] bytes) {
  log.info("track created replicas / collections");
  try {
    ZkNodeProps props = ZkNodeProps.load(bytes);
    if (CollectionParams.CollectionAction.CREATE.isEqual(props.getStr("operation"))) {
      String collName = props.getStr("name");
      if (collName != null) collectionsSet.put(collName, new ClusterState.CollectionRef(
          new DocCollection(collName, new HashMap<>(), props.getProperties(), DocRouter.DEFAULT)));
    }
    if (CollectionParams.CollectionAction.ADDREPLICA.isEqual(props.getStr("operation"))) {
      replicas.add(props);
    }
  } catch (Exception e) {}
}