Java Code Examples for org.apache.solr.client.solrj.request.UpdateRequest#commit()

The following examples show how to use org.apache.solr.client.solrj.request.UpdateRequest#commit(). Each example is drawn from an open-source project; the source file and license are noted above it.
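All of the examples follow the same pattern: documents are buffered on an UpdateRequest, and commit(SolrClient, String) then sends the buffered documents and issues a commit against the named collection in a single call. Below is a minimal, self-contained sketch of that pattern; the base URL, collection name, and field names are placeholders rather than values from the examples, and it assumes a SolrJ version that provides Http2SolrClient (the client used in Example 13).

import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.impl.Http2SolrClient;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.common.SolrInputDocument;

public class UpdateRequestCommitSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder base URL and collection name; adjust for your own deployment.
    try (SolrClient client = new Http2SolrClient.Builder("http://localhost:8983/solr").build()) {
      SolrInputDocument doc = new SolrInputDocument();
      doc.setField("id", "example-1");
      doc.setField("title_s", "hello"); // placeholder dynamic field

      UpdateRequest req = new UpdateRequest();
      req.add(doc);
      // Sends the buffered document and issues a commit in a single request.
      req.commit(client, "myCollection");
    }
  }
}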
Example 1
Source File: StreamExpressionTest.java    From lucene-solr with Apache License 2.0
@Test
public void testEvalStream() throws Exception {
  UpdateRequest updateRequest = new UpdateRequest();
  updateRequest.add(id, "hello", "test_t", "l b c d c");
  updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);

  String expr = "eval(select(echo(\"search("+COLLECTIONORALIAS+", q=\\\"*:*\\\", fl=id, sort=\\\"id desc\\\")\"), echo as expr_s))";
  ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
  paramsLoc.set("expr", expr);
  paramsLoc.set("qt", "/stream");

  String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
  TupleStream solrStream = new SolrStream(url, paramsLoc);

  StreamContext context = new StreamContext();
  solrStream.setStreamContext(context);
  List<Tuple> tuples = getTuples(solrStream);
  assertEquals(1, tuples.size());
  String s = (String) tuples.get(0).get("id");
  assertEquals("hello", s);
}
 
Example 2
Source File: StreamExpressionTest.java    From lucene-solr with Apache License 2.0
@Test
public void testSearchBacktick() throws Exception {
  UpdateRequest updateRequest = new UpdateRequest();
  updateRequest.add(id, "hello", "test_t", "l b c d c e");
  updateRequest.add(id, "hello1", "test_t", "l b c d c");
  updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);

  String expr = "search("+COLLECTIONORALIAS+", q=\"`c d c e`\", fl=\"id,test_t\", sort=\"id desc\")";

  ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
  paramsLoc.set("expr", expr);
  paramsLoc.set("qt", "/stream");

  String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
  TupleStream solrStream = new SolrStream(url, paramsLoc);

  StreamContext context = new StreamContext();
  solrStream.setStreamContext(context);
  List<Tuple> tuples = getTuples(solrStream);
  assertEquals(1, tuples.size());
  Tuple tuple = tuples.get(0);
  assertEquals("hello", tuple.get("id"));
  assertEquals("l b c d c e", tuple.get("test_t"));
}
 
Example 3
Source File: BasicAuthIntegrationTest.java    From lucene-solr with Apache License 2.0
private void addDocument(String user, String pass, String... fields) throws IOException, SolrServerException {
  SolrInputDocument doc = new SolrInputDocument();
  boolean isKey = true;
  String key = null;
  for (String field : fields) {
    if (isKey) {
      key = field;
      isKey = false;
    } else {
      doc.setField(key, field);
      isKey = true;
    }
  }
  UpdateRequest update = new UpdateRequest();
  update.setBasicAuthCredentials(user, pass);
  update.add(doc);
  cluster.getSolrClient().request(update, COLLECTION);
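  // commit() processes this same request again with commit=true, so the document is sent a
  // second time (harmless, since re-adding overwrites by id) before the commit is applied.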
  update.commit(cluster.getSolrClient(), COLLECTION);
}
 
Example 4
Source File: AtomicUpdateRemovalJavabinTest.java    From lucene-solr with Apache License 2.0
@BeforeClass
public static void setupCluster() throws Exception {
  configureCluster(1)
      .addConfig("conf", configset("cloud-dynamic"))
      .configure();

  CollectionAdminRequest.createCollection(COLLECTION, "conf", NUM_SHARDS, NUM_REPLICAS)
      .setMaxShardsPerNode(MAX_SHARDS_PER_NODE)
      .process(cluster.getSolrClient());

  cluster.waitForActiveCollection(COLLECTION, 1, 1);

  final SolrInputDocument doc1 = sdoc(
      "id", "1",
      "title_s", "title_1", "title_s", "title_2",
      "tv_mv_text", "text_1", "tv_mv_text", "text_2",
      "count_is", 1, "count_is", 2,
      "count_md", 1.0, "count_md", 2.0,
      "timestamps_mdt", DATE_1, "timestamps_mdt", DATE_2);
  final UpdateRequest req = new UpdateRequest()
      .add(doc1);
  req.commit(cluster.getSolrClient(), COLLECTION);
}
 
Example 5
Source File: StreamExpressionTest.java    From lucene-solr with Apache License 2.0
@Test
public void testTupleStream() throws Exception {
  UpdateRequest updateRequest = new UpdateRequest();
  updateRequest.add(id, "hello", "test_t", "l b c d c e");
  updateRequest.add(id, "hello1", "test_t", "l b c d c");
  updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);

  String expr = "search("+COLLECTIONORALIAS+", q=\"`c d c`\", fl=\"id,test_t\", sort=\"id desc\")";

  // Add a Stream and an Evaluator to the Tuple.
  String cat = "tuple(results="+expr+", sum=add(1,1))";
  ModifiableSolrParams paramsLoc = new ModifiableSolrParams();
  paramsLoc.set("expr", cat);
  paramsLoc.set("qt", "/stream");

  String url = cluster.getJettySolrRunners().get(0).getBaseUrl().toString()+"/"+COLLECTIONORALIAS;
  TupleStream solrStream = new SolrStream(url, paramsLoc);

  StreamContext context = new StreamContext();
  solrStream.setStreamContext(context);
  List<Tuple> tuples = getTuples(solrStream);
  assertEquals(1, tuples.size());
  @SuppressWarnings({"unchecked", "rawtypes"})
  List<Map> results = (List<Map>) tuples.get(0).get("results");
  assertEquals("hello1", results.get(0).get("id"));
  assertEquals("l b c d c", results.get(0).get("test_t"));
  assertEquals("hello", results.get(1).get("id"));
  assertEquals("l b c d c e", results.get(1).get("test_t"));

  assertEquals(2L, tuples.get(0).getLong("sum").longValue());

}
 
Example 6
Source File: TestInPlaceUpdateWithRouteField.java    From lucene-solr with Apache License 2.0
@Test
public void testUpdatingDocValuesWithRouteField() throws Exception {

  new UpdateRequest()
      .deleteByQuery("*:*")
      .commit(cluster.getSolrClient(), COLLECTION);

  new UpdateRequest()
      .add(createDocs(NUMBER_OF_DOCS))
      .commit(cluster.getSolrClient(), COLLECTION);

  int id = TestUtil.nextInt(random(), 1, NUMBER_OF_DOCS - 1);
  SolrDocument solrDocument = queryDoc(id);
  Long initialVersion = (Long) solrDocument.get("_version_");
  Integer luceneDocId = (Integer) solrDocument.get("[docid]");
  String shardName = (String) solrDocument.get("shardName");
  Assert.assertThat(solrDocument.get("inplace_updatable_int"), is(id));

  int newDocValue = TestUtil.nextInt(random(), 1, 2 * NUMBER_OF_DOCS - 1);
  SolrInputDocument sdoc = sdoc("id", ""+id,
      // use route field in update command
      "shardName", shardName,
      "inplace_updatable_int", map("set", newDocValue));
  
  UpdateRequest updateRequest = new UpdateRequest()
      .add(sdoc);
  updateRequest.commit(cluster.getSolrClient(), COLLECTION);
  solrDocument = queryDoc(id);
  Long newVersion = (Long) solrDocument.get("_version_");
  Assert.assertTrue("Version of updated document must be greater than original one",
      newVersion > initialVersion);
  Assert.assertThat( "Doc value must be updated", solrDocument.get("inplace_updatable_int"), is(newDocValue));
  Assert.assertThat("Lucene doc id should not be changed for In-Place Updates.", solrDocument.get("[docid]"), is(luceneDocId));

  sdoc.remove("shardName");
  checkWrongCommandFailure(sdoc);

  sdoc.addField("shardName",  map("set", "newShardName"));
  checkWrongCommandFailure(sdoc);
}
 
Example 7
Source File: TestDownShardTolerantSearch.java    From lucene-solr with Apache License 2.0
@Test
public void searchingShouldFailWithoutTolerantSearchSetToTrue() throws Exception {

  CollectionAdminRequest.createCollection("tolerant", "conf", 2, 1)
      .process(cluster.getSolrClient());

  UpdateRequest update = new UpdateRequest();
  for (int i = 0; i < 100; i++) {
    update.add("id", Integer.toString(i));
  }
  update.commit(cluster.getSolrClient(), "tolerant");

  QueryResponse response = cluster.getSolrClient().query("tolerant", new SolrQuery("*:*").setRows(1));
  assertThat(response.getStatus(), is(0));
  assertThat(response.getResults().getNumFound(), is(100L));

  JettySolrRunner stoppedServer = cluster.stopJettySolrRunner(0);
  
  cluster.waitForJettyToStop(stoppedServer);

  response = cluster.getSolrClient().query("tolerant", new SolrQuery("*:*").setRows(1).setParam(ShardParams.SHARDS_TOLERANT, true));
  assertThat(response.getStatus(), is(0));
  assertTrue(response.getResults().getNumFound() > 0);

  SolrServerException e = expectThrows(SolrServerException.class,
      "Request should have failed because we killed shard1 jetty",
      () -> cluster.getSolrClient().query("tolerant", new SolrQuery("*:*").setRows(1)
          .setParam(ShardParams.SHARDS_TOLERANT, false))
  );
  assertNotNull(e.getCause());
  assertTrue("Error message from server should have the name of the down shard",
      e.getCause().getMessage().contains("shard"));
}
 
Example 8
Source File: StreamExpressionTest.java    From lucene-solr with Apache License 2.0
@Test
public void testBasicTextLogitStream() throws Exception {
  Assume.assumeTrue(!useAlias);

  CollectionAdminRequest.createCollection("destinationCollection", "ml", 2, 1).process(cluster.getSolrClient());
  cluster.waitForActiveCollection("destinationCollection", 2, 2);

  UpdateRequest updateRequest = new UpdateRequest();
  for (int i = 0; i < 5000; i+=2) {
    updateRequest.add(id, String.valueOf(i), "tv_text", "a b c c d", "out_i", "1");
    updateRequest.add(id, String.valueOf(i+1), "tv_text", "a b e e f", "out_i", "0");
  }
  updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);

  StreamExpression expression;
  TupleStream stream;
  List<Tuple> tuples;
  StreamContext streamContext = new StreamContext();
  SolrClientCache solrClientCache = new SolrClientCache();
  streamContext.setSolrClientCache(solrClientCache);

  StreamFactory factory = new StreamFactory()
      .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress())
      .withCollectionZkHost("destinationCollection", cluster.getZkServer().getZkAddress())
      .withFunctionName("features", FeaturesSelectionStream.class)
      .withFunctionName("train", TextLogitStream.class)
      .withFunctionName("search", CloudSolrStream.class)
      .withFunctionName("update", UpdateStream.class);
  try {
    expression = StreamExpressionParser.parse("features(collection1, q=\"*:*\", featureSet=\"first\", field=\"tv_text\", outcome=\"out_i\", numTerms=4)");
    stream = new FeaturesSelectionStream(expression, factory);
    stream.setStreamContext(streamContext);
    tuples = getTuples(stream);

    assertEquals(4, tuples.size());
    HashSet<String> terms = new HashSet<>();
    for (Tuple tuple : tuples) {
      terms.add((String) tuple.get("term_s"));
    }
    assertTrue(terms.contains("d"));
    assertTrue(terms.contains("c"));
    assertTrue(terms.contains("e"));
    assertTrue(terms.contains("f"));

    String textLogitExpression = "train(" +
        "collection1, " +
        "features(collection1, q=\"*:*\", featureSet=\"first\", field=\"tv_text\", outcome=\"out_i\", numTerms=4)," +
        "q=\"*:*\", " +
        "name=\"model\", " +
        "field=\"tv_text\", " +
        "outcome=\"out_i\", " +
        "maxIterations=100)";
    stream = factory.constructStream(textLogitExpression);
    stream.setStreamContext(streamContext);
    tuples = getTuples(stream);
    Tuple lastTuple = tuples.get(tuples.size() - 1);
    List<Double> lastWeights = lastTuple.getDoubles("weights_ds");
    Double[] lastWeightsArray = lastWeights.toArray(new Double[lastWeights.size()]);

    // first feature is bias value
    Double[] testRecord = {1.0, 1.17, 0.691, 0.0, 0.0};
    double d = sum(multiply(testRecord, lastWeightsArray));
    double prob = sigmoid(d);
    assertEquals(prob, 1.0, 0.1);

    // first feature is bias value
    Double[] testRecord2 = {1.0, 0.0, 0.0, 1.17, 0.691};
    d = sum(multiply(testRecord2, lastWeightsArray));
    prob = sigmoid(d);
    assertEquals(prob, 0, 0.1);

    stream = factory.constructStream("update(destinationCollection, batchSize=5, " + textLogitExpression + ")");
    getTuples(stream);
    cluster.getSolrClient().commit("destinationCollection");

    stream = factory.constructStream("search(destinationCollection, " +
        "q=*:*, " +
        "fl=\"iteration_i,* \", " +
        "rows=100, " +
        "sort=\"iteration_i desc\")");
    stream.setStreamContext(streamContext);
    tuples = getTuples(stream);
    assertEquals(100, tuples.size());
    Tuple lastModel = tuples.get(0);
    ClassificationEvaluation evaluation = ClassificationEvaluation.create(lastModel.getFields());
    assertTrue(evaluation.getF1() >= 1.0);
    assertEquals(Math.log(5000.0 / (2500 + 1)), lastModel.getDoubles("idfs_ds").get(0), 0.0001);
    // make sure the tuples is retrieved in correct order
    Tuple firstTuple = tuples.get(99);
    assertEquals(1L, (long) firstTuple.getLong("iteration_i"));
  } finally {
    CollectionAdminRequest.deleteCollection("destinationCollection").process(cluster.getSolrClient());
    solrClientCache.close();
  }
}
 
Example 9
Source File: StreamExpressionTest.java    From lucene-solr with Apache License 2.0
@Test
public void testFeaturesSelectionStream() throws Exception {
  Assume.assumeTrue(!useAlias);

  CollectionAdminRequest.createCollection("destinationCollection", "ml", 2, 1).process(cluster.getSolrClient());
  cluster.waitForActiveCollection("destinationCollection", 2, 2);

  UpdateRequest updateRequest = new UpdateRequest();
  for (int i = 0; i < 5000; i+=2) {
    updateRequest.add(id, String.valueOf(i), "whitetok", "a b c d", "out_i", "1");
    updateRequest.add(id, String.valueOf(i+1), "whitetok", "a b e f", "out_i", "0");
  }
  updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);

  StreamExpression expression;
  TupleStream stream;
  List<Tuple> tuples;
  StreamContext streamContext = new StreamContext();
  SolrClientCache solrClientCache = new SolrClientCache();
  streamContext.setSolrClientCache(solrClientCache);

  StreamFactory factory = new StreamFactory()
      .withCollectionZkHost("collection1", cluster.getZkServer().getZkAddress())
      .withCollectionZkHost("destinationCollection", cluster.getZkServer().getZkAddress())
      .withFunctionName("featuresSelection", FeaturesSelectionStream.class)
      .withFunctionName("search", CloudSolrStream.class)
      .withFunctionName("update", UpdateStream.class);


  try {
    String featuresExpression = "featuresSelection(collection1, q=\"*:*\", featureSet=\"first\", field=\"whitetok\", outcome=\"out_i\", numTerms=4)";
    // basic
    expression = StreamExpressionParser.parse(featuresExpression);
    stream = new FeaturesSelectionStream(expression, factory);
    stream.setStreamContext(streamContext);
    tuples = getTuples(stream);

    assertEquals(4, tuples.size());

    assertEquals("c", tuples.get(0).get("term_s"));
    assertEquals("d", tuples.get(1).get("term_s"));
    assertEquals("e", tuples.get(2).get("term_s"));
    assertEquals("f", tuples.get(3).get("term_s"));

    // update
    expression = StreamExpressionParser.parse("update(destinationCollection, " + featuresExpression + ")");
    stream = new UpdateStream(expression, factory);
    stream.setStreamContext(streamContext);
    getTuples(stream);
    cluster.getSolrClient().commit("destinationCollection");

    expression = StreamExpressionParser.parse("search(destinationCollection, q=featureSet_s:first, fl=\"index_i, term_s\", sort=\"index_i asc\")");
    stream = new CloudSolrStream(expression, factory);
    stream.setStreamContext(streamContext);
    tuples = getTuples(stream);
    assertEquals(4, tuples.size());
    assertEquals("c", tuples.get(0).get("term_s"));
    assertEquals("d", tuples.get(1).get("term_s"));
    assertEquals("e", tuples.get(2).get("term_s"));
    assertEquals("f", tuples.get(3).get("term_s"));
  } finally {
    CollectionAdminRequest.deleteCollection("destinationCollection").process(cluster.getSolrClient());
    solrClientCache.close();
  }
}
 
Example 10
Source File: StreamExpressionTest.java    From lucene-solr with Apache License 2.0
@Test
public void tooLargeForGetRequest() throws IOException, SolrServerException {
  // Test expressions which are larger than GET can handle
  UpdateRequest updateRequest = new UpdateRequest();
  for (int i = 0; i < 10; i++) {
    updateRequest.add(id, "a"+i, "test_t", "a b c d m l");
  }
  for (int i = 1; i <= 50; i++) {
    updateRequest.add(id, "id_" + i, "test_dt", getDateString("2016", "5", "1"), "price_f", "400.00");
  }
  updateRequest.commit(cluster.getSolrClient(), COLLECTIONORALIAS);

  SolrClientCache cache = new SolrClientCache();
  StreamContext streamContext = new StreamContext();
  streamContext.setSolrClientCache(cache);
  // use filter() so the query is parsed as a 'terms in set' query instead of a (weighted/scored)
  // BooleanQuery, which would trip the too-many-boolean-clauses limit
  String longQuery = "\"filter(id:(" + IntStream.range(0, 4000).mapToObj(i -> "a").collect(Collectors.joining(" ")) + "))\"";

  try {
    assertSuccess("significantTerms("+COLLECTIONORALIAS+", q="+longQuery+", field=\"test_t\", limit=3, minTermLength=1, maxDocFreq=\".5\")", streamContext);
    String expr = "timeseries("+COLLECTIONORALIAS+", q="+longQuery+", start=\"2013-01-01T01:00:00.000Z\", " +
            "end=\"2016-12-01T01:00:00.000Z\", " +
            "gap=\"+1YEAR\", " +
            "field=\"test_dt\", " +
            "format=\"yyyy\", " +
            "count(*), sum(price_f), max(price_f), min(price_f))";
    assertSuccess(expr, streamContext);
    expr = "facet("
                  +   "collection1, "
                  +   "q="+longQuery+", "
                  +   "fl=\"a_s,a_i,a_f\", "
                  +   "sort=\"a_s asc\", "
                  +   "buckets=\"a_s\", "
                  +   "bucketSorts=\"sum(a_i) asc\", "
                  +   "bucketSizeLimit=100, "
                  +   "sum(a_i), sum(a_f), "
                  +   "min(a_i), min(a_f), "
                  +   "max(a_i), max(a_f), "
                  +   "avg(a_i), avg(a_f), "
                  +   "count(*)"
                  + ")";
    assertSuccess(expr, streamContext);
    expr = "stats(" + COLLECTIONORALIAS + ", q="+longQuery+", sum(a_i), sum(a_f), min(a_i), min(a_f), max(a_i), max(a_f), avg(a_i), avg(a_f), count(*))";
    assertSuccess(expr, streamContext);
    expr = "search(" + COLLECTIONORALIAS + ", q="+longQuery+", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\")";
    assertSuccess(expr, streamContext);
    expr = "random(" + COLLECTIONORALIAS + ", q="+longQuery+", rows=\"1000\", fl=\"id, a_i\")";
    assertSuccess(expr, streamContext);
  } finally {
    cache.close();
  }
}
 
Example 11
Source File: LegacyNoFacetCloudTest.java    From lucene-solr with Apache License 2.0
@BeforeClass
public static void populate() throws Exception {
  intTestStart = new ArrayList<>();
  longTestStart = new ArrayList<>();
  floatTestStart = new ArrayList<>();
  doubleTestStart = new ArrayList<>();
  dateTestStart = new ArrayList<>();
  stringTestStart = new ArrayList<>();
  intMissing = 0;
  longMissing = 0;
  doubleMissing = 0;
  floatMissing = 0;
  dateMissing = 0;
  stringMissing = 0;

  UpdateRequest req = new UpdateRequest();
  for (int j = 0; j < NUM_LOOPS; ++j) {
    int i = j % INT;
    long l = j % LONG;
    float f = j % FLOAT;
    double d = j % DOUBLE;
    String dt = (1800 + j % DATE) + "-12-31T23:59:59Z";
    String s = "str" + (j % STRING);
    List<String> fields = new ArrayList<>();
    fields.add("id"); fields.add("1000" + j);

    if (i != 0) {
      fields.add("int_id"); fields.add("" + i);
      intTestStart.add(i);
    } else intMissing++;

    if (l != 0L) {
      fields.add("long_ld"); fields.add("" + l);
      longTestStart.add(l);
    } else longMissing++;

    if (f != 0.0f) {
      fields.add("float_fd"); fields.add("" + f);
      floatTestStart.add(f);
    } else floatMissing++;

    if (d != 0.0d) {
      fields.add("double_dd"); fields.add("" + d);
      doubleTestStart.add(d);
    } else doubleMissing++;

    if ((j % DATE) != 0) {
      fields.add("date_dtd"); fields.add(dt);
      dateTestStart.add(dt);
    } else dateMissing++;

    if ((j % STRING) != 0) {
      fields.add("string_sd"); fields.add(s);
      stringTestStart.add(s);
    } else stringMissing++;

    req.add(fields.toArray(new String[0]));
  }
  req.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
}
 
Example 12
Source File: LegacyQueryFacetCloudTest.java    From lucene-solr with Apache License 2.0
@Before
public void beforeTest() throws Exception {

  //INT
  int1TestStart.add(new ArrayList<Integer>());
  int2TestStart.add(new ArrayList<Integer>());

  //LONG
  longTestStart.add(new ArrayList<Long>());
  longTestStart.add(new ArrayList<Long>());

  //FLOAT
  floatTestStart.add(new ArrayList<Float>());
  floatTestStart.add(new ArrayList<Float>());
  floatTestStart.add(new ArrayList<Float>());

  UpdateRequest req = new UpdateRequest();

  for (int j = 0; j < NUM_LOOPS; ++j) {
    int i = j % INT;
    long l = j % LONG;
    float f = j % FLOAT;
    double d = j % DOUBLE;
    int dt = j % DATE;
    int s = j % STRING;

    List<String> fields = new ArrayList<>();
    fields.add("id"); fields.add("1000"+j);
    fields.add("int_id"); fields.add("" + i);
    fields.add("long_ld"); fields.add("" + l);
    fields.add("float_fd"); fields.add("" + f);
    fields.add("double_dd"); fields.add("" + d);
    fields.add("date_dtd"); fields.add((1000+dt) + "-01-01T23:59:59Z");
    fields.add("string_sd"); fields.add("abc" + s);
    req.add(fields.toArray(new String[0]));

    if (f <= 50) {
      int1TestStart.get(0).add(i);
    }
    if (f <= 30) {
      int2TestStart.get(0).add(i);
    }
    if (s == 1) {
      longTestStart.get(0).add(l);
    }
    if (s == 2) {
      longTestStart.get(1).add(l);
    }
    if (l >= 30) {
      floatTestStart.get(0).add(f);
    }
    if (d <= 50) {
      floatTestStart.get(1).add(f);
    }
    if (l >= 20) {
      floatTestStart.get(2).add(f);
    }
  }

  req.commit(cluster.getSolrClient(), COLLECTIONORALIAS);
}
 
Example 13
Source File: TestQueryingOnDownCollection.java    From lucene-solr with Apache License 2.0
/**
 * Assert that requests to a "down collection", i.e. a collection whose replicas are all in the
 * down state (but hosted on nodes that are live), fail fast and throw meaningful exceptions.
 */
@Test
public void testQueryToDownCollectionShouldFailFast() throws Exception {

  CollectionAdminRequest.createCollection(COLLECTION_NAME, "conf", 2, 1)
      .setBasicAuthCredentials(USERNAME, PASSWORD)
      .process(cluster.getSolrClient());

  // Add some dummy documents
  UpdateRequest update = (UpdateRequest) new UpdateRequest().setBasicAuthCredentials(USERNAME, PASSWORD);
  for (int i = 0; i < 100; i++) {
    update.add("id", Integer.toString(i));
  }
  update.commit(cluster.getSolrClient(), COLLECTION_NAME);

  // Bring down the replicas but keep the nodes up. This could be done through some combination of
  // collections API operations; to make it faster, we alter the cluster state directly. ;-)
  downAllReplicas();

  // assert all replicas are in down state
  List<Replica> replicas = getCollectionState(COLLECTION_NAME).getReplicas();
  for (Replica replica: replicas){
    assertEquals(Replica.State.DOWN, replica.getState());
  }

  // assert all nodes are live
  assertEquals(3, cluster.getSolrClient().getClusterStateProvider().getLiveNodes().size());

  SolrClient client = cluster.getJettySolrRunner(0).newClient();

  @SuppressWarnings({"rawtypes"})
  SolrRequest req = new QueryRequest(new SolrQuery("*:*").setRows(0)).setBasicAuthCredentials(USERNAME, PASSWORD);

  // Without the SOLR-13793 fix, requests to the down collection pile up until the nodes run out
  // of serviceable threads and crash, affecting even other collections hosted on those nodes.
  SolrException error = expectThrows(SolrException.class,
      "Request should fail after trying all replica nodes once",
      () -> client.request(req, COLLECTION_NAME)
  );

  client.close();

  assertEquals(SolrException.ErrorCode.INVALID_STATE.code, error.code());
  assertTrue(error.getMessage().contains("No active replicas found for collection: " + COLLECTION_NAME));

  // run same set of tests on v2 client which uses V2HttpCall
  Http2SolrClient v2Client = new Http2SolrClient.Builder(cluster.getJettySolrRunner(0).getBaseUrl().toString())
      .build();

  error = expectThrows(SolrException.class,
      "Request should fail after trying all replica nodes once",
      () -> v2Client.request(req, COLLECTION_NAME)
  );

  v2Client.close();

  assertEquals(SolrException.ErrorCode.INVALID_STATE.code, error.code());
  assertTrue(error.getMessage().contains("No active replicas found for collection: " + COLLECTION_NAME));
}
 
Example 14
Source File: TupleStreamDataSetIteratorTest.java    From deeplearning4j with Apache License 2.0
@BeforeClass
public static void setupCluster() throws Exception {

  final int numShards = 2;
  final int numReplicas = 2;
  final int maxShardsPerNode = 1;
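  // nodeCount is the ceiling of (numShards * numReplicas) / maxShardsPerNode:
  // the fewest nodes that can host every replica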
  final int nodeCount = (numShards*numReplicas + (maxShardsPerNode-1))/maxShardsPerNode;

  // create and configure cluster
  configureCluster(nodeCount)
      .addConfig("conf", configset("mini"))
      .configure();

  // create an empty collection
  CollectionAdminRequest.createCollection("mySolrCollection", "conf", numShards, numReplicas)
      .setMaxShardsPerNode(maxShardsPerNode)
      .process(cluster.getSolrClient());

  // compose an update request
  final UpdateRequest updateRequest = new UpdateRequest();

  final List<Integer> docIds = new ArrayList<Integer>();
  for (int phase = 1; phase <= 2; ++phase) {
    int docIdsIdx = 0;

    if (phase == 2) {
      Collections.shuffle(docIds);
    }

    final int increment = 32;
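    // the nested loops below sample the RGB cube in steps of 'increment',
    // clamping the 256 endpoint back to 255 so channel values stay in range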

    for (int b = 0; b <= 256; b += increment) {
      if (256 == b) b--;
      for (int g = 0; g <= 256; g += increment) {
        if (256 == g) g--;
        for (int r = 0; r <= 256; r += increment) {
          if (256 == r) r--;

          if (phase == 1) {
            docIds.add(docIds.size()+1);
            continue;
          }

          final float luminance = (b*0.0722f + g*0.7152f + r*0.2126f)/(255*3.0f); // https://en.wikipedia.org/wiki/Luma_(video)

          final SolrInputDocument doc = sdoc("id", Integer.toString(docIds.get(docIdsIdx++)),
            "channel_b_f", Float.toString(b/255f),
            "channel_g_f", Float.toString(g/255f),
            "channel_r_f", Float.toString(r/255f),
            "luminance_f", Float.toString(luminance));

          updateRequest.add(doc);
          ++numDocs;

        }
      }
    }
  }

  // make the update request
  updateRequest.commit(cluster.getSolrClient(), "mySolrCollection");
}
 
Example 15
Source File: ModelTupleStreamIntegrationTest.java    From deeplearning4j with Apache License 2.0
@BeforeClass
public static void setupCluster() throws Exception {

  final Path configsetPath = configset("mini-expressible");

  // create and serialize model
  {
    final Model model = buildModel();
    final File serializedModelFile = configsetPath
      .resolve(MY_SERIALIZED_MODEL_FILENAME)
      .toFile();
    ModelSerializer.writeModel(model, serializedModelFile.getPath(), false);
  }

  final String configName = "conf";
  final int numShards = 2;
  final int numReplicas = 2;
  final int maxShardsPerNode = 1;
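  // nodeCount is the ceiling of (numShards * numReplicas) / maxShardsPerNode:
  // the fewest nodes that can host every replica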
  final int nodeCount = (numShards*numReplicas + (maxShardsPerNode-1))/maxShardsPerNode;

  // create and configure cluster
  configureCluster(nodeCount)
      .addConfig(configName, configsetPath)
      .configure();

  // create an empty collection
  CollectionAdminRequest.createCollection(MY_COLLECTION_NAME, configName, numShards, numReplicas)
      .setMaxShardsPerNode(maxShardsPerNode)
      .process(cluster.getSolrClient());

  // compose an update request
  final UpdateRequest updateRequest = new UpdateRequest();

  // add some documents
  updateRequest.add(
    sdoc("id", "green",
      "channel_b_f", "0",
      "channel_g_f", "255",
      "channel_r_f", "0"));
  updateRequest.add(
    sdoc("id", "black",
      "channel_b_f", "0",
      "channel_g_f", "0",
      "channel_r_f", "0"));
  updateRequest.add(
    sdoc("id", "yellow",
      "channel_b_f", "0",
      "channel_g_f", "255",
      "channel_r_f", "255"));

  // make the update request
  updateRequest.commit(cluster.getSolrClient(), MY_COLLECTION_NAME);
}