Java Code Examples for org.apache.solr.common.SolrDocument#put()

The following examples show how to use org.apache.solr.common.SolrDocument#put(). You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: SmileWriterTest.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Builds a {@link SolrDocument} populated with randomized sample field values,
 * used as a serialization fixture in writer/codec tests.
 *
 * <p>Only the "id" field is deterministic (derived from {@code bufnum}); every
 * other field draws from {@code r}, so the produced values depend on the
 * caller's Random seed AND on the exact statement order below — do not reorder
 * the {@code put} calls if reproducibility across runs matters.
 *
 * @param r      source of randomness; pass a seeded Random for repeatable docs
 * @param bufnum numeric suffix making each document's id unique per buffer
 * @return a freshly populated SolrDocument
 */
public static SolrDocument sampleDoc(Random r, int bufnum) {
  SolrDocument sdoc = new SolrDocument();
  sdoc.put("id", "my_id_" + bufnum);
  // str(r, n) is a helper defined elsewhere in this test class; presumably it
  // produces a random string of roughly the given length — confirm at call site.
  sdoc.put("author", str(r, 10 + r.nextInt(10)));
  sdoc.put("address", str(r, 20 + r.nextInt(20)));
  sdoc.put("license", str(r, 10));
  sdoc.put("title", str(r, 5 + r.nextInt(10)));
  // Binary payload field: explicit UTF-8 avoids platform-charset dependence.
  sdoc.put("title_bin", str(r, 5 + r.nextInt(10)).getBytes(StandardCharsets.UTF_8));
  // "_dt" fields hold raw ints here (not Dates) — the writer under test only
  // cares about value round-tripping, not date semantics.
  sdoc.put("modified_dt", r.nextInt(1000000));
  sdoc.put("creation_dt", r.nextInt(1000000));
  sdoc.put("birthdate_dt", r.nextInt(1000000));
  sdoc.put("clean", r.nextBoolean());
  sdoc.put("dirty", r.nextBoolean());
  sdoc.put("employed", r.nextBoolean());
  sdoc.put("priority", r.nextInt(100));
  sdoc.put("dependents", r.nextInt(6));
  sdoc.put("level", r.nextInt(101));
  sdoc.put("education_level", r.nextInt(10));
  // higher level of reuse for string values: small value domains (50 states,
  // 20 countries, 2 boolean strings) exercise string-dedup/caching paths.
  sdoc.put("state", "S"+r.nextInt(50));
  sdoc.put("country", "Country"+r.nextInt(20));
  sdoc.put("some_boolean", ""+r.nextBoolean());
  sdoc.put("another_boolean", ""+r.nextBoolean());
  return sdoc;
}
 
Example 2
Source File: TestBinaryResponseWriter.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Verifies that {@code Resolver} marks only the fields named in the "fl"
 * parameter as writable: of the requested {@code id,xxx,ddd_s}, the document
 * contains {@code id} and {@code ddd_s} ({@code xxx} does not exist), so
 * exactly two fields must survive filtering.
 *
 * <p>Fix: {@code req.close()} now runs in a finally block — previously the
 * request leaked whenever {@code resolve} threw or an assertion failed.
 *
 * @throws Exception on any resolution failure
 */
public void testResolverSolrDocumentPartialFields() throws Exception {
  LocalSolrQueryRequest req = lrf.makeRequest("q", "*:*",
                                              "fl", "id,xxx,ddd_s");
  try {
    SolrDocument in = new SolrDocument();
    in.addField("id", 345);
    in.addField("aaa_s", "aaa");
    in.addField("bbb_s", "bbb");
    in.addField("ccc_s", "ccc");
    in.addField("ddd_s", "ddd");
    in.addField("eee_s", "eee");

    Resolver r = new Resolver(req, new SolrReturnFields(req));
    Object o = r.resolve(in, new JavaBinCodec());

    assertNotNull("obj is null", o);
    assertTrue("obj is not doc", o instanceof SolrDocument);

    // Rebuild the output doc keeping only the fields the resolver would write.
    SolrDocument out = new SolrDocument();
    for (Map.Entry<String, Object> e : in) {
      if (r.isWritable(e.getKey())) out.put(e.getKey(), e.getValue());
    }
    assertTrue("id not found", out.getFieldNames().contains("id"));
    assertTrue("ddd_s not found", out.getFieldNames().contains("ddd_s"));
    assertEquals("Wrong number of fields found",
                 2, out.getFieldNames().size());
  } finally {
    // Release the request even when an assertion above fails.
    req.close();
  }
}
 
Example 3
Source File: AnalysisHandler.java    From chronix.server with Apache License 2.0 5 votes vote down vote up
/**
 * Converts a Chronix time series into a SolrDocument carrying its join key,
 * attributes, name/type, optional (sorted) data payload, and start/end times.
 *
 * <p>Note the deliberate mix of SolrDocument mutators below: {@code put}
 * replaces a value outright, {@code addField} may accumulate multi-values,
 * and {@code setField} overwrites — do not normalize them without checking
 * downstream consumers.
 *
 * @param dataShouldReturned whether to include the series data at all
 * @param dataAsJson         if data is included, serialize it as JSON instead
 *                           of the binary blob form
 * @param timeSeries         the series to convert
 * @return the populated document
 */
private SolrDocument solrDocumentWithOutTimeSeriesFunctionResults(boolean dataShouldReturned, boolean dataAsJson, ChronixTimeSeries timeSeries) {
    SolrDocument doc = new SolrDocument();

    //add the join key
    doc.put(ChronixQueryParams.JOIN_KEY, timeSeries.getJoinKey());

    // Unchecked cast: getAttributes() presumably returns a raw Map — confirm
    // against the ChronixTimeSeries interface before tightening generics.
    for (Map.Entry<String, Object> entry : (Set<Map.Entry<String, Object>>) timeSeries.getAttributes().entrySet()) {
        doc.addField(entry.getKey(), entry.getValue());
    }

    //add the metric field as it is not stored in the getAttributes
    doc.addField(Schema.NAME, timeSeries.getName());
    doc.addField(Schema.TYPE, timeSeries.getType());

    if (dataShouldReturned) {
        //ensure that the returned data is sorted
        timeSeries.sort();
        //data should be returned serialized as json
        if (dataAsJson) {
            doc.setField(ChronixQueryParams.DATA_AS_JSON, timeSeries.dataAsJson());
        } else {
            doc.addField(Schema.DATA, timeSeries.dataAsBlob());
        }
    }

    //TODO: Fix this. It is expensive to calculate this based on the points.
    // How can we avoid this?
    // Optimization: Transformations should return the first and the last point
    // Aggregations / Analyses do not need to return this.
    doc.addField(Schema.START, timeSeries.getStart());
    doc.addField(Schema.END, timeSeries.getEnd());

    return doc;
}
 
Example 4
Source File: TestJavaBinCodec.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
@SuppressWarnings({"unchecked"})
/**
 * Micro-benchmark driver for JavaBin decoding: builds {@code nBuffers}
 * serialized sample documents, then decodes them {@code iter} times across
 * {@code nThreads} threads, optionally through a string cache of size
 * {@code cacheSz}, and prints throughput plus cache statistics.
 *
 * <p>Args (positional): nThreads, nBuffers, iter, cacheSz.
 *
 * @param args command-line style arguments as described above
 * @throws Exception on parse or decode failure
 */
public static void doDecodePerf(String[] args) throws Exception {
  int arg=0;
  int nThreads = Integer.parseInt(args[arg++]);
  int nBuffers = Integer.parseInt(args[arg++]);
  final long iter = Long.parseLong(args[arg++]);
  int cacheSz = Integer.parseInt(args[arg++]);

  // Fixed seed: every run decodes identical documents, so timings compare.
  Random r = new Random(0);

  final byte[][] buffers = new byte[nBuffers][];

  // Serialize one randomized sample doc per buffer (same field mix as
  // sampleDoc above; small value domains exercise string-cache reuse).
  for (int bufnum=0; bufnum<nBuffers; bufnum++) {
    SolrDocument sdoc = new SolrDocument();
    sdoc.put("id", "my_id_" + bufnum);
    sdoc.put("author", str(r, 10 + r.nextInt(10)));
    sdoc.put("address", str(r, 20 + r.nextInt(20)));
    sdoc.put("license", str(r, 10));
    sdoc.put("title", str(r, 5 + r.nextInt(10)));
    sdoc.put("modified_dt", r.nextInt(1000000));
    sdoc.put("creation_dt", r.nextInt(1000000));
    sdoc.put("birthdate_dt", r.nextInt(1000000));
    sdoc.put("clean", r.nextBoolean());
    sdoc.put("dirty", r.nextBoolean());
    sdoc.put("employed", r.nextBoolean());
    sdoc.put("priority", r.nextInt(100));
    sdoc.put("dependents", r.nextInt(6));
    sdoc.put("level", r.nextInt(101));
    sdoc.put("education_level", r.nextInt(10));
    // higher level of reuse for string values
    sdoc.put("state", "S"+r.nextInt(50));
    sdoc.put("country", "Country"+r.nextInt(20));
    sdoc.put("some_boolean", ""+r.nextBoolean());
    sdoc.put("another_boolean", ""+r.nextBoolean());

    buffers[bufnum] = getBytes(sdoc);
  }

  int ret = 0;
  final RTimer timer = new RTimer();
  @SuppressWarnings({"rawtypes"})
  ConcurrentLRUCache underlyingCache = cacheSz > 0 ? new ConcurrentLRUCache<>(cacheSz,cacheSz-cacheSz/10,cacheSz,cacheSz/10,false,true,null) : null;  // the cache in the first version of the patch was 10000,9000,10000,1000,false,true,null
  final JavaBinCodec.StringCache stringCache = underlyingCache==null ? null : new JavaBinCodec.StringCache(underlyingCache);
  // NOTE(review): in the threaded branch, per-thread doDecode results are
  // discarded, so the printed "ret=" is always 0 when nThreads > 0.
  if (nThreads <= 0) {
    ret += doDecode(buffers, iter, stringCache);
  } else {
    runInThreads(nThreads, () -> {
      try {
        doDecode(buffers, iter, stringCache);
      } catch (IOException e) {
        e.printStackTrace();
      }
    });
  }

  // Throughput is total decode iterations per second across all threads.
  long n = iter * Math.max(1,nThreads);
  System.out.println("ret=" + ret + " THROUGHPUT=" + (n*1000 / timer.getTime()));
  if (underlyingCache != null) System.out.println("cache: hits=" + underlyingCache.getStats().getCumulativeHits() + " lookups=" + underlyingCache.getStats().getCumulativeLookups() + " size=" + underlyingCache.getStats().getCurrentSize());
}
 
Example 5
Source File: TestPutSolrRecord.java    From nifi with Apache License 2.0 4 votes vote down vote up
@Test
public void testPutSolrOnTriggerIndex() throws IOException, InitializationException, SolrServerException {
    // End-to-end: parse a flat record through PutSolrRecord and verify the
    // indexed document matches field-for-field.
    final SolrClient solrClient = createEmbeddedSolrClient(DEFAULT_SOLR_CORE);
    TestableProcessor proc = new TestableProcessor(solrClient);

    TestRunner runner = createDefaultTestRunner(proc);
    MockRecordParser recordParser = new MockRecordParser();
    runner.addControllerService("parser", recordParser);

    runner.enableControllerService(recordParser);
    runner.setProperty(PutSolrRecord.RECORD_READER, "parser");

    runner.setProperty(PutSolrRecord.UPDATE_PATH, "/update");

    recordParser.addSchemaField("id", RecordFieldType.INT);
    recordParser.addSchemaField("first", RecordFieldType.STRING);
    recordParser.addSchemaField("last", RecordFieldType.STRING);
    recordParser.addSchemaField("grade", RecordFieldType.INT);
    recordParser.addSchemaField("subject", RecordFieldType.STRING);
    recordParser.addSchemaField("test", RecordFieldType.STRING);
    recordParser.addSchemaField("marks", RecordFieldType.INT);

    // Expected document after indexing the record below.
    SolrDocument solrDocument = new SolrDocument();
    solrDocument.put("id",1);
    solrDocument.put("first","Abhinav");
    solrDocument.put("last","R");
    solrDocument.put("grade",8);
    solrDocument.put("subject","Chemistry");
    solrDocument.put("test","term1");
    solrDocument.put("marks",98);

    recordParser.addRecord(1, "Abhinav","R",8,"Chemistry","term1", 98);

    try {
        // singletonMap replaces the double-brace HashMap initializer, which
        // created an anonymous class retaining the enclosing test instance.
        runner.enqueue(new byte[0], Collections.singletonMap("id", "1"));
        runner.run(1, false);
        verifySolrDocuments(proc.getSolrClient(), Collections.singletonList(solrDocument));
        runner.assertTransferCount(PutSolrRecord.REL_FAILURE, 0);
        runner.assertTransferCount(PutSolrRecord.REL_CONNECTION_FAILURE, 0);
        runner.assertTransferCount(PutSolrRecord.REL_SUCCESS, 1);
    } finally {
        try {
            proc.getSolrClient().close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}
 
Example 6
Source File: TestPutSolrRecord.java    From nifi with Apache License 2.0 4 votes vote down vote up
@Test
public void testPutSolrOnTriggerIndexForANestedRecord() throws IOException, InitializationException, SolrServerException {
    // End-to-end: a record with a nested "exam" sub-record should be
    // flattened into prefixed fields (exam_subject, exam_test, exam_marks).
    final SolrClient solrClient = createEmbeddedSolrClient(DEFAULT_SOLR_CORE);
    TestableProcessor proc = new TestableProcessor(solrClient);

    TestRunner runner = createDefaultTestRunner(proc);
    MockRecordParser recordParser = new MockRecordParser();
    runner.addControllerService("parser", recordParser);

    runner.enableControllerService(recordParser);
    runner.setProperty(PutSolrRecord.RECORD_READER, "parser");

    runner.setProperty(PutSolrRecord.UPDATE_PATH, "/update");

    recordParser.addSchemaField("id", RecordFieldType.INT);
    recordParser.addSchemaField("first", RecordFieldType.STRING);
    recordParser.addSchemaField("last", RecordFieldType.STRING);
    recordParser.addSchemaField("grade", RecordFieldType.INT);
    recordParser.addSchemaField("exam", RecordFieldType.RECORD);

    // Schema for the nested "exam" record.
    final List<RecordField> fields = new ArrayList<>();
    fields.add(new RecordField("subject", RecordFieldType.STRING.getDataType()));
    fields.add(new RecordField("test", RecordFieldType.STRING.getDataType()));
    fields.add(new RecordField("marks", RecordFieldType.INT.getDataType()));
    RecordSchema schema = new SimpleRecordSchema(fields);

    Map<String,Object> values = new HashMap<>();
    values.put("subject","Chemistry");
    values.put("test","term1");
    values.put("marks",98);
    final Record record = new MapRecord(schema,values);

    recordParser.addRecord(1, "Abhinav","R",8,record);

    // Expected flattened document: nested fields carry the "exam_" prefix.
    SolrDocument solrDocument = new SolrDocument();
    solrDocument.put("id",1);
    solrDocument.put("first","Abhinav");
    solrDocument.put("last","R");
    solrDocument.put("grade",8);
    solrDocument.put("exam_subject","Chemistry");
    solrDocument.put("exam_test","term1");
    solrDocument.put("exam_marks",98);

    try {
        // singletonMap replaces the double-brace HashMap initializer, which
        // created an anonymous class retaining the enclosing test instance.
        runner.enqueue(new byte[0], Collections.singletonMap("id", "1"));
        runner.run(1, false);
        runner.assertTransferCount(PutSolrRecord.REL_FAILURE, 0);
        runner.assertTransferCount(PutSolrRecord.REL_CONNECTION_FAILURE, 0);
        runner.assertTransferCount(PutSolrRecord.REL_SUCCESS, 1);
        verifySolrDocuments(proc.getSolrClient(), Collections.singletonList(solrDocument));
    } finally {
        try {
            proc.getSolrClient().close();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
}