Java Code Examples for org.apache.lucene.document.Field#setFloatValue()

The following examples show how to use org.apache.lucene.document.Field#setFloatValue(). They are taken from open source projects; the source file and originating project are noted above each example.
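Field#setFloatValue() replaces the current value of a float-valued field without changing its type or options, which lets a single Field instance (and a single Document) be reused across many added documents instead of being reallocated for every record. Below is a minimal, self-contained sketch of that reuse pattern; the directory choice, field names, and values are illustrative assumptions, and it presumes a Lucene release that provides ByteBuffersDirectory (8.x or later), not code taken from the examples that follow.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatPoint;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class SetFloatValueExample {
  public static void main(String[] args) throws Exception {
    try (Directory dir = new ByteBuffersDirectory();
         IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {

      // Build the document and its fields once, then reuse them for every record.
      Document doc = new Document();
      Field priceIndexed = new FloatPoint("price", 0f);   // indexed for exact/range queries
      Field priceStored = new StoredField("price", 0f);   // stored so the value can be retrieved
      doc.add(priceIndexed);
      doc.add(priceStored);

      float[] prices = { 9.99f, 19.95f, 4.50f };
      for (float price : prices) {
        // setFloatValue() only swaps the value; the field's type and options are unchanged.
        priceIndexed.setFloatValue(price);
        priceStored.setFloatValue(price);
        writer.addDocument(doc);
      }
    }
  }
}

Examples 2 and 3 below follow the same pattern inside Lucene's own test setup code.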
Example 1
Source File: TestLegacyField.java    From lucene-solr with Apache License 2.0
public void testLegacyFloatField() throws Exception {
  Field[] fields = new Field[] {
      new LegacyFloatField("foo", 5f, Field.Store.NO),
      new LegacyFloatField("foo", 5f, Field.Store.YES)
  };

  for (Field field : fields) {
    trySetByteValue(field);
    trySetBytesValue(field);
    trySetBytesRefValue(field);
    trySetDoubleValue(field);
    trySetIntValue(field);
    field.setFloatValue(6f); // ok
    trySetLongValue(field);
    trySetReaderValue(field);
    trySetShortValue(field);
    trySetStringValue(field);
    trySetTokenStreamValue(field);
    
    assertEquals(6f, field.numericValue().floatValue(), 0.0f);
  }
}
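
The trySet*Value helpers are defined elsewhere in TestLegacyField and are not part of this snippet; each one asserts that calling a setter of the wrong type throws. A minimal sketch of one such helper, assuming LuceneTestCase's expectThrows is in scope as it is in this test class:

private void trySetLongValue(Field f) {
  // A LegacyFloatField holds a Float, so every setter other than setFloatValue(...) must be rejected.
  expectThrows(IllegalArgumentException.class, () -> {
    f.setLongValue(10L);
  });
}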
 
Example 2
Source File: TestDiversifiedTopDocsCollector.java    From lucene-solr with Apache License 2.0
@Override
public void setUp() throws Exception {
  super.setUp();

  // populate an index with documents - artist, song and weeksAtNumberOne
  dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  Document doc = new Document();

  Field yearField = newTextField("year", "", Field.Store.NO);
  SortedDocValuesField artistField = new SortedDocValuesField("artist",
      new BytesRef(""));
  Field weeksAtNumberOneField = new FloatDocValuesField("weeksAtNumberOne",
      0.0F);
  Field weeksStoredField = new StoredField("weeks", 0.0F);
  Field idField = newStringField("id", "", Field.Store.YES);
  Field songField = newTextField("song", "", Field.Store.NO);
  Field storedArtistField = newTextField("artistName", "", Field.Store.NO);

  doc.add(idField);
  doc.add(weeksAtNumberOneField);
  doc.add(storedArtistField);
  doc.add(songField);
  doc.add(weeksStoredField);
  doc.add(yearField);
  doc.add(artistField);

  parsedRecords.clear();
  for (int i = 0; i < hitsOfThe60s.length; i++) {
    String[] cols = hitsOfThe60s[i].split("\t");
    Record record = new Record(String.valueOf(i), cols[0], cols[1], cols[2],
        Float.parseFloat(cols[3]));
    parsedRecords.put(record.id, record);
    idField.setStringValue(record.id);
    yearField.setStringValue(record.year);
    storedArtistField.setStringValue(record.artist);
    artistField.setBytesValue(new BytesRef(record.artist));
    songField.setStringValue(record.song);
    weeksStoredField.setFloatValue(record.weeks);
    weeksAtNumberOneField.setFloatValue(record.weeks);
    writer.addDocument(doc);
    if (i % 10 == 0) {
      // Causes the creation of multiple segments for our test
      writer.commit();
    }
  }
  reader = writer.getReader();
  writer.close();
  searcher = newSearcher(reader);
  artistDocValues = MultiDocValues.getSortedValues(reader, "artist");
}
 
Example 3
Source File: BaseTestRangeFilter.java    From lucene-solr with Apache License 2.0
private static IndexReader build(Random random, TestIndex index) throws IOException {
  /* build an index */
  
  Document doc = new Document();
  Field idField = newStringField(random, "id", "", Field.Store.YES);
  Field idDVField = new SortedDocValuesField("id", new BytesRef());
  Field intIdField = new IntPoint("id_int", 0);
  Field intDVField = new NumericDocValuesField("id_int", 0);
  Field floatIdField = new FloatPoint("id_float", 0);
  Field floatDVField = new NumericDocValuesField("id_float", 0);
  Field longIdField = new LongPoint("id_long", 0);
  Field longDVField = new NumericDocValuesField("id_long", 0);
  Field doubleIdField = new DoublePoint("id_double", 0);
  Field doubleDVField = new NumericDocValuesField("id_double", 0);
  Field randField = newStringField(random, "rand", "", Field.Store.YES);
  Field randDVField = new SortedDocValuesField("rand", new BytesRef());
  Field bodyField = newStringField(random, "body", "", Field.Store.NO);
  Field bodyDVField = new SortedDocValuesField("body", new BytesRef());
  doc.add(idField);
  doc.add(idDVField);
  doc.add(intIdField);
  doc.add(intDVField);
  doc.add(floatIdField);
  doc.add(floatDVField);
  doc.add(longIdField);
  doc.add(longDVField);
  doc.add(doubleIdField);
  doc.add(doubleDVField);
  doc.add(randField);
  doc.add(randDVField);
  doc.add(bodyField);
  doc.add(bodyDVField);

  RandomIndexWriter writer = new RandomIndexWriter(random, index.index, 
                                                   newIndexWriterConfig(random, new MockAnalyzer(random))
                                                   .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(TestUtil.nextInt(random, 50, 1000)).setMergePolicy(newLogMergePolicy()));
  TestUtil.reduceOpenFiles(writer.w);

  while(true) {

    int minCount = 0;
    int maxCount = 0;

    for (int d = minId; d <= maxId; d++) {
      idField.setStringValue(pad(d));
      idDVField.setBytesValue(new BytesRef(pad(d)));
      intIdField.setIntValue(d);
      intDVField.setLongValue(d);
      floatIdField.setFloatValue(d);
      floatDVField.setLongValue(Float.floatToRawIntBits(d));
      longIdField.setLongValue(d);
      longDVField.setLongValue(d);
      doubleIdField.setDoubleValue(d);
      doubleDVField.setLongValue(Double.doubleToRawLongBits(d));
      int r = index.allowNegativeRandomInts ? random.nextInt() : random
        .nextInt(Integer.MAX_VALUE);
      if (index.maxR < r) {
        index.maxR = r;
        maxCount = 1;
      } else if (index.maxR == r) {
        maxCount++;
      }

      if (r < index.minR) {
        index.minR = r;
        minCount = 1;
      } else if (r == index.minR) {
        minCount++;
      }
      randField.setStringValue(pad(r));
      randDVField.setBytesValue(new BytesRef(pad(r)));
      bodyField.setStringValue("body");
      bodyDVField.setBytesValue(new BytesRef("body"));
      writer.addDocument(doc);
    }

    if (minCount == 1 && maxCount == 1) {
      // our subclasses rely on only 1 doc having the min or
      // max, so, we loop until we satisfy that.  it should be
      // exceedingly rare (Yonik calculates 1 in ~429,000 times)
      // that this loop requires more than one try:
      IndexReader ir = writer.getReader();
      writer.close();
      return ir;
    }

    // try again
    writer.deleteAll();
  }
}