Java Code Examples for org.apache.lucene.document.Field.Store#NO

The following examples show how to use org.apache.lucene.document.Field.Store#NO. Each example is drawn from an open-source project; the source file, originating project, and license are noted above each snippet.
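
Field.Store.NO indexes a field so it can be searched but does not store its value, so the value cannot be read back from search hits. Before the project examples, here is a minimal, self-contained sketch contrasting it with Store.YES; it is not taken from any of the projects below and assumes a recent Lucene release (8.x or later, for ByteBuffersDirectory).

// Minimal sketch: Store.YES keeps the value retrievable from hits, Store.NO does not.
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class StoreNoDemo {
  public static void main(String[] args) throws Exception {
    Directory dir = new ByteBuffersDirectory();
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      doc.add(new StringField("id", "42", Store.YES));      // searchable and retrievable
      doc.add(new StringField("color", "blue", Store.NO));  // searchable only
      writer.addDocument(doc);
    }
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      IndexSearcher searcher = new IndexSearcher(reader);
      TopDocs hits = searcher.search(new TermQuery(new Term("color", "blue")), 1);
      Document hit = searcher.doc(hits.scoreDocs[0].doc);
      System.out.println(hit.get("id"));     // prints "42"
      System.out.println(hit.get("color"));  // prints "null": the value was indexed but not stored
    }
  }
}
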
Example 1
Source File: SimpleDocumentWriter.java    From dremio-oss with Apache License 2.0
private void addToDoc(IndexKey key, String... values) {
  Preconditions.checkArgument(key.getValueType() == String.class);
  final boolean sorted = key.isSorted();
  if (sorted) {
    checkIfSorted(key, (Object[]) values);
  }

  checkIfMultiValueField(key, (Object[]) values);

  final String indexFieldName = key.getIndexFieldName();
  final Store stored = key.isStored() ? Store.YES : Store.NO;
  for (final String value : values) {
    if (value == null) {
      continue;
    }
    final String truncatedValue = StringUtils.abbreviate(value, MAX_STRING_LENGTH);
    doc.add(new StringField(indexFieldName, truncatedValue, stored));
  }

  if (sorted && values.length == 1 && values[0] != null) {
    Preconditions.checkArgument(key.getSortedValueType() == SearchFieldSorting.FieldType.STRING);
    doc.add(new SortedDocValuesField(indexFieldName, new BytesRef(values[0])));
  }
}
 
Example 2
Source File: SimpleDocumentWriter.java    From dremio-oss with Apache License 2.0
private void addToDoc(IndexKey key, byte[]... values) {
  Preconditions.checkArgument(key.getValueType() == String.class);
  final boolean sorted = key.isSorted();
  if (sorted) {
    checkIfSorted(key, (Object[]) values);
  }

  checkIfMultiValueField(key, (Object[]) values);

  final String indexFieldName = key.getIndexFieldName();
  final Store stored = key.isStored() ? Store.YES : Store.NO;
  for (final byte[] value : values) {
    if (value == null) {
      continue;
    }
    final BytesRef truncatedValue = new BytesRef(value, 0, Math.min(value.length, MAX_STRING_LENGTH));
    doc.add(new StringField(indexFieldName, truncatedValue, stored));
  }

  if (sorted && values.length == 1 && values[0] != null) {
    Preconditions.checkArgument(key.getSortedValueType() == SearchFieldSorting.FieldType.STRING);
    doc.add(new SortedDocValuesField(indexFieldName, new BytesRef(values[0])));
  }
}
 
Example 3
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0
public void testClearFilter() throws IOException {
  Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);

  Document doc = new Document();
  StringField f = new StringField("color", "", Store.NO);
  doc.add(f);
  final int numDocs = atLeast(10);
  for (int i = 0; i < numDocs; ++i) {
    f.setStringValue(random().nextBoolean() ? "red" : "blue");
    w.addDocument(doc);
  }
  final DirectoryReader reader = w.getReader();
  final IndexSearcher searcher = newSearcher(reader);

  final Query query1 = new TermQuery(new Term("color", "blue"));
  // different instance yet equal
  final Query query2 = new TermQuery(new Term("color", "blue"));

  final LRUQueryCache queryCache = new LRUQueryCache(Integer.MAX_VALUE, Long.MAX_VALUE, context -> true, 1);
  searcher.setQueryCache(queryCache);
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);

  searcher.search(new BoostQuery(new ConstantScoreQuery(query1), random().nextFloat()), 1);
  assertEquals(1, queryCache.cachedQueries().size());

  queryCache.clearQuery(query2);

  assertTrue(queryCache.cachedQueries().isEmpty());
  queryCache.assertConsistent();

  reader.close();
  w.close();
  dir.close();
}
 
Example 4
Source File: TokenMapperMurmur.java    From stratio-cassandra with Apache License 2.0
/** {@inheritDoc} */
@Override
public void addFields(Document document, DecoratedKey partitionKey) {
    Long value = (Long) partitionKey.getToken().getTokenValue();
    Field tokenField = new LongField(FIELD_NAME, value, Store.NO);
    document.add(tokenField);
}
 
Example 5
Source File: ReferenceCountingReadOnlyIndexReaderFactory.java    From alfresco-repository with GNU Lesser General Public License v3.0
public Field get(int n, FieldSelector fieldSelector) throws IOException
{
    return new Field(fieldName, getStringValue(n, fieldName), Store.NO, Index.UN_TOKENIZED);
}
 
Example 6
Source File: BaseStoredFieldsFormatTestCase.java    From lucene-solr with Apache License 2.0
@Nightly
public void testBigDocuments() throws IOException {
  assumeWorkingMMapOnWindows();
  
  // "big" as "much bigger than the chunk size"
  // for this test we force a FS dir
  // we can't just use newFSDirectory, because this test doesn't really index anything.
  // so if we get NRTCachingDir+SimpleText, we make massive stored fields and OOM (LUCENE-4484)
  Directory dir = new MockDirectoryWrapper(random(), new MMapDirectory(createTempDir("testBigDocuments")));
  IndexWriterConfig iwConf = newIndexWriterConfig(new MockAnalyzer(random()));
  iwConf.setMaxBufferedDocs(RandomNumbers.randomIntBetween(random(), 2, 30));
  RandomIndexWriter iw = new RandomIndexWriter(random(), dir, iwConf);

  if (dir instanceof MockDirectoryWrapper) {
    ((MockDirectoryWrapper) dir).setThrottling(Throttling.NEVER);
  }

  final Document emptyDoc = new Document(); // contains only the shared id field added below
  final Document bigDoc1 = new Document(); // lot of small fields
  final Document bigDoc2 = new Document(); // 1 very big field

  final Field idField = new StringField("id", "", Store.NO);
  emptyDoc.add(idField);
  bigDoc1.add(idField);
  bigDoc2.add(idField);

  final FieldType onlyStored = new FieldType(StringField.TYPE_STORED);
  onlyStored.setIndexOptions(IndexOptions.NONE);

  final Field smallField = new Field("fld", randomByteArray(random().nextInt(10), 256), onlyStored);
  final int numFields = RandomNumbers.randomIntBetween(random(), 500000, 1000000);
  for (int i = 0; i < numFields; ++i) {
    bigDoc1.add(smallField);
  }

  final Field bigField = new Field("fld", randomByteArray(RandomNumbers.randomIntBetween(random(), 1000000, 5000000), 2), onlyStored);
  bigDoc2.add(bigField);

  final int numDocs = atLeast(5);
  final Document[] docs = new Document[numDocs];
  for (int i = 0; i < numDocs; ++i) {
    docs[i] = RandomPicks.randomFrom(random(), Arrays.asList(emptyDoc, bigDoc1, bigDoc2));
  }
  for (int i = 0; i < numDocs; ++i) {
    idField.setStringValue("" + i);
    iw.addDocument(docs[i]);
    if (random().nextInt(numDocs) == 0) {
      iw.commit();
    }
  }
  iw.commit();
  iw.forceMerge(1); // look at what happens when big docs are merged
  final DirectoryReader rd = maybeWrapWithMergingReader(DirectoryReader.open(dir));
  final IndexSearcher searcher = new IndexSearcher(rd);
  for (int i = 0; i < numDocs; ++i) {
    final Query query = new TermQuery(new Term("id", "" + i));
    final TopDocs topDocs = searcher.search(query, 1);
    assertEquals("" + i, 1, topDocs.totalHits.value);
    final Document doc = rd.document(topDocs.scoreDocs[0].doc);
    assertNotNull(doc);
    final IndexableField[] fieldValues = doc.getFields("fld");
    assertEquals(docs[i].getFields("fld").length, fieldValues.length);
    if (fieldValues.length > 0) {
      assertEquals(docs[i].getFields("fld")[0].binaryValue(), fieldValues[0].binaryValue());
    }
  }
  rd.close();
  iw.close();
  dir.close();
}
 
Example 7
Source File: TestBooleanRewrites.java    From lucene-solr with Apache License 2.0
public void testRandom() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  TextField f = new TextField("body", "a b c", Store.NO);
  doc.add(f);
  w.addDocument(doc);
  f.setStringValue("");
  w.addDocument(doc);
  f.setStringValue("a b");
  w.addDocument(doc);
  f.setStringValue("b c");
  w.addDocument(doc);
  f.setStringValue("a");
  w.addDocument(doc);
  f.setStringValue("c");
  w.addDocument(doc);
  final int numRandomDocs = atLeast(3);
  for (int i = 0; i < numRandomDocs; ++i) {
    final int numTerms = random().nextInt(20);
    StringBuilder text = new StringBuilder();
    for (int j = 0; j < numTerms; ++j) {
      text.append((char) ('a' + random().nextInt(4))).append(' ');
    }
    f.setStringValue(text.toString());
    w.addDocument(doc);
  }
  final IndexReader reader = w.getReader();
  w.close();
  final IndexSearcher searcher1 = newSearcher(reader);
  final IndexSearcher searcher2 = new IndexSearcher(reader) {
    @Override
    public Query rewrite(Query original) throws IOException {
      // no-op: disable rewriting
      return original;
    }
  };
  searcher2.setSimilarity(searcher1.getSimilarity());

  final int iters = atLeast(1000);
  for (int i = 0; i < iters; ++i) {
    Query query = randomQuery();
    final TopDocs td1 = searcher1.search(query, 100);
    final TopDocs td2 = searcher2.search(query, 100);
    assertEquals(td1, td2);
  }

  searcher1.getIndexReader().close();
  dir.close();
}
 
Example 8
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0
public void testLRUEviction() throws Exception {
  Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);

  Document doc = new Document();
  StringField f = new StringField("color", "blue", Store.NO);
  doc.add(f);
  w.addDocument(doc);
  f.setStringValue("red");
  w.addDocument(doc);
  f.setStringValue("green");
  w.addDocument(doc);
  final DirectoryReader reader = w.getReader();
  final IndexSearcher searcher = newSearcher(reader);
  final LRUQueryCache queryCache = new LRUQueryCache(2, 100000, context -> true, Float.POSITIVE_INFINITY);

  final Query blue = new TermQuery(new Term("color", "blue"));
  final Query red = new TermQuery(new Term("color", "red"));
  final Query green = new TermQuery(new Term("color", "green"));

  assertEquals(Collections.emptyList(), queryCache.cachedQueries());

  searcher.setQueryCache(queryCache);
  // the filter is not cached on any segment: no changes
  searcher.setQueryCachingPolicy(NEVER_CACHE);
  searcher.search(new ConstantScoreQuery(green), 1);
  assertEquals(Collections.emptyList(), queryCache.cachedQueries());

  searcher.setQueryCachingPolicy(ALWAYS_CACHE);
  searcher.search(new ConstantScoreQuery(red), 1);

  if (!(queryCache.cachedQueries().equals(Collections.emptyList()))) {
    assertEquals(Arrays.asList(red), queryCache.cachedQueries());
  } else {
    // Let the cache load be completed
    Thread.sleep(200);
    assertEquals(Arrays.asList(red), queryCache.cachedQueries());
  }

  searcher.search(new ConstantScoreQuery(green), 1);

  if (!(queryCache.cachedQueries().equals(Arrays.asList(red)))) {
    assertEquals(Arrays.asList(red, green), queryCache.cachedQueries());
  } else {
    // Let the cache load be completed
    Thread.sleep(200);
    assertEquals(Arrays.asList(red, green), queryCache.cachedQueries());
  }

  searcher.search(new ConstantScoreQuery(red), 1);
  assertEquals(Arrays.asList(green, red), queryCache.cachedQueries());

  searcher.search(new ConstantScoreQuery(blue), 1);

  if (!(queryCache.cachedQueries().equals(Arrays.asList(green, red)))) {
    assertEquals(Arrays.asList(red, blue), queryCache.cachedQueries());
  } else {
    // Let the cache load be completed
    Thread.sleep(200);
    assertEquals(Arrays.asList(red, blue), queryCache.cachedQueries());
  }

  searcher.search(new ConstantScoreQuery(blue), 1);
  assertEquals(Arrays.asList(red, blue), queryCache.cachedQueries());

  searcher.search(new ConstantScoreQuery(green), 1);

  if (!(queryCache.cachedQueries().equals(Arrays.asList(red, blue)))) {
    assertEquals(Arrays.asList(blue, green), queryCache.cachedQueries());
  } else {
    // Let the cache load be completed
    Thread.sleep(200);
    assertEquals(Arrays.asList(blue, green), queryCache.cachedQueries());
  }

  searcher.setQueryCachingPolicy(NEVER_CACHE);
  searcher.search(new ConstantScoreQuery(red), 1);
  assertEquals(Arrays.asList(blue, green), queryCache.cachedQueries());

  reader.close();
  w.close();
  dir.close();
}
 
Example 9
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0
public void testStats() throws IOException {
  final LRUQueryCache queryCache = new LRUQueryCache(1, 10000000, context -> true, 1);

  Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);

  final List<String> colors = Arrays.asList("blue", "red", "green", "yellow");

  Document doc = new Document();
  StringField f = new StringField("color", "", Store.NO);
  doc.add(f);
  for (int i = 0; i < 10; ++i) {
    f.setStringValue(RandomPicks.randomFrom(random(), colors));
    w.addDocument(doc);
    if (random().nextBoolean()) {
      w.getReader().close();
    }
  }

  final DirectoryReader reader = w.getReader();
  final int segmentCount = reader.leaves().size();
  final IndexSearcher searcher = new IndexSearcher(reader);
  final Query query = new TermQuery(new Term("color", "red"));
  final Query query2 = new TermQuery(new Term("color", "blue"));

  searcher.setQueryCache(queryCache);
  // first pass, lookups without caching that all miss
  searcher.setQueryCachingPolicy(NEVER_CACHE);
  for (int i = 0; i < 10; ++i) {
    searcher.search(new ConstantScoreQuery(query), 1);
  }
  assertEquals(10 * segmentCount, queryCache.getTotalCount());
  assertEquals(0, queryCache.getHitCount());
  assertEquals(10 * segmentCount, queryCache.getMissCount());
  assertEquals(0, queryCache.getCacheCount());
  assertEquals(0, queryCache.getEvictionCount());
  assertEquals(0, queryCache.getCacheSize());

  // second pass, lookups + caching, only the first one is a miss
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);
  for (int i = 0; i < 10; ++i) {
    searcher.search(new ConstantScoreQuery(query), 1);
  }
  assertEquals(20 * segmentCount, queryCache.getTotalCount());
  assertEquals(9 * segmentCount, queryCache.getHitCount());
  assertEquals(11 * segmentCount, queryCache.getMissCount());
  assertEquals(1 * segmentCount, queryCache.getCacheCount());
  assertEquals(0, queryCache.getEvictionCount());
  assertEquals(1 * segmentCount, queryCache.getCacheSize());

  // third pass lookups without caching, we only have hits
  searcher.setQueryCachingPolicy(NEVER_CACHE);
  for (int i = 0; i < 10; ++i) {
    searcher.search(new ConstantScoreQuery(query), 1);
  }
  assertEquals(30 * segmentCount, queryCache.getTotalCount());
  assertEquals(19 * segmentCount, queryCache.getHitCount());
  assertEquals(11 * segmentCount, queryCache.getMissCount());
  assertEquals(1 * segmentCount, queryCache.getCacheCount());
  assertEquals(0, queryCache.getEvictionCount());
  assertEquals(1 * segmentCount, queryCache.getCacheSize());

  // fourth pass with a different filter which will trigger evictions since the size is 1
  searcher.setQueryCachingPolicy(ALWAYS_CACHE);
  for (int i = 0; i < 10; ++i) {
    searcher.search(new ConstantScoreQuery(query2), 1);
  }
  assertEquals(40 * segmentCount, queryCache.getTotalCount());
  assertEquals(28 * segmentCount, queryCache.getHitCount());
  assertEquals(12 * segmentCount, queryCache.getMissCount());
  assertEquals(2 * segmentCount, queryCache.getCacheCount());
  assertEquals(1 * segmentCount, queryCache.getEvictionCount());
  assertEquals(1 * segmentCount, queryCache.getCacheSize());

  // now close, causing evictions due to the closing of segment cores
  reader.close();
  w.close();
  assertEquals(40 * segmentCount, queryCache.getTotalCount());
  assertEquals(28 * segmentCount, queryCache.getHitCount());
  assertEquals(12 * segmentCount, queryCache.getMissCount());
  assertEquals(2 * segmentCount, queryCache.getCacheCount());
  assertEquals(2 * segmentCount, queryCache.getEvictionCount());
  assertEquals(0, queryCache.getCacheSize());

  dir.close();
}
 
Example 10
Source File: TestLRUQueryCache.java    From lucene-solr with Apache License 2.0
public void testRandom() throws IOException {
  Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  TextField f = new TextField("foo", "foo", Store.NO);
  doc.add(f);
  w.addDocument(doc);
  IndexReader reader = w.getReader();

  final int maxSize;
  final long maxRamBytesUsed;
  final int iters;

  if (TEST_NIGHTLY) {
    maxSize = TestUtil.nextInt(random(), 1, 10000);
    maxRamBytesUsed = TestUtil.nextLong(random(), 1, 5000000);
    iters = atLeast(20000);
  } else {
    maxSize = TestUtil.nextInt(random(), 1, 1000);
    maxRamBytesUsed = TestUtil.nextLong(random(), 1, 500000);
    iters = atLeast(2000);
  }

  final LRUQueryCache queryCache = new LRUQueryCache(maxSize, maxRamBytesUsed, context -> random().nextBoolean(), Float.POSITIVE_INFINITY);
  IndexSearcher uncachedSearcher = null;
  IndexSearcher cachedSearcher = null;

  for (int i = 0; i < iters; ++i) {
    if (i == 0 || random().nextInt(100) == 1) {
      reader.close();
      f.setStringValue(RandomPicks.randomFrom(random(), Arrays.asList("foo", "bar", "bar baz")));
      w.addDocument(doc);
      if (random().nextBoolean()) {
        w.deleteDocuments(buildRandomQuery(0));
      }
      reader = w.getReader();
      uncachedSearcher = newSearcher(reader);
      uncachedSearcher.setQueryCache(null);
      cachedSearcher = newSearcher(reader);
      cachedSearcher.setQueryCache(queryCache);
      cachedSearcher.setQueryCachingPolicy(ALWAYS_CACHE);
    }
    final Query q = buildRandomQuery(0);
    assertEquals(uncachedSearcher.count(q), cachedSearcher.count(q));
    if (rarely()) {
      queryCache.assertConsistent();
    }
  }
  queryCache.assertConsistent();
  w.close();
  reader.close();
  dir.close();
  queryCache.assertConsistent();
}
 
Example 11
Source File: TestTopFieldCollector.java    From lucene-solr with Apache License 2.0
public void testComputeScoresOnlyOnce() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  StringField text = new StringField("text", "foo", Store.NO);
  doc.add(text);
  NumericDocValuesField relevance = new NumericDocValuesField("relevance", 1);
  doc.add(relevance);
  w.addDocument(doc);
  text.setStringValue("bar");
  w.addDocument(doc);
  text.setStringValue("baz");
  w.addDocument(doc);
  IndexReader reader = w.getReader();
  Query foo = new TermQuery(new Term("text", "foo"));
  Query bar = new TermQuery(new Term("text", "bar"));
  foo = new BoostQuery(foo, 2);
  Query baz = new TermQuery(new Term("text", "baz"));
  baz = new BoostQuery(baz, 3);
  Query query = new BooleanQuery.Builder()
      .add(foo, Occur.SHOULD)
      .add(bar, Occur.SHOULD)
      .add(baz, Occur.SHOULD)
      .build();
  final IndexSearcher searcher = new IndexSearcher(reader);
  for (Sort sort : new Sort[] {new Sort(FIELD_SCORE), new Sort(new SortField("f", SortField.Type.SCORE))}) {
    final TopFieldCollector topCollector = TopFieldCollector.create(sort, TestUtil.nextInt(random(), 1, 2), Integer.MAX_VALUE);
    final Collector assertingCollector = new Collector() {
      @Override
      public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
        final LeafCollector in = topCollector.getLeafCollector(context);
        return new FilterLeafCollector(in) {
          @Override
          public void setScorer(final Scorable scorer) throws IOException {
            Scorable s = new FilterScorable(scorer) {

              int lastComputedDoc = -1;

              @Override
              public float score() throws IOException {
                if (lastComputedDoc == docID()) {
                  throw new AssertionError("Score computed twice on " + docID());
                }
                lastComputedDoc = docID();
                return scorer.score();
              }

            };
            super.setScorer(s);
          }
        };
      }
      @Override
      public ScoreMode scoreMode() {
        return topCollector.scoreMode();
      }
    };
    searcher.search(query, assertingCollector);
  }
  reader.close();
  w.close();
  dir.close();
}
 
Example 12
Source File: TestFeatureField.java    From lucene-solr with Apache License 2.0
public void testDemo() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, newIndexWriterConfig()
      .setMergePolicy(newLogMergePolicy(random().nextBoolean())));
  Document doc = new Document();
  FeatureField pagerank = new FeatureField("features", "pagerank", 1);
  doc.add(pagerank);
  TextField body = new TextField("body", "", Store.NO);
  doc.add(body);

  pagerank.setFeatureValue(10);
  body.setStringValue("Apache Lucene");
  writer.addDocument(doc);

  pagerank.setFeatureValue(1000);
  body.setStringValue("Apache Web HTTP server");
  writer.addDocument(doc);

  pagerank.setFeatureValue(1);
  body.setStringValue("Lucene is a search engine");
  writer.addDocument(doc);

  pagerank.setFeatureValue(42);
  body.setStringValue("Lucene in the sky with diamonds");
  writer.addDocument(doc);

  DirectoryReader reader = writer.getReader();
  writer.close();

  // NOTE: If you need to make changes below, then you likely also need to
  // update javadocs of FeatureField.

  IndexSearcher searcher = new IndexSearcher(reader);
  searcher.setSimilarity(new BM25Similarity());
  Query query = new BooleanQuery.Builder()
      .add(new TermQuery(new Term("body", "apache")), Occur.SHOULD)
      .add(new TermQuery(new Term("body", "lucene")), Occur.SHOULD)
      .build();
  Query boost = FeatureField.newSaturationQuery("features", "pagerank");
  Query boostedQuery = new BooleanQuery.Builder()
      .add(query, Occur.MUST)
      .add(boost, Occur.SHOULD)
      .build();
  TopDocs topDocs = searcher.search(boostedQuery, 10);
  assertEquals(4, topDocs.scoreDocs.length);
  assertEquals(1, topDocs.scoreDocs[0].doc);
  assertEquals(0, topDocs.scoreDocs[1].doc);
  assertEquals(3, topDocs.scoreDocs[2].doc);
  assertEquals(2, topDocs.scoreDocs[3].doc);

  reader.close();
  dir.close();
}
 
Example 13
Source File: BaseFieldManagerTest.java    From incubator-retired-blur with Apache License 2.0
private Field newFieldsNoStore(String name, String value) {
  return new StringField(name, value, Store.NO);
}
 
Example 14
Source File: FullKeyMapper.java    From stratio-cassandra with Apache License 2.0
/**
 * Adds to the specified Lucene {@link Document} the full row key formed by the specified partition key and the
 * clustering key.
 *
 * @param document      A Lucene {@link Document}.
 * @param partitionKey  A partition key.
 * @param clusteringKey A clustering key.
 */
public void addFields(Document document, DecoratedKey partitionKey, CellName clusteringKey) {
    ByteBuffer fullKey = byteBuffer(partitionKey, clusteringKey);
    Field field = new StringField(FIELD_NAME, ByteBufferUtils.toString(fullKey), Store.NO);
    document.add(field);
}