Java Code Examples for org.apache.lucene.document.StoredField#TYPE

The following examples show how to use org.apache.lucene.document.StoredField#TYPE. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: TestTermVectorsReader.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that a stored-only (non-indexed) field with term vectors enabled
 * is rejected by the writer with a descriptive IllegalArgumentException.
 */
public void testIllegalVectorsWithoutIndexed() throws Exception {
  Directory directory = newDirectory();
  MockAnalyzer analyzer = new MockAnalyzer(random());
  // Disable the analyzer's consumer-correctness checks so the writer's own
  // field-type validation is what fires, not the mock's.
  analyzer.setEnableChecks(false);
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, analyzer);

  // Illegal combination: stored-only base type plus term vectors.
  FieldType fieldType = new FieldType(StoredField.TYPE);
  fieldType.setStoreTermVectors(true);

  Document document = new Document();
  document.add(new Field("field", "value", fieldType));

  IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
    writer.addDocument(document);
  });
  assertEquals("cannot store term vectors for a field that is not indexed (field=\"field\")", expected.getMessage());

  writer.close();
  directory.close();
}
 
Example 2
Source File: TestTermVectorsReader.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that a stored-only (non-indexed) field with term vector positions
 * enabled is rejected by the writer with a descriptive IllegalArgumentException.
 */
public void testIllegalVectorPositionsWithoutIndexed() throws Exception {
  Directory directory = newDirectory();
  MockAnalyzer analyzer = new MockAnalyzer(random());
  // Disable the analyzer's consumer-correctness checks so the writer's own
  // field-type validation is what fires, not the mock's.
  analyzer.setEnableChecks(false);
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, analyzer);

  // Illegal combination: stored-only base type plus term vector positions.
  FieldType fieldType = new FieldType(StoredField.TYPE);
  fieldType.setStoreTermVectorPositions(true);

  Document document = new Document();
  document.add(new Field("field", "value", fieldType));

  IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
    writer.addDocument(document);
  });
  assertEquals("cannot store term vector positions for a field that is not indexed (field=\"field\")", expected.getMessage());

  writer.close();
  directory.close();
}
 
Example 3
Source File: TestTermVectorsReader.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that a stored-only (non-indexed) field with term vector offsets
 * enabled is rejected by the writer with a descriptive IllegalArgumentException.
 */
public void testIllegalVectorOffsetsWithoutIndexed() throws Exception {
  Directory directory = newDirectory();
  MockAnalyzer analyzer = new MockAnalyzer(random());
  // Disable the analyzer's consumer-correctness checks so the writer's own
  // field-type validation is what fires, not the mock's.
  analyzer.setEnableChecks(false);
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, analyzer);

  // Illegal combination: stored-only base type plus term vector offsets.
  FieldType fieldType = new FieldType(StoredField.TYPE);
  fieldType.setStoreTermVectorOffsets(true);

  Document document = new Document();
  document.add(new Field("field", "value", fieldType));

  IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
    writer.addDocument(document);
  });
  assertEquals("cannot store term vector offsets for a field that is not indexed (field=\"field\")", expected.getMessage());

  writer.close();
  directory.close();
}
 
Example 4
Source File: TestTermVectorsReader.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that a stored-only (non-indexed) field with term vector payloads
 * enabled is rejected by the writer with a descriptive IllegalArgumentException.
 */
public void testIllegalVectorPayloadsWithoutIndexed() throws Exception {
  Directory directory = newDirectory();
  MockAnalyzer analyzer = new MockAnalyzer(random());
  // Disable the analyzer's consumer-correctness checks so the writer's own
  // field-type validation is what fires, not the mock's.
  analyzer.setEnableChecks(false);
  RandomIndexWriter writer = new RandomIndexWriter(random(), directory, analyzer);

  // Illegal combination: stored-only base type plus term vector payloads.
  FieldType fieldType = new FieldType(StoredField.TYPE);
  fieldType.setStoreTermVectorPayloads(true);

  Document document = new Document();
  document.add(new Field("field", "value", fieldType));

  IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
    writer.addDocument(document);
  });
  assertEquals("cannot store term vector payloads for a field that is not indexed (field=\"field\")", expected.getMessage());

  writer.close();
  directory.close();
}
 
Example 5
Source File: TestIndexWriterExceptions.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/** test a null string value doesn't abort the entire segment */
/** test a null string value doesn't abort the entire segment */
public void testNullStoredFieldReuse() throws Exception {
  Directory dir = newDirectory();
  Analyzer analyzer = new MockAnalyzer(random());
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(analyzer));

  // First document is valid and must survive the later per-document failure.
  Field reusedField = new StoredField("foo", "hello", StoredField.TYPE);
  Document doc = new Document();
  doc.add(reusedField);
  writer.addDocument(doc);

  // Reusing the same Field instance with a null value must reject the
  // document, not abort the writer.
  expectThrows(IllegalArgumentException.class, () -> {
    reusedField.setStringValue(null);
    writer.addDocument(doc);
  });

  // The failure was non-fatal: no tragic event was recorded on the writer.
  assertNull(writer.getTragicException());
  writer.close();

  // make sure we see our good doc
  DirectoryReader reader = DirectoryReader.open(dir);
  assertEquals(1, reader.numDocs());
  reader.close();
  dir.close();
}
 
Example 6
Source File: TestTermVectorsWriter.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Verifies that an illegal term-vector configuration rejects only the bad
 * document and does not abort the segment holding an earlier good document.
 */
public void testNoAbortOnBadTVSettings() throws Exception {
  Directory dir = newDirectory();
  // Don't use RandomIndexWriter because we want to be sure both docs go to 1 seg:
  IndexWriterConfig config = new IndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, config);

  // Good (empty) document first.
  Document doc = new Document();
  writer.addDocument(doc);

  // Now make the same document illegal: stored-only type with term vectors.
  FieldType badType = new FieldType(StoredField.TYPE);
  badType.setStoreTermVectors(true);
  badType.freeze();
  doc.add(new Field("field", "value", badType));

  expectThrows(IllegalArgumentException.class, () -> {
    writer.addDocument(doc);
  });

  IndexReader reader = DirectoryReader.open(writer);

  // Make sure the exc didn't lose our first document:
  assertEquals(1, reader.numDocs());
  writer.close();
  reader.close();
  dir.close();
}
 
Example 7
Source File: TestIndexableField.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
@Override
public IndexableFieldType fieldType() {
  // Stored-only base type with term vectors turned on (an illegal pairing
  // used by the surrounding test), frozen so it cannot be mutated later.
  FieldType type = new FieldType(StoredField.TYPE);
  type.setStoreTermVectors(true);
  type.freeze();
  return type;
}
 
Example 8
Source File: StoredFieldsWriter.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
@Override
public IndexableFieldType fieldType() {
  // Fields handled here are stored-only; reuse the shared immutable type.
  return StoredField.TYPE;
}
 
Example 9
Source File: SortingStoredFieldsConsumer.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
@Override
public IndexableFieldType fieldType() {
  // Fields handled here are stored-only; reuse the shared immutable type.
  return StoredField.TYPE;
}
 
Example 10
Source File: TestIndexWriter.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Tests a field that is both stored (as a binary slice) and indexed via an
 * explicitly-set token stream, across in-memory merging, a flush, and a
 * forced segment merge. Verifies the stored bytes and the indexed terms
 * survive all three paths.
 */
public void testIndexStoreCombos() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));
  byte[] b = new byte[50];
  for(int i=0;i<50;i++)
    b[i] = (byte) (i+77);

  Document doc = new Document();

  // Stored type, then tokenized+indexed is switched on below.
  FieldType customType = new FieldType(StoredField.TYPE);
  customType.setTokenized(true);

  // Store only bytes [10, 10+17) of the buffer.
  Field f = new Field("binary", b, 10, 17, customType);
  // TODO: this is evil, changing the type after creating the field:
  customType.setIndexOptions(IndexOptions.DOCS);
  final MockTokenizer doc1field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
  doc1field1.setReader(new StringReader("doc1field1"));
  f.setTokenStream(doc1field1);

  FieldType customType2 = new FieldType(TextField.TYPE_STORED);

  Field f2 = newField("string", "value", customType2);
  final MockTokenizer doc1field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
  doc1field2.setReader(new StringReader("doc1field2"));
  f2.setTokenStream(doc1field2);
  doc.add(f);
  doc.add(f2);
  w.addDocument(doc);

  // add 2 docs to test in-memory merging
  final MockTokenizer doc2field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
  doc2field1.setReader(new StringReader("doc2field1"));
  f.setTokenStream(doc2field1);
  final MockTokenizer doc2field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
  doc2field2.setReader(new StringReader("doc2field2"));
  f2.setTokenStream(doc2field2);
  w.addDocument(doc);

  // force segment flush so we can force a segment merge with doc3 later.
  w.commit();

  final MockTokenizer doc3field1 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
  doc3field1.setReader(new StringReader("doc3field1"));
  f.setTokenStream(doc3field1);
  final MockTokenizer doc3field2 = new MockTokenizer(MockTokenizer.WHITESPACE, false);
  doc3field2.setReader(new StringReader("doc3field2"));
  f2.setTokenStream(doc3field2);

  w.addDocument(doc);
  w.commit();
  w.forceMerge(1);   // force segment merge.
  w.close();

  IndexReader ir = DirectoryReader.open(dir);
  Document doc2 = ir.document(0);
  IndexableField f3 = doc2.getField("binary");
  b = f3.binaryValue().bytes;
  assertNotNull(b);
  // BUGFIX: was assertEquals(17, b.length, 17), which resolved to the
  // double-with-delta overload and accepted any length in [0, 34].
  assertEquals(17, b.length);
  // First stored byte is original index 10, i.e. 10 + 77 = 87.
  assertEquals(87, b[0]);

  assertNotNull(ir.document(0).getField("binary").binaryValue());
  assertNotNull(ir.document(1).getField("binary").binaryValue());
  assertNotNull(ir.document(2).getField("binary").binaryValue());

  assertEquals("value", ir.document(0).get("string"));
  assertEquals("value", ir.document(1).get("string"));
  assertEquals("value", ir.document(2).get("string"));


  // test that the terms were indexed.
  assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc1field1"), null, PostingsEnum.NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc2field1"), null, PostingsEnum.NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(TestUtil.docs(random(), ir, "binary", new BytesRef("doc3field1"), null, PostingsEnum.NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc1field2"), null, PostingsEnum.NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc2field2"), null, PostingsEnum.NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);
  assertTrue(TestUtil.docs(random(), ir, "string", new BytesRef("doc3field2"), null, PostingsEnum.NONE).nextDoc() != DocIdSetIterator.NO_MORE_DOCS);

  ir.close();
  dir.close();

}