Java Code Examples for org.apache.lucene.store.ByteBuffersDirectory

The following examples show how to use org.apache.lucene.store.ByteBuffersDirectory. They are extracted from open source projects; the originating project and source file for each example are noted above it.
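ByteBuffersDirectory is a heap-resident Directory implementation and the recommended replacement for the RAMDirectory removed in Lucene 9 (Example 2 below shows the removal message in practice). As a minimal, self-contained sketch before the extracted examples (the class name, field name, and document text are illustrative, not taken from any of the projects below), an in-memory index can be built and searched like this:

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

public class ByteBuffersDirectoryDemo {
  public static void main(String[] args) throws Exception {
    // The whole index lives on the JVM heap; nothing touches the file system.
    try (Directory dir = new ByteBuffersDirectory()) {
      try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
        Document doc = new Document();
        doc.add(new TextField("body", "hello world", Field.Store.YES));
        writer.addDocument(doc);
      } // closing the writer commits by default
      try (DirectoryReader reader = DirectoryReader.open(dir)) {
        IndexSearcher searcher = new IndexSearcher(reader);
        TopDocs hits = searcher.search(new TermQuery(new Term("body", "hello")), 10);
        System.out.println("hits: " + hits.totalHits.value); // expect 1
      }
    }
  }
}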
Example 1
Source Project: jstarcraft-core   Source File: LuceneEngine.java   License: Apache License 2.0
public LuceneEngine(IndexWriterConfig config, Path path) {
    try {
        this.config = config;
        Directory transienceDirectory = new ByteBuffersDirectory();
        this.transienceManager = new TransienceManager((IndexWriterConfig) BeanUtils.cloneBean(config), transienceDirectory);
        Directory persistenceDirectory = FSDirectory.open(path);
        this.persistenceManager = new PersistenceManager((IndexWriterConfig) BeanUtils.cloneBean(config), persistenceDirectory);
        this.searcher = new LuceneSearcher(this.transienceManager, this.persistenceManager);

        this.semaphore = new AtomicInteger();
        ReadWriteLock lock = new ReentrantReadWriteLock();
        this.readLock = lock.readLock();
        this.writeLock = lock.writeLock();
    } catch (Exception exception) {
        throw new StorageException(exception);
    }
}
 
Example 2
Source Project: lucene-solr   Source File: PerfRunData.java   License: Apache License 2.0
private Directory createDirectory(boolean eraseIndex, String dirName,
    String dirParam) throws IOException {
  String dirImpl = config.get(dirParam, DEFAULT_DIRECTORY);
  if ("FSDirectory".equals(dirImpl)) {
    Path workDir = Paths.get(config.get("work.dir", "work"));
    Path indexDir = workDir.resolve(dirName);
    if (eraseIndex && Files.exists(indexDir)) {
      IOUtils.rm(indexDir);
    }
    Files.createDirectories(indexDir);
    return FSDirectory.open(indexDir);
  }

  if ("RAMDirectory".equals(dirImpl)) {
    throw new IOException("RAMDirectory has been removed, use ByteBuffersDirectory.");
  }

  if ("ByteBuffersDirectory".equals(dirImpl)) {
    return new ByteBuffersDirectory();
  }

  throw new IOException("Directory type not supported: " + dirImpl);
}
 
Example 3
Source Project: lucene-solr   Source File: PayloadHelper.java   License: Apache License 2.0
/**
 * Sets up a RAM-resident Directory and adds documents (using English.intToEnglish())
 * with the fields field, multiField, and noPayloadField, analyzing them with the
 * PayloadAnalyzer.
 * @param random Source of randomness for the MockDirectoryWrapper
 * @param similarity The Similarity class to use in the Searcher
 * @param numDocs The number of docs to add
 * @return An IndexSearcher
 */
// TODO: randomize
public IndexSearcher setUp(Random random, Similarity similarity, int numDocs) throws IOException {
  Directory directory = new MockDirectoryWrapper(random, new ByteBuffersDirectory());
  PayloadAnalyzer analyzer = new PayloadAnalyzer();

  // TODO randomize this
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(
      analyzer).setSimilarity(similarity));
  // writer.infoStream = System.out;
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(new TextField(FIELD, English.intToEnglish(i), Field.Store.YES));
    doc.add(new TextField(MULTI_FIELD, English.intToEnglish(i) + "  " + English.intToEnglish(i), Field.Store.YES));
    doc.add(new TextField(NO_PAYLOAD_FIELD, English.intToEnglish(i), Field.Store.YES));
    writer.addDocument(doc);
  }
  writer.forceMerge(1);
  reader = DirectoryReader.open(writer);
  writer.close();

  IndexSearcher searcher = LuceneTestCase.newSearcher(LuceneTestCase.getOnlyLeafReader(reader));
  searcher.setSimilarity(similarity);
  return searcher;
}
 
Example 4
Source Project: lucene-solr   Source File: SweetSpotSimilarityTest.java   License: Apache License 2.0
private static float computeNorm(Similarity sim, String field, int length) throws IOException {
  String value = IntStream.range(0, length).mapToObj(i -> "a").collect(Collectors.joining(" "));
  Directory dir = new ByteBuffersDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setSimilarity(sim));
  w.addDocument(Collections.singleton(newTextField(field, value, Store.NO)));
  DirectoryReader reader = DirectoryReader.open(w);
  w.close();
  IndexSearcher searcher = new IndexSearcher(reader);
  searcher.setSimilarity(sim);
  Explanation expl = searcher.explain(new TermQuery(new Term(field, "a")), 0);
  reader.close();
  dir.close();
  Explanation norm = findExplanation(expl, "fieldNorm");
  assertNotNull(norm);
  return norm.getValue().floatValue();
}
 
Example 5
Source Project: lucene-solr   Source File: TestDirectoryTaxonomyReader.java   License: Apache License 2.0
@Test
public void testOpenIfChangedAndRefCount() throws Exception {
  Directory dir = new ByteBuffersDirectory(); // no need for random directories here

  DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(dir);
  taxoWriter.addCategory(new FacetLabel("a"));
  taxoWriter.commit();

  TaxonomyReader taxoReader = new DirectoryTaxonomyReader(dir);
  assertEquals("wrong refCount", 1, taxoReader.getRefCount());

  taxoReader.incRef();
  assertEquals("wrong refCount", 2, taxoReader.getRefCount());

  taxoWriter.addCategory(new FacetLabel("a", "b"));
  taxoWriter.commit();
  TaxonomyReader newtr = TaxonomyReader.openIfChanged(taxoReader);
  assertNotNull(newtr);
  taxoReader.close();
  taxoReader = newtr;
  assertEquals("wrong refCount", 1, taxoReader.getRefCount());

  taxoWriter.close();
  taxoReader.close();
  dir.close();
}
 
Example 6
Source Project: lucene-solr   Source File: QueryAutoStopWordAnalyzerTest.java   License: Apache License 2.0
@Override
public void setUp() throws Exception {
  super.setUp();
  dir = new ByteBuffersDirectory();
  appAnalyzer = new MockAnalyzer(random(), MockTokenizer.WHITESPACE, false);
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(appAnalyzer));
  int numDocs = 200;
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    String variedFieldValue = variedFieldValues[i % variedFieldValues.length];
    String repetitiveFieldValue = repetitiveFieldValues[i % repetitiveFieldValues.length];
    doc.add(new TextField("variedField", variedFieldValue, Field.Store.YES));
    doc.add(new TextField("repetitiveField", repetitiveFieldValue, Field.Store.YES));
    writer.addDocument(doc);
  }
  writer.close();
  reader = DirectoryReader.open(dir);
}
 
Example 7
Source Project: lucene-solr   Source File: StemmerTestBase.java   License: Apache License 2.0
static void init(boolean ignoreCase, String affix, String... dictionaries) throws IOException, ParseException {
  if (dictionaries.length == 0) {
    throw new IllegalArgumentException("there must be at least one dictionary");
  }
  
  InputStream affixStream = StemmerTestBase.class.getResourceAsStream(affix);
  if (affixStream == null) {
    throw new FileNotFoundException("file not found: " + affix);
  }
  
  InputStream[] dictStreams = new InputStream[dictionaries.length];
  for (int i = 0; i < dictionaries.length; i++) {
    dictStreams[i] = StemmerTestBase.class.getResourceAsStream(dictionaries[i]);
    if (dictStreams[i] == null) {
      throw new FileNotFoundException("file not found: " + dictionaries[i]);
    }
  }
  
  try {
    Dictionary dictionary = new Dictionary(new ByteBuffersDirectory(), "dictionary", affixStream, Arrays.asList(dictStreams), ignoreCase);
    stemmer = new Stemmer(dictionary);
  } finally {
    IOUtils.closeWhileHandlingException(affixStream);
    IOUtils.closeWhileHandlingException(dictStreams);
  }
}
 
Example 8
public void testSingleBigValueCompression() throws IOException {
  try (final Directory dir = new ByteBuffersDirectory()) {
    final IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
    final IndexWriter iwriter = new IndexWriter(dir, iwc);

    final Document doc = new Document();
    final NumericDocValuesField dvf = new NumericDocValuesField("dv", 0);
    doc.add(dvf);
    for (int i = 0; i < 20000; ++i) {
      dvf.setLongValue(i & 1023);
      iwriter.addDocument(doc);
    }
    iwriter.forceMerge(1);
    final long size1 = dirSize(dir);
    dvf.setLongValue(Long.MAX_VALUE);
    iwriter.addDocument(doc);
    iwriter.forceMerge(1);
    final long size2 = dirSize(dir);
    // make sure the new value did not grow the bpv for every other value
    assertTrue(size2 < size1 + (20000 * (63 - 10)) / 8);
  }
}
 
Example 9
Source Project: lucene-solr   Source File: SingleFieldTestDb.java   License: Apache License 2.0
public SingleFieldTestDb(Random random, String[] documents, String fName) {
  try {
    db = new MockDirectoryWrapper(random, new ByteBuffersDirectory());
    docs = documents;
    fieldName = fName;
    IndexWriter writer = new IndexWriter(db, new IndexWriterConfig(new MockAnalyzer(random)));
    for (int j = 0; j < docs.length; j++) {
      Document d = new Document();
      d.add(new TextField(fieldName, docs[j], Field.Store.NO));
      writer.addDocument(d);
    }
    writer.close();
  } catch (java.io.IOException ioe) {
    throw new Error(ioe);
  }
}
 
Example 10
Source Project: lucene-solr   Source File: TestMergeSchedulerExternal.java   License: Apache License 2.0
public void testCustomMergeScheduler() throws Exception {
  // we don't really need to execute anything, just to make sure the custom MS
  // compiles. But ensure that it can be used as well, e.g., no other hidden
  // dependencies or something. Therefore, don't use any random API !
  Directory dir = new ByteBuffersDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setMergeScheduler(new ReportingMergeScheduler());
  IndexWriter writer = new IndexWriter(dir, conf);
  writer.addDocument(new Document());
  writer.commit(); // trigger flush
  writer.addDocument(new Document());
  writer.commit(); // trigger flush
  writer.forceMerge(1);
  writer.close();
  dir.close();
}
 
Example 11
Source Project: lucene-solr   Source File: TestPointValues.java   License: Apache License 2.0
public void testMergedStatsOneSegmentWithoutPoints() throws IOException {
  Directory dir = new ByteBuffersDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null).setMergePolicy(NoMergePolicy.INSTANCE));
  w.addDocument(new Document());
  DirectoryReader.open(w).close();
  Document doc = new Document();
  doc.add(new IntPoint("field", Integer.MIN_VALUE));
  w.addDocument(doc);
  IndexReader reader = DirectoryReader.open(w);

  assertArrayEquals(new byte[4], PointValues.getMinPackedValue(reader, "field"));
  assertArrayEquals(new byte[4], PointValues.getMaxPackedValue(reader, "field"));
  assertEquals(1, PointValues.getDocCount(reader, "field"));
  assertEquals(1, PointValues.size(reader, "field"));

  assertNull(PointValues.getMinPackedValue(reader, "field2"));
  assertNull(PointValues.getMaxPackedValue(reader, "field2"));
  assertEquals(0, PointValues.getDocCount(reader, "field2"));
  assertEquals(0, PointValues.size(reader, "field2"));
}
 
Example 12
Source Project: lucene-solr   Source File: TestPointValues.java   License: Apache License 2.0
public void testMergedStatsAllPointsDeleted() throws IOException {
  Directory dir = new ByteBuffersDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
  w.addDocument(new Document());
  Document doc = new Document();
  doc.add(new IntPoint("field", Integer.MIN_VALUE));
  doc.add(new StringField("delete", "yes", Store.NO));
  w.addDocument(doc);
  w.forceMerge(1);
  w.deleteDocuments(new Term("delete", "yes"));
  w.addDocument(new Document());
  w.forceMerge(1);
  IndexReader reader = DirectoryReader.open(w);

  assertNull(PointValues.getMinPackedValue(reader, "field"));
  assertNull(PointValues.getMaxPackedValue(reader, "field"));
  assertEquals(0, PointValues.getDocCount(reader, "field"));
  assertEquals(0, PointValues.size(reader, "field"));
}
 
Example 13
Source Project: lucene-solr   Source File: TestPendingDeletes.java   License: Apache License 2.0
public void testIsFullyDeleted() throws IOException {
  Directory dir = new ByteBuffersDirectory();
  SegmentInfo si = new SegmentInfo(dir, Version.LATEST, Version.LATEST, "test", 3, false, Codec.getDefault(),
      Collections.emptyMap(), StringHelper.randomId(), new HashMap<>(), null);
  SegmentCommitInfo commitInfo = new SegmentCommitInfo(si, 0, 0, -1, -1, -1, StringHelper.randomId());
  FieldInfos fieldInfos = FieldInfos.EMPTY;
  si.getCodec().fieldInfosFormat().write(dir, si, "", fieldInfos, IOContext.DEFAULT);
  PendingDeletes deletes = newPendingDeletes(commitInfo);
  for (int i = 0; i < 3; i++) {
    assertTrue(deletes.delete(i));
    if (random().nextBoolean()) {
      assertTrue(deletes.writeLiveDocs(dir));
    }
    assertEquals(i == 2, deletes.isFullyDeleted(() -> null));
  }
}
 
Example 14
Source Project: lucene-solr   Source File: TestIndexWriterReader.java   License: Apache License 2.0
/** Make sure if all we do is open NRT reader against
 *  writer, we don't see merge starvation. */
public void testTooManySegments() throws Exception {
  Directory dir = getAssertNoDeletesDirectory(new ByteBuffersDirectory());
  // Don't use newIndexWriterConfig, because we need a
  // "sane" mergePolicy:
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter w = new IndexWriter(dir, iwc);
  // Create 500 segments:
  for(int i=0;i<500;i++) {
    Document doc = new Document();
    doc.add(newStringField("id", ""+i, Field.Store.NO));
    w.addDocument(doc);
    IndexReader r = DirectoryReader.open(w);
    // Make sure segment count never exceeds 100:
    assertTrue(r.leaves().size() < 100);
    r.close();
  }
  w.close();
  dir.close();
}
 
Example 15
Source Project: lucene-solr   Source File: TestIndexWriter.java   License: Apache License 2.0
public void testDeleteAllNRTLeftoverFiles() throws Exception {

  MockDirectoryWrapper d = new MockDirectoryWrapper(random(), new ByteBuffersDirectory());
  IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  for (int i = 0; i < 20; i++) {
    for (int j = 0; j < 100; ++j) {
      w.addDocument(doc);
    }
    w.commit();
    DirectoryReader.open(w).close();

    w.deleteAll();
    w.commit();
    // Make sure we accumulate no files except for empty
    // segments_N and segments.gen:
    assertTrue(d.listAll().length <= 2);
  }

  w.close();
  d.close();
}
 
Example 16
Source Project: lucene-solr   Source File: TestIndexWriter.java   License: Apache License 2.0
public void testNRTReaderVersion() throws Exception {
  Directory d = new MockDirectoryWrapper(random(), new ByteBuffersDirectory());
  IndexWriter w = new IndexWriter(d, new IndexWriterConfig(new MockAnalyzer(random())));
  Document doc = new Document();
  doc.add(newStringField("id", "0", Field.Store.YES));
  w.addDocument(doc);
  DirectoryReader r = w.getReader();
  long version = r.getVersion();
  r.close();

  w.addDocument(doc);
  r = w.getReader();
  long version2 = r.getVersion();
  r.close();
  assertTrue(version2 > version);

  w.deleteDocuments(new Term("id", "0"));
  r = w.getReader();
  w.close();
  long version3 = r.getVersion();
  r.close();
  assertTrue(version3 > version2);
  d.close();
}
 
Example 17
Source Project: lucene-solr   Source File: TestTryDelete.java   License: Apache License 2.0
private static Directory createIndex() throws IOException {
  Directory directory = new ByteBuffersDirectory();

  IndexWriter writer = getWriter(directory);

  for (int i = 0; i < 10; i++) {
    Document doc = new Document();
    doc.add(new StringField("foo", String.valueOf(i), Store.YES));
    writer.addDocument(doc);
  }

  writer.commit();
  writer.close();

  return directory;
}
 
Example 18
Source Project: lucene-solr   Source File: TestAddIndexes.java   License: Apache License 2.0
public RunAddIndexesThreads(int numCopy) throws Throwable {
  NUM_COPY = numCopy;
  dir = new MockDirectoryWrapper(random(), new ByteBuffersDirectory());
  IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random()))
      .setMaxBufferedDocs(2));
  for (int i = 0; i < NUM_INIT_DOCS; i++)
    addDoc(writer);
  writer.close();

  dir2 = newDirectory();
  writer2 = new IndexWriter(dir2, new IndexWriterConfig(new MockAnalyzer(random())));
  writer2.commit();

  readers = new DirectoryReader[NUM_COPY];
  for(int i=0;i<NUM_COPY;i++)
    readers[i] = DirectoryReader.open(dir);
  int numThreads = TEST_NIGHTLY ? 5 : 2;
  threads = new Thread[numThreads];
}
 
Example 19
Source Project: lucene-solr   Source File: TestSizeBoundedForceMerge.java   License: Apache License 2.0
public void testLastSegmentTooLarge() throws Exception {
  Directory dir = new ByteBuffersDirectory();

  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);

  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 5);
  
  writer.close();

  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);
  
  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();

  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(2, sis.size());
}
 
Example 20
Source Project: lucene-solr   Source File: TestSizeBoundedForceMerge.java   License: Apache License 2.0
public void testFirstSegmentTooLarge() throws Exception {
  Directory dir = new ByteBuffersDirectory();
  
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);
  
  addDocs(writer, 5);
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);
  
  writer.close();
  
  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);
  
  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();
  
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(2, sis.size());
}
 
Example 21
Source Project: lucene-solr   Source File: TestSizeBoundedForceMerge.java   License: Apache License 2.0
public void testAllSegmentsSmall() throws Exception {
  Directory dir = new ByteBuffersDirectory();
  
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);
  
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);
  
  writer.close();
  
  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);
  
  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();
  
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(1, sis.size());
}
 
Example 22
Source Project: lucene-solr   Source File: TestSizeBoundedForceMerge.java   License: Apache License 2.0
public void testAllSegmentsLarge() throws Exception {
  Directory dir = new ByteBuffersDirectory();
  
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);
  
  addDocs(writer, 3);
  addDocs(writer, 3);
  addDocs(writer, 3);
  
  writer.close();
  
  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(2);
  conf.setMergePolicy(lmp);
  
  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();
  
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(3, sis.size());
}
 
Example 23
Source Project: lucene-solr   Source File: TestSizeBoundedForceMerge.java   License: Apache License 2.0
public void testOneLargeOneSmall() throws Exception {
  Directory dir = new ByteBuffersDirectory();
  
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);
  
  addDocs(writer, 3);
  addDocs(writer, 5);
  addDocs(writer, 3);
  addDocs(writer, 5);
  
  writer.close();
  
  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);
  
  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();
  
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(4, sis.size());
}
 
Example 24
Source Project: lucene-solr   Source File: TestSizeBoundedForceMerge.java   License: Apache License 2.0
public void testSingleNonMergeableSegment() throws Exception {
  Directory dir = new ByteBuffersDirectory();
  
  IndexWriterConfig conf = newWriterConfig();
  IndexWriter writer = new IndexWriter(dir, conf);
  
  addDocs(writer, 3, true);
  
  writer.close();
  
  conf = newWriterConfig();
  LogMergePolicy lmp = new LogDocMergePolicy();
  lmp.setMaxMergeDocs(3);
  conf.setMergePolicy(lmp);
  
  writer = new IndexWriter(dir, conf);
  writer.forceMerge(1);
  writer.close();
  
  // Verify that the last segment does not have deletions.
  SegmentInfos sis = SegmentInfos.readLatestCommit(dir);
  assertEquals(1, sis.size());
}
 
Example 25
Source Project: lucene-solr   Source File: TestMultiTermsEnum.java   License: Apache License 2.0
public void testNoTermsInField() throws Exception {
  Directory directory = new ByteBuffersDirectory();
  IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(new MockAnalyzer(random())));
  Document document = new Document();
  document.add(new StringField("deleted", "0", Field.Store.YES));
  writer.addDocument(document);

  DirectoryReader reader = DirectoryReader.open(writer);
  writer.close();

  Directory directory2 = new ByteBuffersDirectory();
  writer = new IndexWriter(directory2, new IndexWriterConfig(new MockAnalyzer(random())));
  
  List<LeafReaderContext> leaves = reader.leaves();
  CodecReader[] codecReaders = new CodecReader[leaves.size()];
  for (int i = 0; i < leaves.size(); i++) {
    codecReaders[i] = new MigratingCodecReader((CodecReader) leaves.get(i).reader());
  }

  writer.addIndexes(codecReaders); // <- bang

  IOUtils.close(writer, reader, directory, directory2);
}
 
Example 26
Source Project: lucene-solr   Source File: HdfsDirectoryTest.java   License: Apache License 2.0
@Test
public void testRandomAccessWrites() throws IOException {
  int i = 0;
  try {
    Set<String> names = new HashSet<>();
    for (; i < 10; i++) {
      Directory fsDir = new ByteBuffersDirectory();
      String name = getName();
      System.out.println("Working on pass [" + i + "] contains [" + names.contains(name) + "]");
      names.add(name);
      createFile(name, fsDir, directory);
      assertInputsEquals(name, fsDir, directory);
      fsDir.close();
    }
  } catch (Exception e) {
    e.printStackTrace();
    fail("Test failed on pass [" + i + "]");
  }
}
 
Example 27
@Before
public void setupIndex() throws Exception {
    dir = new ByteBuffersDirectory();

    try(IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER))) {
        for (int i = 0; i < docs.length; i++) {
            Document doc = new Document();
            doc.add(new Field("_id", Integer.toString(i + 1), StoredField.TYPE));
            doc.add(newTextField("text", docs[i], Field.Store.YES));
            indexWriter.addDocument(doc);
        }
    }

    reader = DirectoryReader.open(dir);
    searcher = new IndexSearcher(reader);
}
 
Example 28
Source Project: crate   Source File: LuceneBatchIteratorTest.java   License: Apache License 2.0
@Before
public void prepareSearcher() throws Exception {
    IndexWriter iw = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new StandardAnalyzer()));
    String columnName = "x";
    expectedResult = new ArrayList<>(20);
    for (long i = 0; i < 20; i++) {
        Document doc = new Document();
        doc.add(new NumericDocValuesField(columnName, i));
        iw.addDocument(doc);
        expectedResult.add(new Object[] { i });
    }
    iw.commit();
    indexSearcher = new IndexSearcher(DirectoryReader.open(iw));
    LongColumnReference columnReference = new LongColumnReference(columnName);
    columnRefs = Collections.singletonList(columnReference);
}
 
Example 29
Source Project: crate   Source File: LuceneOrderedDocCollectorTest.java   License: Apache License 2.0
@Test
public void testSearchNoScores() throws Exception {
    IndexWriter w = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
    KeywordFieldMapper.KeywordFieldType fieldType = new KeywordFieldMapper.KeywordFieldType();
    fieldType.setName("x");
    fieldType.freeze();

    for (int i = 0; i < 3; i++) {
        addDoc(w, fieldType, "Arthur");
    }
    addDoc(w, fieldType, "Arthur"); // not "Arthur" to lower score
    w.commit();
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(w, true, true));

    List<LuceneCollectorExpression<?>> columnReferences = Collections.singletonList(new ScoreCollectorExpression());
    Query query = fieldType.termsQuery(Collections.singletonList("Arthur"), null);
    LuceneOrderedDocCollector collector = collector(searcher, columnReferences, query, null, false);
    KeyIterable<ShardId, Row> result = collector.collect();

    assertThat(Iterables.size(result), is(2));

    Iterator<Row> values = result.iterator();

    assertThat(values.next().get(0), Matchers.is(Float.NaN));
    assertThat(values.next().get(0), Matchers.is(Float.NaN));
}
 
Example 30
Source Project: crate   Source File: LuceneOrderedDocCollectorTest.java   License: Apache License 2.0
@Test
public void testSearchWithScores() throws Exception {
    IndexWriter w = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(new KeywordAnalyzer()));
    KeywordFieldMapper.KeywordFieldType fieldType = new KeywordFieldMapper.KeywordFieldType();
    fieldType.setName("x");
    fieldType.freeze();

    for (int i = 0; i < 3; i++) {
        addDoc(w, fieldType, "Arthur");
    }
    addDoc(w, fieldType, "Arthur"); // not "Arthur" to lower score
    w.commit();
    IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(w, true, true));

    List<LuceneCollectorExpression<?>> columnReferences = Collections.singletonList(new ScoreCollectorExpression());
    Query query = fieldType.termsQuery(Collections.singletonList("Arthur"), null);
    LuceneOrderedDocCollector collector = collector(searcher, columnReferences, query, null, true);
    KeyIterable<ShardId, Row> result = collector.collect();

    assertThat(Iterables.size(result), is(2));

    Iterator<Row> values = result.iterator();

    assertThat(values.next().get(0), Matchers.is(1.0F));
    assertThat(values.next().get(0), Matchers.is(1.0F));
}