Java Code Examples for org.apache.lucene.store.NRTCachingDirectory

The following examples show how to use org.apache.lucene.store.NRTCachingDirectory. They are extracted from open source projects; the originating project, source file, and license are noted above each example.
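All of the snippets below follow the same basic pattern, so here is a minimal, self-contained sketch of it before the project-specific examples: wrap a delegate Directory (typically an FSDirectory) in an NRTCachingDirectory with a maxMergeSizeMB and maxCachedMB budget, then hand the wrapper to an IndexWriter so that small near-real-time flush and merge files are cached in RAM. The index path, analyzer, and document are illustrative assumptions, not taken from any of the listed projects; the 5.0/60.0 limits mirror the values used in the class's own javadoc example.

import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.NRTCachingDirectory;

public class NRTCachingDirectoryExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical index path; cache merged segments up to 5 MB and keep at
    // most 60 MB of recently flushed files in RAM before writing through.
    Directory dir = new NRTCachingDirectory(
        FSDirectory.open(Paths.get("/tmp/nrt-example-index")), 5.0, 60.0);

    IndexWriterConfig config = new IndexWriterConfig(new StandardAnalyzer());
    try (IndexWriter writer = new IndexWriter(dir, config)) {
      Document doc = new Document();
      doc.add(new StringField("id", "doc-1", Field.Store.YES));
      writer.addDocument(doc);

      // Near-real-time reader: the freshly flushed segment can be served
      // from the RAM cache until it is committed or merged away.
      try (DirectoryReader reader = DirectoryReader.open(writer)) {
        System.out.println("maxDoc=" + reader.maxDoc());
      }
    }
    dir.close();
  }
}

The project examples that follow use the same constructor but differ mainly in the delegate Directory they wrap (an FSDirectory, an MMapDirectory, or a MongoDB-backed DistributedDirectory) and in the cache limits they choose.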
Example 1
Source Project: linden   Source File: LindenCoreImpl.java   License: Apache License 2.0
public Directory createTaxoIndexDirectory(String directory, LindenConfig.IndexType indexType) throws IOException {
  switch (indexType) {
    case RAM:
      return new RAMDirectory();
    default:
      Preconditions.checkNotNull(directory, "index directory can not be null");
      return new NRTCachingDirectory(FSDirectory.open(new File(directory + ".taxonomy")),
                                     maxMergeSizeMB, maxCachedMB);
  }
}
 
Example 2
Source Project: lucene-solr   Source File: BaseCompoundFormatTestCase.java   License: Apache License 2.0
public void testLargeCFS() throws IOException {   
  final String testfile = "_123.test";
  IOContext context = new IOContext(new FlushInfo(0, 512*1024*1024));

  Directory dir = new NRTCachingDirectory(newFSDirectory(createTempDir()), 2.0, 25.0);

  SegmentInfo si = newSegmentInfo(dir, "_123");
  try (IndexOutput out = dir.createOutput(testfile, context)) {
    CodecUtil.writeIndexHeader(out, "Foo", 0, si.getId(), "suffix");
    byte[] bytes = new byte[512];
    for(int i=0;i<1024*1024;i++) {
      out.writeBytes(bytes, 0, bytes.length);
    }
    CodecUtil.writeFooter(out);
  }
  
  si.setFiles(Collections.singleton(testfile));
  si.getCodec().compoundFormat().write(dir, si, context);

  dir.close();
}
 
Example 3
Source Project: lumongo   Source File: LumongoIndex.java   License: Apache License 2.0
public DirectoryTaxonomyWriter getTaxoWriter(int segmentNumber) throws IOException {

  Directory d;

  if (indexConfig.getIndexSettings().getStoreIndexOnDisk()) {
    d = MMapDirectory.open(getPathForFacetsIndex(segmentNumber));
  }
  else {
    String indexSegmentDbName = getIndexSegmentDbName(segmentNumber);
    String indexSegmentCollectionName = getIndexSegmentCollectionName(segmentNumber) + "_facets";
    MongoDirectory mongoDirectory = new MongoDirectory(mongo, indexSegmentDbName, indexSegmentCollectionName, clusterConfig.isSharded(),
        clusterConfig.getIndexBlockSize());
    d = new DistributedDirectory(mongoDirectory);
  }

  NRTCachingDirectory nrtCachingDirectory = new NRTCachingDirectory(d, 2, 10);

  return new DirectoryTaxonomyWriter(nrtCachingDirectory);
}
 
Example 4
Source Project: linden   Source File: LindenCoreImpl.java   License: Apache License 2.0
public Directory createIndexDirectory(String directory, LindenConfig.IndexType indexType) throws IOException {
  switch (indexType) {
    case RAM:
      return new RAMDirectory();
    default:
      Preconditions.checkNotNull(directory, "index directory can not be null");
      return new NRTCachingDirectory(FSDirectory.open(new File(directory)), maxMergeSizeMB, maxCachedMB);
  }
}
 
Example 5
@Override
protected Directory getDirectory(Directory in) {
  // Randomly swap in NRTCachingDir
  if (random().nextBoolean()) {
    if (VERBOSE) {
      System.out.println("TEST: wrap NRTCachingDir");
    }

    return new NRTCachingDirectory(in, 5.0, 60.0);
  } else {
    return in;
  }
}
 
Example 6
Source Project: lucene-solr   Source File: TestBinaryDocValuesUpdates.java   License: Apache License 2.0
public void testIOContext() throws Exception {
  // LUCENE-5591: make sure we pass an IOContext with an approximate
  // segmentSize in FlushInfo
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // we want a single large enough segment so that a doc-values update writes a large file
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
  conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
  IndexWriter writer = new IndexWriter(dir, conf);
  for (int i = 0; i < 100; i++) {
    writer.addDocument(doc(i));
  }
  writer.commit();
  writer.close();
  
  NRTCachingDirectory cachingDir = new NRTCachingDirectory(dir, 100, 1/(1024.*1024.));
  conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // we want a single large enough segment so that a doc-values update writes a large file
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
  conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
  writer = new IndexWriter(cachingDir, conf);
  writer.updateBinaryDocValue(new Term("id", "doc-0"), "val", toBytes(100L));
  DirectoryReader reader = DirectoryReader.open(writer); // flush
  assertEquals(0, cachingDir.listCachedFiles().length);
  
  IOUtils.close(reader, writer, cachingDir);
}
 
Example 7
Source Project: lucene-solr   Source File: TestNumericDocValuesUpdates.java   License: Apache License 2.0
public void testIOContext() throws Exception {
  // LUCENE-5591: make sure we pass an IOContext with an approximate
  // segmentSize in FlushInfo
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // we want a single large enough segment so that a doc-values update writes a large file
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
  conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
  IndexWriter writer = new IndexWriter(dir, conf);
  for (int i = 0; i < 100; i++) {
    writer.addDocument(doc(i));
  }
  writer.commit();
  writer.close();
  
  NRTCachingDirectory cachingDir = new NRTCachingDirectory(dir, 100, 1/(1024.*1024.));
  conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // we want a single large enough segment so that a doc-values update writes a large file
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
  conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
  writer = new IndexWriter(cachingDir, conf);
  writer.updateNumericDocValue(new Term("id", "doc-0"), "val", 100L);
  DirectoryReader reader = DirectoryReader.open(writer); // flush
  assertEquals(0, cachingDir.listCachedFiles().length);
  
  IOUtils.close(reader, writer, cachingDir);
}
 
Example 8
Source Project: lucene-solr   Source File: MockDirectoryFactory.java   License: Apache License 2.0
private Directory reduce(Directory dir) {
  // Peel off the NRTCachingDirectory and TrackingDirectoryWrapper layers
  // to return the underlying Directory.
  Directory cdir = dir;
  if (cdir instanceof NRTCachingDirectory) {
    cdir = ((NRTCachingDirectory)cdir).getDelegate();
  }
  if (cdir instanceof TrackingDirectoryWrapper) {
    cdir = ((TrackingDirectoryWrapper)cdir).getDelegate();
  }
  return cdir;
}
 
Example 9
Source Project: lucene-solr   Source File: MockFSDirectoryFactory.java   License: Apache License 2.0
private Directory reduce(Directory dir) {
  // Peel off the NRTCachingDirectory and TrackingDirectoryWrapper layers
  // to return the underlying Directory.
  Directory cdir = dir;
  if (cdir instanceof NRTCachingDirectory) {
    cdir = ((NRTCachingDirectory)cdir).getDelegate();
  }
  if (cdir instanceof TrackingDirectoryWrapper) {
    cdir = ((TrackingDirectoryWrapper)cdir).getDelegate();
  }
  return cdir;
}
 
Example 10
Source Project: lucene-solr   Source File: NRTCachingDirectoryFactory.java   License: Apache License 2.0
@Override
protected Directory create(String path, LockFactory lockFactory, DirContext dirContext) throws IOException {
  // wrap the FSDirectory (opened with the supplied lock factory) in a caching directory using the configured size limits:
  return new NRTCachingDirectory(FSDirectory.open(new File(path).toPath(), lockFactory), maxMergeSizeMB, maxCachedMB);
}
 
Example 11
Source Project: stratio-cassandra   Source File: LuceneIndex.java   License: Apache License 2.0
/**
 * Initializes this using the specified {@link Sort} for trying to keep the {@link Document}s sorted.
 *
 * @param sort The {@link Sort} to be used.
 */
public void init(Sort sort) {
    Log.debug("Initializing index");
    try {
        this.sort = sort;

        // Get directory file
        file = new File(path);

        // Open or create directory
        FSDirectory fsDirectory = FSDirectory.open(file);
        directory = new NRTCachingDirectory(fsDirectory, maxMergeMB, maxCachedMB);

        // Setup index writer
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_48, analyzer);
        config.setRAMBufferSizeMB(ramBufferMB);
        config.setOpenMode(IndexWriterConfig.OpenMode.CREATE_OR_APPEND);
        config.setUseCompoundFile(true);
        config.setMergePolicy(new SortingMergePolicy(config.getMergePolicy(), sort));
        indexWriter = new IndexWriter(directory, config);

        // Setup NRT search
        SearcherFactory searcherFactory = new SearcherFactory() {
            public IndexSearcher newSearcher(IndexReader reader) throws IOException {
                IndexSearcher searcher = new IndexSearcher(reader);
                searcher.setSimilarity(new NoIDFSimilarity());
                return searcher;
            }
        };
        TrackingIndexWriter trackingIndexWriter = new TrackingIndexWriter(indexWriter);
        searcherManager = new SearcherManager(indexWriter, true, searcherFactory);
        searcherReopener = new ControlledRealTimeReopenThread<>(trackingIndexWriter,
                                                                searcherManager,
                                                                refreshSeconds,
                                                                refreshSeconds);
        searcherReopener.start(); // Start the refresher thread
    } catch (IOException e) {
        Log.error(e, "Error while initializing index");
        throw new RuntimeException(e);
    }
}