Java Code Examples for org.apache.lucene.store.IOContext#DEFAULT

The following examples show how to use org.apache.lucene.store.IOContext#DEFAULT. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: CacheIndexOutputTest.java    From incubator-retired-blur with Apache License 2.0 6 votes vote down vote up
@Test
public void test1() throws IOException {
  // Round-trip test: write two marker bytes plus a 16000-byte random payload
  // through the cache-backed output, then read everything back from the
  // underlying directory and verify it matches.
  Random rnd = new Random(seed);
  RAMDirectory dir = new RAMDirectory();

  Cache cache = CacheIndexInputTest.getCache();
  CacheIndexOutput out = new CacheIndexOutput(null, "test", cache, dir, IOContext.DEFAULT);
  out.writeByte((byte) 1);
  out.writeByte((byte) 2);

  byte[] payload = new byte[16000];
  rnd.nextBytes(payload);
  out.writeBytes(payload, payload.length);
  out.close();

  IndexInput in = dir.openInput("test", IOContext.DEFAULT);
  // Expected length: 2 marker bytes + 16000 payload bytes.
  assertEquals(16002, in.length());
  assertEquals(1, in.readByte());
  assertEquals(2, in.readByte());

  byte[] readBack = new byte[16000];
  in.readBytes(readBack, 0, readBack.length);
  in.close();
  assertArrayEquals(payload, readBack);
  dir.close();
}
 
Example 2
Source File: CacheIndexOutputTest.java    From incubator-retired-blur with Apache License 2.0 6 votes vote down vote up
@Test
public void test2() throws IOException {
  // Stream the same random data (just over 10 MB, deliberately not
  // block-aligned) to both a plain directory output and a cache-backed
  // output, then compare the two resulting files with random reads.
  Cache cache = CacheIndexInputTest.getCache();
  RAMDirectory expectedDir = new RAMDirectory();
  RAMDirectory cachedDir = new RAMDirectory();

  Random rnd = new Random(seed);

  String fileName = "test2";
  long length = (10 * 1024 * 1024) + 13;

  IndexOutput expectedOut = expectedDir.createOutput(fileName, IOContext.DEFAULT);
  CacheIndexOutput cachedOut = new CacheIndexOutput(null, fileName, cache, cachedDir, IOContext.DEFAULT);
  CacheIndexInputTest.writeRandomData(length, rnd, expectedOut, cachedOut);
  expectedOut.close();
  cachedOut.close();

  IndexInput expectedIn = expectedDir.openInput(fileName, IOContext.DEFAULT);
  IndexInput cachedIn = cachedDir.openInput(fileName, IOContext.DEFAULT);
  CacheIndexInputTest.readRandomData(expectedIn, cachedIn, rnd, sampleSize, maxBufSize, maxOffset);
  cachedIn.close();
  expectedIn.close();
  expectedDir.close();
  cachedDir.close();
}
 
Example 3
Source File: TableCopyCommand.java    From incubator-retired-blur with Apache License 2.0 5 votes vote down vote up
/**
 * Copies a single file from {@code srcDirectory} to {@code destDirectory} and
 * returns the file's length in bytes (as reported by the source directory).
 *
 * <p>On any I/O failure the partially written destination file is deleted on a
 * best-effort basis before the exception is rethrown.
 *
 * @param file          name of the file to copy
 * @param srcDirectory  directory to read from
 * @param destDirectory HDFS-backed directory to write to
 * @return the length of the copied file in bytes
 * @throws IOException if reading, writing, or closing the streams fails
 */
private long copy(String file, Directory srcDirectory, HdfsDirectory destDirectory) throws IOException {
  long fileLength = srcDirectory.fileLength(file);
  IOContext context = IOContext.DEFAULT;
  IndexOutput os = null;
  IndexInput is = null;
  IOException priorException = null;
  try {
    os = destDirectory.createOutput(file, context);
    is = srcDirectory.openInput(file, context);
    os.copyBytes(is, is.length());
  } catch (IOException ioe) {
    // Remember the copy failure; it is rethrown by
    // closeWhileHandlingException below after both streams are closed.
    priorException = ioe;
  } finally {
    boolean success = false;
    try {
      // Closes os and is, suppressing any close-time exceptions in favor of
      // priorException (which, if non-null, is rethrown here).
      IOUtils.closeWhileHandlingException(priorException, os, is);
      success = true;
    } finally {
      if (!success) {
        // Copy or close failed: remove the incomplete destination file.
        // Best-effort only — deletion failures are deliberately ignored so
        // the original exception propagates.
        try {
          destDirectory.deleteFile(file);
        } catch (Throwable t) {
        }
      }
    }
  }
  return fileLength;
}
 
Example 4
Source File: TestIndexWriterThreadsToSegments.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Verifies that documents buffered in one indexing thread's thread state are
 * eventually flushed even when later indexing happens only on other threads.
 *
 * <p>Two threads each index 1000 docs (tagged threadID0/threadID1) so the
 * writer ends up with two thread states holding buffered docs. Afterwards a
 * single (main) thread keeps adding docs; if the writer incorrectly pinned the
 * other thread states' docs in RAM forever, the while-loop below would never
 * observe all 2000 tagged docs in flushed segments and the test would hang.
 */
public void testDocsStuckInRAMForever() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  // Tiny RAM buffer so flushes are triggered frequently.
  iwc.setRAMBufferSizeMB(.2);
  Codec codec = TestUtil.getDefaultCodec();
  iwc.setCodec(codec);
  // No merging: keeps each flushed segment's .si file visible for inspection.
  iwc.setMergePolicy(NoMergePolicy.INSTANCE);
  final IndexWriter w = new IndexWriter(dir, iwc);
  final CountDownLatch startingGun = new CountDownLatch(1);
  Thread[] threads = new Thread[2];
  for(int i=0;i<threads.length;i++) {
    final int threadID = i;
    threads[i] = new Thread() {
        @Override
        public void run() {
          try {
            // Wait so both threads start indexing concurrently, ensuring
            // each acquires its own thread state in the writer.
            startingGun.await();
            for(int j=0;j<1000;j++) {
              Document doc = new Document();
              doc.add(newStringField("field", "threadID" + threadID, Field.Store.NO));
              w.addDocument(doc);
            }
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      };
    threads[i].start();
  }

  startingGun.countDown();
  for(Thread t : threads) {
    t.join();
  }

  // Segments already inspected, so each segment is only counted once.
  Set<String> segSeen = new HashSet<>();
  int thread0Count = 0;
  int thread1Count = 0;

  // At this point the writer should have 2 thread states w/ docs; now we index with only 1 thread until we see all 1000 thread0 & thread1
  // docs flushed.  If the writer incorrectly holds onto previously indexed docs forever then this will run forever:
  long counter = 0;
  long checkAt = 100;
  while (thread0Count < 1000 || thread1Count < 1000) {
    Document doc = new Document();
    doc.add(newStringField("field", "threadIDmain", Field.Store.NO));
    w.addDocument(doc);
    // Only scan the directory periodically (with an increasing interval) to
    // keep the test's overhead down.
    if (counter++ == checkAt) {
      for(String fileName : dir.listAll()) {
        // Each flushed segment writes a .si (segment info) file.
        if (fileName.endsWith(".si")) {
          String segName = IndexFileNames.parseSegmentName(fileName);
          if (segSeen.contains(segName) == false) {
            segSeen.add(segName);
            byte id[] = readSegmentInfoID(dir, fileName);
            // Open the flushed segment directly (bypassing the writer) and
            // tally how many tagged docs from each indexing thread it holds.
            SegmentInfo si = TestUtil.getDefaultCodec().segmentInfoFormat().read(dir, segName, id, IOContext.DEFAULT);
            si.setCodec(codec);
            SegmentCommitInfo sci = new SegmentCommitInfo(si, 0, 0, -1, -1, -1, StringHelper.randomId());
            SegmentReader sr = new SegmentReader(sci, Version.LATEST.major, IOContext.DEFAULT);
            try {
              thread0Count += sr.docFreq(new Term("field", "threadID0"));
              thread1Count += sr.docFreq(new Term("field", "threadID1"));
            } finally {
              sr.close();
            }
          }
        }
      }

      // Back off: check again after 25% more docs than last time.
      checkAt = (long) (checkAt * 1.25);
      counter = 0;
    }
  }

  w.close();
  dir.close();
}