Java Code Examples for org.apache.lucene.util.TestUtil#reduceOpenFiles()

The following examples show how to use org.apache.lucene.util.TestUtil#reduceOpenFiles(). You can vote up the examples you like or vote down the ones you don't, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: SimplePrimaryNode.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Creates the primary node's IndexWriter over a replica-style directory,
 * using a randomized analyzer/config and a merge policy tightened so that
 * merging (and thus merge warming) happens frequently under test.
 */
private static IndexWriter initWriter(int id, Random random, Path indexPath, boolean doCheckIndexOnClose) throws IOException {
  Directory dir = SimpleReplicaNode.getDirectory(random, id, indexPath, doCheckIndexOnClose);

  MockAnalyzer analyzer = new MockAnalyzer(random);
  analyzer.setMaxTokenLength(TestUtil.nextInt(random, 1, IndexWriter.MAX_TERM_LENGTH));

  IndexWriterConfig iwc = LuceneTestCase.newIndexWriterConfig(random, analyzer);
  //iwc.setInfoStream(new PrintStreamInfoStream(System.out));

  // Force more frequent merging so we stress merge warming:
  MergePolicy policy = iwc.getMergePolicy();
  if (policy instanceof TieredMergePolicy) {
    TieredMergePolicy tiered = (TieredMergePolicy) policy;
    tiered.setSegmentsPerTier(3);
    tiered.setMaxMergeAtOnce(3);
  } else if (policy instanceof LogMergePolicy) {
    ((LogMergePolicy) policy).setMergeFactor(3);
  }

  IndexWriter writer = new IndexWriter(dir, iwc);
  // Keep the test's file-handle footprint small:
  TestUtil.reduceOpenFiles(writer);
  return writer;
}
 
Example 2
Source File: TestIndexWriterReader.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Seeds a small side index with NUM_INIT_DOCS documents, then opens one
 * DirectoryReader per directory slot over it; the threads later feed these
 * into {@code mainWriter} via addIndexes().
 */
public AddDirectoriesThreads(int numDirs, IndexWriter mainWriter) throws Throwable {
  this.numDirs = numDirs;
  this.mainWriter = mainWriter;

  addDir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random())).setMaxBufferedDocs(2);
  IndexWriter seedWriter = new IndexWriter(addDir, conf);
  TestUtil.reduceOpenFiles(seedWriter);
  for (int docId = 0; docId < NUM_INIT_DOCS; docId++) {
    seedWriter.addDocument(DocHelper.createDocument(docId, "addindex", 4));
  }
  seedWriter.close();

  // Every reader slot views the same seeded side index.
  readers = new DirectoryReader[numDirs];
  for (int slot = 0; slot < numDirs; slot++) {
    readers[slot] = DirectoryReader.open(addDir);
  }
}
 
Example 3
Source File: TestIndexWriterReader.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
@Slow
public void testAddIndexesAndDoDeletesThreads() throws Throwable {
  final int numIter = TEST_NIGHTLY ? 2 : 1;
  int numDirs = TEST_NIGHTLY ? 3 : 2;

  Directory mainDir = getAssertNoDeletesDirectory(newDirectory());
  IndexWriter mainWriter =
      new IndexWriter(mainDir,
                      newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy()));
  TestUtil.reduceOpenFiles(mainWriter);

  // Concurrently addIndexes() side directories into mainWriter, then wait.
  AddDirectoriesThreads addDirThreads = new AddDirectoriesThreads(numIter, mainWriter);
  addDirThreads.launchThreads(numDirs);
  addDirThreads.joinThreads();

  // The threads' own counter is the source of truth for the expected doc count.
  assertEquals(addDirThreads.count.intValue(), addDirThreads.mainWriter.getDocStats().numDocs);

  addDirThreads.close(true);
  assertTrue(addDirThreads.failures.size() == 0);

  // Index must be structurally intact and expose exactly the counted docs.
  TestUtil.checkIndex(mainDir);

  IndexReader mainReader = DirectoryReader.open(mainDir);
  assertEquals(addDirThreads.count.intValue(), mainReader.numDocs());
  mainReader.close();

  addDirThreads.closeDir();
  mainDir.close();
}
 
Example 4
Source File: BaseTestRangeFilter.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Builds the randomized range-filter test index: one document per id in
 * [minId, maxId], each carrying the id in several typed fields plus a random
 * "rand" value whose observed min/max are recorded on {@code index}.
 * Retries from scratch until the min and max random values are each held by
 * exactly one document, because the subclasses rely on that uniqueness.
 */
private static IndexReader build(Random random, TestIndex index) throws IOException {
  /* build an index */
  
  // One reusable Document: the field instances below are mutated per row and
  // the same doc is re-added on each addDocument call.
  Document doc = new Document();
  Field idField = newStringField(random, "id", "", Field.Store.YES);
  Field idDVField = new SortedDocValuesField("id", new BytesRef());
  Field intIdField = new IntPoint("id_int", 0);
  Field intDVField = new NumericDocValuesField("id_int", 0);
  Field floatIdField = new FloatPoint("id_float", 0);
  Field floatDVField = new NumericDocValuesField("id_float", 0);
  Field longIdField = new LongPoint("id_long", 0);
  Field longDVField = new NumericDocValuesField("id_long", 0);
  Field doubleIdField = new DoublePoint("id_double", 0);
  Field doubleDVField = new NumericDocValuesField("id_double", 0);
  Field randField = newStringField(random, "rand", "", Field.Store.YES);
  Field randDVField = new SortedDocValuesField("rand", new BytesRef());
  Field bodyField = newStringField(random, "body", "", Field.Store.NO);
  Field bodyDVField = new SortedDocValuesField("body", new BytesRef());
  doc.add(idField);
  doc.add(idDVField);
  doc.add(intIdField);
  doc.add(intDVField);
  doc.add(floatIdField);
  doc.add(floatDVField);
  doc.add(longIdField);
  doc.add(longDVField);
  doc.add(doubleIdField);
  doc.add(doubleDVField);
  doc.add(randField);
  doc.add(randDVField);
  doc.add(bodyField);
  doc.add(bodyDVField);

  RandomIndexWriter writer = new RandomIndexWriter(random, index.index, 
                                                   newIndexWriterConfig(random, new MockAnalyzer(random))
                                                   .setOpenMode(OpenMode.CREATE).setMaxBufferedDocs(TestUtil.nextInt(random, 50, 1000)).setMergePolicy(newLogMergePolicy()));
  // Keep the test's open-file count down:
  TestUtil.reduceOpenFiles(writer.w);

  while(true) {

    // How many docs hold the current min/max random value in THIS pass.
    int minCount = 0;
    int maxCount = 0;

    for (int d = minId; d <= maxId; d++) {
      idField.setStringValue(pad(d));
      idDVField.setBytesValue(new BytesRef(pad(d)));
      intIdField.setIntValue(d);
      intDVField.setLongValue(d);
      floatIdField.setFloatValue(d);
      // Float/double doc values are stored as their raw bit patterns.
      floatDVField.setLongValue(Float.floatToRawIntBits(d));
      longIdField.setLongValue(d);
      longDVField.setLongValue(d);
      doubleIdField.setDoubleValue(d);
      doubleDVField.setLongValue(Double.doubleToRawLongBits(d));
      int r = index.allowNegativeRandomInts ? random.nextInt() : random
        .nextInt(Integer.MAX_VALUE);
      // Track the running max and how many docs currently tie it.
      if (index.maxR < r) {
        index.maxR = r;
        maxCount = 1;
      } else if (index.maxR == r) {
        maxCount++;
      }

      // Same bookkeeping for the running min.
      if (r < index.minR) {
        index.minR = r;
        minCount = 1;
      } else if (r == index.minR) {
        minCount++;
      }
      randField.setStringValue(pad(r));
      randDVField.setBytesValue(new BytesRef(pad(r)));
      bodyField.setStringValue("body");
      bodyDVField.setBytesValue(new BytesRef("body"));
      writer.addDocument(doc);
    }

    if (minCount == 1 && maxCount == 1) {
      // our subclasses rely on only 1 doc having the min or
      // max, so, we loop until we satisfy that.  it should be
      // exceedingly rare (Yonik calculates 1 in ~429,000 times)
      // that this loop requires more than one try:
      IndexReader ir = writer.getReader();
      writer.close();
      return ir;
    }

    // try again: wipe the index and re-generate with fresh random values.
    // NOTE(review): index.minR/maxR persist across retries — presumably
    // intentional, since a later pass can only tie or beat them; confirm.
    writer.deleteAll();
  }
}
 
Example 5
Source File: TestIndexWriterCommit.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Stresses IndexWriter.commit() thread-safety: NUM_THREADS threads share one
 * RandomIndexWriter, and each repeatedly adds a uniquely-keyed doc, commits,
 * reopens its private reader, and asserts the just-committed doc is visible.
 */
public void testCommitThreadSafety() throws Throwable {
  final int NUM_THREADS = 5;
  final double RUN_SEC = 0.5;  // wall-clock budget for the stress loop
  final Directory dir = newDirectory();
  final RandomIndexWriter w = new RandomIndexWriter(random(), dir, newIndexWriterConfig(new MockAnalyzer(random()))
                                                                     .setMergePolicy(newLogMergePolicy()));
  TestUtil.reduceOpenFiles(w.w);
  // Initial commit so every thread can open a reader before writing.
  w.commit();
  // Set by any thread that hits a failure; others bail out when they see it.
  final AtomicBoolean failed = new AtomicBoolean();
  Thread[] threads = new Thread[NUM_THREADS];
  final long endTime = System.currentTimeMillis()+((long) (RUN_SEC*1000));
  for(int i=0;i<NUM_THREADS;i++) {
    final int finalI = i;
    threads[i] = new Thread() {
        @Override
        public void run() {
          try {
            // Each thread reuses one Document/Field and keeps its own reader.
            final Document doc = new Document();
            DirectoryReader r = DirectoryReader.open(dir);
            Field f = newStringField("f", "", Field.Store.NO);
            doc.add(f);
            int count = 0;
            do {
              if (failed.get()) break;
              for(int j=0;j<10;j++) {
                // Thread-unique term value: "<threadIndex>_<counter>".
                final String s = finalI + "_" + String.valueOf(count++);
                f.setStringValue(s);
                w.addDocument(doc);
                w.commit();
                // After the commit, a reopened reader must exist, must be a
                // NEW instance, and must see exactly one doc with this term.
                DirectoryReader r2 = DirectoryReader.openIfChanged(r);
                assertNotNull(r2);
                assertTrue(r2 != r);
                r.close();
                r = r2;
                assertEquals("term=f:" + s + "; r=" + r, 1, r.docFreq(new Term("f", s)));
              }
            } while(System.currentTimeMillis() < endTime);
            r.close();
          } catch (Throwable t) {
            // Flag the failure for the other threads, then let the test fail.
            failed.set(true);
            throw new RuntimeException(t);
          }
        }
      };
    threads[i].start();
  }
  for(int i=0;i<NUM_THREADS;i++) {
    threads[i].join();
  }
  assertFalse(failed.get());
  w.close();
  dir.close();
}