Java Code Examples for org.apache.lucene.store.Directory#close()

The following examples show how to use org.apache.lucene.store.Directory#close(). Each example is extracted from an open-source project; the source file and the project it comes from are noted above each snippet.
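Before working through the examples, one general note: Directory implements java.io.Closeable, so close() can also be invoked automatically via try-with-resources instead of the explicit calls shown below. The following minimal sketch illustrates that pattern; the index path and class name are hypothetical and not taken from any example on this page.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;

public class DirectoryCloseExample {
  public static void main(String[] args) throws IOException {
    // Hypothetical index path; try-with-resources guarantees that
    // Directory#close() runs even if listAll() throws.
    try (Directory dir = FSDirectory.open(Paths.get("/tmp/example-index"))) {
      for (String file : dir.listAll()) {
        System.out.println(file);
      }
    } // dir.close() is invoked automatically here
  }
}

The tests below close their resources manually and in a consistent order: IndexReader and IndexWriter instances first, the Directory last. When several resources must be closed together, org.apache.lucene.util.IOUtils.close(Closeable...) is a common alternative to chained close() calls.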
Example 1
Source File: TestConstantScoreQuery.java    From lucene-solr with Apache License 2.0
public void testPropagatesApproximations() throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  Field f = newTextField("field", "a b", Field.Store.NO);
  doc.add(f);
  w.addDocument(doc);
  w.commit();

  DirectoryReader reader = w.getReader();
  final IndexSearcher searcher = newSearcher(reader);
  searcher.setQueryCache(null); // to still have approximations

  PhraseQuery pq = new PhraseQuery("field", "a", "b");

  Query q = searcher.rewrite(new ConstantScoreQuery(pq));

  final Weight weight = searcher.createWeight(q, ScoreMode.COMPLETE, 1);
  final Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0));
  assertNotNull(scorer.twoPhaseIterator());

  reader.close();
  w.close();
  dir.close();
}
 
Example 2
Source File: BaseFieldInfoFormatTestCase.java    From lucene-solr with Apache License 2.0
/** Test field infos attributes coming back are not mutable */
public void testImmutableAttributes() throws Exception {
  Directory dir = newDirectory();
  Codec codec = getCodec();
  SegmentInfo segmentInfo = newSegmentInfo(dir, "_123");
  FieldInfos.Builder builder = new FieldInfos.Builder(new FieldInfos.FieldNumbers(null));
  FieldInfo fi = builder.getOrAdd("field");
  fi.setIndexOptions(TextField.TYPE_STORED.indexOptions());
  addAttributes(fi);
  fi.putAttribute("foo", "bar");
  fi.putAttribute("bar", "baz");
  FieldInfos infos = builder.finish();
  codec.fieldInfosFormat().write(dir, segmentInfo, "", infos, IOContext.DEFAULT);
  FieldInfos infos2 = codec.fieldInfosFormat().read(dir, segmentInfo, "", IOContext.DEFAULT);
  assertEquals(1, infos2.size());
  assertNotNull(infos2.fieldInfo("field"));
  Map<String,String> attributes = infos2.fieldInfo("field").attributes();
  // shouldn't be able to modify attributes
  expectThrows(UnsupportedOperationException.class, () -> {
    attributes.put("bogus", "bogus");
  });

  dir.close();
}
 
Example 3
Source File: TestBKDRadixSelector.java    From lucene-solr with Apache License 2.0
public void testRandomAllDimensionsEquals() throws IOException {
  int values = TestUtil.nextInt(random(), 15000, 20000);
  Directory dir = getDirectory(values);
  int partitionPoint = random().nextInt(values);
  int sortedOnHeap = random().nextInt(5000);
  int dimensions = TestUtil.nextInt(random(), 1, 8);
  int bytesPerDimensions = TestUtil.nextInt(random(), 2, 30);
  int packedLength = dimensions * bytesPerDimensions;
  PointWriter points = getRandomPointWriter(dir, values, packedLength);
  byte[] value = new byte[packedLength];
  random().nextBytes(value);
  for (int i = 0; i < values; i++) {
    if (random().nextBoolean()) {
      points.append(value, i);
    } else {
      points.append(value, random().nextInt(values));
    }
  }
  points.close();
  verify(dir, points, dimensions, dimensions, 0, values, partitionPoint, packedLength, bytesPerDimensions, sortedOnHeap);
  dir.close();
}
 
Example 4
Source File: TestTaxonomyCombined.java    From lucene-solr with Apache License 2.0
@Test
public void testNRT() throws Exception {
  Directory dir = newDirectory();
  DirectoryTaxonomyWriter writer = new DirectoryTaxonomyWriter(dir);
  TaxonomyReader reader = new DirectoryTaxonomyReader(writer);
  
  FacetLabel cp = new FacetLabel("a");
  writer.addCategory(cp);
  TaxonomyReader newReader = TaxonomyReader.openIfChanged(reader);
  assertNotNull("expected a new instance", newReader);
  assertEquals(2, newReader.getSize());
  assertNotSame(TaxonomyReader.INVALID_ORDINAL, newReader.getOrdinal(cp));
  reader.close();
  reader = newReader;
  
  writer.close();
  reader.close();
  
  dir.close();
}
 
Example 5
Source File: TestNot.java    From lucene-solr with Apache License 2.0
public void testNot() throws Exception {
  Directory store = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), store);

  Document d1 = new Document();
  d1.add(newTextField("field", "a b", Field.Store.YES));

  writer.addDocument(d1);
  IndexReader reader = writer.getReader();

  IndexSearcher searcher = newSearcher(reader);

  BooleanQuery.Builder query = new BooleanQuery.Builder();
  query.add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD);
  query.add(new TermQuery(new Term("field", "b")), BooleanClause.Occur.MUST_NOT);

  ScoreDoc[] hits = searcher.search(query.build(), 1000).scoreDocs;
  assertEquals(0, hits.length);
  writer.close();
  reader.close();
  store.close();
}
 
Example 6
Source File: TestIndexWriter.java    From lucene-solr with Apache License 2.0
public void testChangesAfterClose() throws IOException {
  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));

  addDoc(writer);

  // close
  writer.close();
  expectThrows(AlreadyClosedException.class, () -> {
    addDoc(writer);
  });

  dir.close();
}
 
Example 7
Source File: TestIndexWriterExceptions.java    From lucene-solr with Apache License 2.0
public void testExceptionOnCtor() throws Exception {
  UOEDirectory uoe = new UOEDirectory();
  Directory d = new MockDirectoryWrapper(random(), uoe);
  IndexWriter iw = new IndexWriter(d, newIndexWriterConfig(null));
  iw.addDocument(new Document());
  iw.close();
  uoe.doFail = true;
  expectThrows(UnsupportedOperationException.class, () -> {
    new IndexWriter(d, newIndexWriterConfig(null));
  });

  uoe.doFail = false;
  d.close();
}
 
Example 8
Source File: TestRangeFacetCounts.java    From lucene-solr with Apache License 2.0
public void testLongMinMax() throws Exception {
  Directory d = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), d);
  Document doc = new Document();
  NumericDocValuesField field = new NumericDocValuesField("field", 0L);
  doc.add(field);
  field.setLongValue(Long.MIN_VALUE);
  w.addDocument(doc);
  field.setLongValue(0);
  w.addDocument(doc);
  field.setLongValue(Long.MAX_VALUE);
  w.addDocument(doc);

  IndexReader r = w.getReader();
  w.close();

  FacetsCollector fc = new FacetsCollector();
  IndexSearcher s = newSearcher(r);
  s.search(new MatchAllDocsQuery(), fc);

  Facets facets = new LongRangeFacetCounts("field", fc,
      new LongRange("min", Long.MIN_VALUE, true, Long.MIN_VALUE, true),
      new LongRange("max", Long.MAX_VALUE, true, Long.MAX_VALUE, true),
      new LongRange("all0", Long.MIN_VALUE, true, Long.MAX_VALUE, true),
      new LongRange("all1", Long.MIN_VALUE, false, Long.MAX_VALUE, true),
      new LongRange("all2", Long.MIN_VALUE, true, Long.MAX_VALUE, false),
      new LongRange("all3", Long.MIN_VALUE, false, Long.MAX_VALUE, false));

  FacetResult result = facets.getTopChildren(10, "field");
  assertEquals("dim=field path=[] value=3 childCount=6\n  min (1)\n  max (1)\n  all0 (3)\n  all1 (2)\n  all2 (2)\n  all3 (1)\n",
               result.toString());

  r.close();
  d.close();
}
 
Example 9
Source File: TestBinaryDocValuesUpdates.java    From lucene-solr with Apache License 2.0
public void testUpdateDocumentByMultipleTerms() throws Exception {
  // make sure the order of updates is respected, even when multiple terms affect same document
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);
  
  Document doc = new Document();
  doc.add(new StringField("k1", "v1", Store.NO));
  doc.add(new StringField("k2", "v2", Store.NO));
  doc.add(new BinaryDocValuesField("bdv", toBytes(5L)));
  writer.addDocument(doc); // flushed document
  writer.commit();
  writer.addDocument(doc); // in-memory document
  
  writer.updateBinaryDocValue(new Term("k1", "v1"), "bdv", toBytes(17L));
  writer.updateBinaryDocValue(new Term("k2", "v2"), "bdv", toBytes(3L));
  writer.close();
  
  final DirectoryReader reader = DirectoryReader.open(dir);
  BinaryDocValues bdv = MultiDocValues.getBinaryValues(reader, "bdv");
  for (int i = 0; i < reader.maxDoc(); i++) {
    assertEquals(i, bdv.nextDoc());
    assertEquals(3, getValue(bdv));
  }
  reader.close();
  dir.close();
}
 
Example 10
Source File: TestDocValuesIndexing.java    From lucene-solr with Apache License 2.0
public void testTooLargeTermSortedSetBytes() throws IOException {
  Analyzer analyzer = new MockAnalyzer(random());

  Directory directory = newDirectory();
  // we don't use RandomIndexWriter because it might add more docvalues than we expect!
  IndexWriterConfig iwc = newIndexWriterConfig(analyzer);
  iwc.setMergePolicy(newLogMergePolicy());
  IndexWriter iwriter = new IndexWriter(directory, iwc);
  Document doc = new Document();
  doc.add(new SortedSetDocValuesField("dv", new BytesRef("just fine")));
  iwriter.addDocument(doc);
  
  Document hugeDoc = new Document();
  byte[] bytes = new byte[100000];
  BytesRef b = new BytesRef(bytes);
  random().nextBytes(bytes);
  hugeDoc.add(new SortedSetDocValuesField("dv", b));
  expectThrows(IllegalArgumentException.class, () -> {
    iwriter.addDocument(hugeDoc);
  });

  IndexReader ir = iwriter.getReader();
  assertEquals(1, ir.numDocs());
  ir.close();
  iwriter.close();
  directory.close();
}
 
Example 11
Source File: TestRangeFacetCounts.java    From lucene-solr with Apache License 2.0
public void testOverlappedEndStart() throws Exception {
  Directory d = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), d);
  Document doc = new Document();
  NumericDocValuesField field = new NumericDocValuesField("field", 0L);
  doc.add(field);
  for(long l=0;l<100;l++) {
    field.setLongValue(l);
    w.addDocument(doc);
  }
  field.setLongValue(Long.MAX_VALUE);
  w.addDocument(doc);

  IndexReader r = w.getReader();
  w.close();

  FacetsCollector fc = new FacetsCollector();
  IndexSearcher s = newSearcher(r);
  s.search(new MatchAllDocsQuery(), fc);

  Facets facets = new LongRangeFacetCounts("field", fc,
      new LongRange("0-10", 0L, true, 10L, true),
      new LongRange("10-20", 10L, true, 20L, true),
      new LongRange("20-30", 20L, true, 30L, true),
      new LongRange("30-40", 30L, true, 40L, true));
  
  FacetResult result = facets.getTopChildren(10, "field");
  assertEquals("dim=field path=[] value=41 childCount=4\n  0-10 (11)\n  10-20 (11)\n  20-30 (11)\n  30-40 (11)\n",
               result.toString());
  
  r.close();
  d.close();
}
 
Example 12
Source File: TestDictionary.java    From lucene-solr with Apache License 2.0
public void testInvalidFlags() throws Exception {
  InputStream affixStream = getClass().getResourceAsStream("broken-flags.aff");
  InputStream dictStream = getClass().getResourceAsStream("simple.dic");
  Directory tempDir = getDirectory();
  
  Exception expected = expectThrows(Exception.class, () -> {
    new Dictionary(tempDir, "dictionary", affixStream, dictStream);
  });
  assertTrue(expected.getMessage().startsWith("expected only one flag"));
  
  affixStream.close();
  dictStream.close();
  tempDir.close();
}
 
Example 13
Source File: TermVectorsAdapterTest.java    From lucene-solr with Apache License 2.0
@Override
protected void createIndex() throws IOException {
  indexDir = createTempDir("testIndex");

  Directory dir = newFSDirectory(indexDir);
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, new StandardAnalyzer());

  FieldType textType = new FieldType();
  textType.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
  textType.setTokenized(true);
  textType.setStoreTermVectors(true);

  FieldType textType_pos = new FieldType();
  textType_pos.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
  textType_pos.setTokenized(true);
  textType_pos.setStoreTermVectors(true);
  textType_pos.setStoreTermVectorPositions(true);

  FieldType textType_pos_offset = new FieldType();
  textType_pos_offset.setIndexOptions(IndexOptions.DOCS_AND_FREQS);
  textType_pos_offset.setTokenized(true);
  textType_pos_offset.setStoreTermVectors(true);
  textType_pos_offset.setStoreTermVectorPositions(true);
  textType_pos_offset.setStoreTermVectorOffsets(true);

  String text = "It is a truth universally acknowledged, that a single man in possession of a good fortune, must be in want of a wife.";
  Document doc = new Document();
  doc.add(newField("text1", text, textType));
  doc.add(newField("text2", text, textType_pos));
  doc.add(newField("text3", text, textType_pos_offset));
  writer.addDocument(doc);

  writer.commit();
  writer.close();
  dir.close();
}
 
Example 14
Source File: TestCodecs.java    From lucene-solr with Apache License 2.0
public void testDocsOnlyFreq() throws Exception {
  // tests that when fields are indexed with DOCS_ONLY, the Codec
  // returns 1 in docsEnum.freq()
  Directory dir = newDirectory();
  Random random = random();
  IndexWriter writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random)));
  // we don't need many documents to assert this, but don't use one document either
  int numDocs = atLeast(random, 50);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    doc.add(new StringField("f", "doc", Store.NO));
    writer.addDocument(doc);
  }
  writer.close();
  
  Term term = new Term("f", new BytesRef("doc"));
  DirectoryReader reader = DirectoryReader.open(dir);
  for (LeafReaderContext ctx : reader.leaves()) {
    PostingsEnum de = ctx.reader().postings(term);
    while (de.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
      assertEquals("wrong freq for doc " + de.docID(), 1, de.freq());
    }
  }
  reader.close();
  
  dir.close();
}
 
Example 15
Source File: TestIndexOptions.java    From lucene-solr with Apache License 2.0
private void doTestChangeIndexOptionsViaAddDocument(boolean preExistingField, boolean onNewSegment, IndexOptions from, IndexOptions to) throws IOException {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
  if (preExistingField) {
    w.addDocument(Collections.singleton(new IntPoint("foo", 1)));
    if (onNewSegment) {
      DirectoryReader.open(w).close();
    }
  }
  FieldType ft1 = new FieldType(TextField.TYPE_STORED);
  ft1.setIndexOptions(from);
  w.addDocument(Collections.singleton(new Field("foo", "bar", ft1)));
  if (onNewSegment) {
    DirectoryReader.open(w).close();
  }
  FieldType ft2 = new FieldType(TextField.TYPE_STORED);
  ft2.setIndexOptions(to);
  if (from == IndexOptions.NONE || to == IndexOptions.NONE || from == to) {
    w.addDocument(Collections.singleton(new Field("foo", "bar", ft2))); // no exception
    w.forceMerge(1);
    try (LeafReader r = getOnlyLeafReader(DirectoryReader.open(w))) {
      IndexOptions expected = from == IndexOptions.NONE ? to : from;
      assertEquals(expected, r.getFieldInfos().fieldInfo("foo").getIndexOptions());
    }
  } else {
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
        () -> w.addDocument(Collections.singleton(new Field("foo", "bar", ft2))));
    assertEquals("cannot change field \"foo\" from index options=" + from +
        " to inconsistent index options=" + to, e.getMessage());
  }
  w.close();
  dir.close();
}
 
Example 16
Source File: TestBooleanQuery.java    From lucene-solr with Apache License 2.0
public void testDeMorgan() throws Exception {
  Directory dir1 = newDirectory();
  RandomIndexWriter iw1 = new RandomIndexWriter(random(), dir1);
  Document doc1 = new Document();
  doc1.add(newTextField("field", "foo bar", Field.Store.NO));
  iw1.addDocument(doc1);
  IndexReader reader1 = iw1.getReader();
  iw1.close();

  Directory dir2 = newDirectory();
  RandomIndexWriter iw2 = new RandomIndexWriter(random(), dir2);
  Document doc2 = new Document();
  doc2.add(newTextField("field", "foo baz", Field.Store.NO));
  iw2.addDocument(doc2);
  IndexReader reader2 = iw2.getReader();
  iw2.close();

  BooleanQuery.Builder query = new BooleanQuery.Builder(); // Query: +foo -ba*
  query.add(new TermQuery(new Term("field", "foo")), BooleanClause.Occur.MUST);
  WildcardQuery wildcardQuery = new WildcardQuery(new Term("field", "ba*"));
  wildcardQuery.setRewriteMethod(MultiTermQuery.SCORING_BOOLEAN_REWRITE);
  query.add(wildcardQuery, BooleanClause.Occur.MUST_NOT);

  MultiReader multireader = new MultiReader(reader1, reader2);
  IndexSearcher searcher = newSearcher(multireader);
  assertEquals(0, searcher.search(query.build(), 10).totalHits.value);

  final ExecutorService es = Executors.newCachedThreadPool(new NamedThreadFactory("NRT search threads"));
  searcher = new IndexSearcher(multireader, es);
  if (VERBOSE) {
    System.out.println("rewritten form: " + searcher.rewrite(query.build()));
  }
  assertEquals(0, searcher.search(query.build(), 10).totalHits.value);
  es.shutdown();
  es.awaitTermination(1, TimeUnit.SECONDS);

  multireader.close();
  reader1.close();
  reader2.close();
  dir1.close();
  dir2.close();
}
 
Example 17
Source File: FastVectorHighlighterTest.java    From lucene-solr with Apache License 2.0
private void matchedFieldsTestCase( boolean useMatchedFields, boolean fieldMatch, String fieldValue, String expected, Query... queryClauses ) throws IOException {
  Document doc = new Document();
  FieldType stored = new FieldType( TextField.TYPE_STORED );
  stored.setStoreTermVectorOffsets( true );
  stored.setStoreTermVectorPositions( true );
  stored.setStoreTermVectors( true );
  stored.freeze();
  FieldType matched = new FieldType( TextField.TYPE_NOT_STORED );
  matched.setStoreTermVectorOffsets( true );
  matched.setStoreTermVectorPositions( true );
  matched.setStoreTermVectors( true );
  matched.freeze();
  doc.add( new Field( "field", fieldValue, stored ) );               // Whitespace tokenized with English stop words
  doc.add( new Field( "field_exact", fieldValue, matched ) );        // Whitespace tokenized without stop words
  doc.add( new Field( "field_super_exact", fieldValue, matched ) );  // Whitespace tokenized without toLower
  doc.add( new Field( "field_characters", fieldValue, matched ) );   // Each letter is a token
  doc.add( new Field( "field_tripples", fieldValue, matched ) );     // Every three letters is a token
  doc.add( new Field( "field_sliced", fieldValue.substring( 0,       // Sliced at 10 chars then analyzed just like field
    Math.min( fieldValue.length() - 1 , 10 ) ), matched ) );
  doc.add( new Field( "field_der_red", new CannedTokenStream(        // Hacky field containing "der" and "red" at pos = 0
        token( "der", 1, 0, 3 ),
        token( "red", 0, 0, 3 )
      ), matched ) );

  final Map<String, Analyzer> fieldAnalyzers = new TreeMap<>();
  fieldAnalyzers.put( "field", new MockAnalyzer( random(), MockTokenizer.WHITESPACE, true, MockTokenFilter.ENGLISH_STOPSET ) );
  fieldAnalyzers.put( "field_exact", new MockAnalyzer( random() ) );
  fieldAnalyzers.put( "field_super_exact", new MockAnalyzer( random(), MockTokenizer.WHITESPACE, false ) );
  fieldAnalyzers.put( "field_characters", new MockAnalyzer( random(), new CharacterRunAutomaton( new RegExp(".").toAutomaton() ), true ) );
  fieldAnalyzers.put( "field_tripples", new MockAnalyzer( random(), new CharacterRunAutomaton( new RegExp("...").toAutomaton() ), true ) );
  fieldAnalyzers.put( "field_sliced", fieldAnalyzers.get( "field" ) );
  fieldAnalyzers.put( "field_der_red", fieldAnalyzers.get( "field" ) );  // This is required even though we provide a token stream
  Analyzer analyzer = new DelegatingAnalyzerWrapper(Analyzer.PER_FIELD_REUSE_STRATEGY) {
    public Analyzer getWrappedAnalyzer(String fieldName) {
      return fieldAnalyzers.get( fieldName );
    }
  };

  Directory dir = newDirectory();
  IndexWriter writer = new IndexWriter( dir, newIndexWriterConfig(analyzer));
  writer.addDocument( doc );

  FastVectorHighlighter highlighter = new FastVectorHighlighter();
  FragListBuilder fragListBuilder = new SimpleFragListBuilder();
  FragmentsBuilder fragmentsBuilder = new ScoreOrderFragmentsBuilder();
  IndexReader reader = DirectoryReader.open(writer);
  String[] preTags = new String[] { "<b>" };
  String[] postTags = new String[] { "</b>" };
  Encoder encoder = new DefaultEncoder();
  int docId = 0;
  BooleanQuery.Builder query = new BooleanQuery.Builder();
  for ( Query clause : queryClauses ) {
    query.add( clause, Occur.MUST );
  }
  FieldQuery fieldQuery = new FieldQuery( query.build(), reader, true, fieldMatch );
  String[] bestFragments;
  if ( useMatchedFields ) {
    Set< String > matchedFields = new HashSet<>();
    matchedFields.add( "field" );
    matchedFields.add( "field_exact" );
    matchedFields.add( "field_super_exact" );
    matchedFields.add( "field_characters" );
    matchedFields.add( "field_tripples" );
    matchedFields.add( "field_sliced" );
    matchedFields.add( "field_der_red" );
    bestFragments = highlighter.getBestFragments( fieldQuery, reader, docId, "field", matchedFields, 25, 1,
      fragListBuilder, fragmentsBuilder, preTags, postTags, encoder );
  } else {
    bestFragments = highlighter.getBestFragments( fieldQuery, reader, docId, "field", 25, 1,
      fragListBuilder, fragmentsBuilder, preTags, postTags, encoder );
  }
  assertEquals( expected, bestFragments[ 0 ] );

  reader.close();
  writer.close();
  dir.close();
}
 
Example 18
Source File: BaseDocValuesFormatTestCase.java    From lucene-solr with Apache License 2.0
@Nightly
public void testThreads3() throws Exception {
  Directory dir = newFSDirectory(createTempDir());
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, conf);
  
  int numSortedSets = random().nextInt(21);
  int numBinaries = random().nextInt(21);
  int numSortedNums = random().nextInt(21);
  
  int numDocs = TestUtil.nextInt(random(), 2025, 2047);
  for (int i = 0; i < numDocs; i++) {
    Document doc = new Document();
    
    for (int j = 0; j < numSortedSets; j++) {
      doc.add(new SortedSetDocValuesField("ss" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
      doc.add(new SortedSetDocValuesField("ss" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
    }
    
    for (int j = 0; j < numBinaries; j++) {
      doc.add(new BinaryDocValuesField("b" + j, new BytesRef(TestUtil.randomSimpleString(random()))));
    }
    
    for (int j = 0; j < numSortedNums; j++) {
      doc.add(new SortedNumericDocValuesField("sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE)));
      doc.add(new SortedNumericDocValuesField("sn" + j, TestUtil.nextLong(random(), Long.MIN_VALUE, Long.MAX_VALUE)));
    }
    writer.addDocument(doc);
  }
  writer.close();
  
  // now check with threads
  for (int i = 0; i < 10; i++) {
    final DirectoryReader r = DirectoryReader.open(dir);
    final CountDownLatch startingGun = new CountDownLatch(1);
    Thread[] threads = new Thread[TestUtil.nextInt(random(), 4, 10)];
    for (int tid = 0; tid < threads.length; tid++) {
      threads[tid] = new Thread() {
        @Override
        public void run() {
          try {
            ByteArrayOutputStream bos = new ByteArrayOutputStream(1024);
            PrintStream infoStream = new PrintStream(bos, false, IOUtils.UTF_8);
            startingGun.await();
            for (LeafReaderContext leaf : r.leaves()) {
              DocValuesStatus status = CheckIndex.testDocValues((SegmentReader)leaf.reader(), infoStream, true);
              if (status.error != null) {
                throw status.error;
              }
            }
          } catch (Throwable e) {
            throw new RuntimeException(e);
          }
        }
      };
    }
    for (int tid = 0; tid < threads.length; tid++) {
      threads[tid].start();
    }
    startingGun.countDown();
    for (int tid = 0; tid < threads.length; tid++) {
      threads[tid].join();
    }
    r.close();
  }

  dir.close();
}
 
Example 19
Source File: TestConcurrentMergeScheduler.java    From lucene-solr with Apache License 2.0
public void testHangDuringRollback() throws Throwable {
  Directory dir = newMockDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  iwc.setMaxBufferedDocs(2);
  LogDocMergePolicy mp = new LogDocMergePolicy();
  iwc.setMergePolicy(mp);
  mp.setMergeFactor(2);
  final CountDownLatch mergeStart = new CountDownLatch(1);
  final CountDownLatch mergeFinish = new CountDownLatch(1);
  ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler() {
      @Override
      protected void doMerge(MergeSource mergeSource, MergePolicy.OneMerge merge) throws IOException {
        mergeStart.countDown();
        try {
          mergeFinish.await();
        } catch (InterruptedException ie) {
          throw new RuntimeException(ie);
        }
        super.doMerge(mergeSource, merge);
      }
    };
  cms.setMaxMergesAndThreads(1, 1);
  iwc.setMergeScheduler(cms);

  final IndexWriter w = new IndexWriter(dir, iwc);
  
  w.addDocument(new Document());
  w.addDocument(new Document());
  // flush

  w.addDocument(new Document());
  w.addDocument(new Document());
  // flush + merge

  // Wait for merge to kick off
  mergeStart.await();

  new Thread() {
    @Override
    public void run() {
      try {
        w.addDocument(new Document());
        w.addDocument(new Document());
        // flush

        w.addDocument(new Document());
        // W/o the fix for LUCENE-6094 we would hang forever here:
        w.addDocument(new Document());
        // flush + merge
        
        // Now allow first merge to finish:
        mergeFinish.countDown();

      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  }.start();

  while (w.getDocStats().numDocs != 8) {
    Thread.sleep(10);
  }

  w.rollback();
  dir.close();
}
 
Example 20
Source File: TestNumericDocValuesUpdates.java    From lucene-solr with Apache License 2.0
public void testDocumentWithNoValue() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  IndexWriter writer = new IndexWriter(dir, conf);
  
  for (int i = 0; i < 2; i++) {
    Document doc = new Document();
    doc.add(new StringField("dvUpdateKey", "dv", Store.NO));
    if (i == 0) { // index only one document with value
      doc.add(new NumericDocValuesField("ndv", 5));
    }
    writer.addDocument(doc);
  }
  writer.commit();
  if (VERBOSE) {
    System.out.println("TEST: first commit");
  }
  
  // update all docs' ndv field
  writer.updateNumericDocValue(new Term("dvUpdateKey", "dv"), "ndv", 17L);
  if (VERBOSE) {
    System.out.println("TEST: first close");
  }
  writer.close();
  if (VERBOSE) {
    System.out.println("TEST: done close");
  }
  
  final DirectoryReader reader = DirectoryReader.open(dir);
  if (VERBOSE) {
    System.out.println("TEST: got reader=reader");
  }
  LeafReader r = reader.leaves().get(0).reader();
  NumericDocValues ndv = r.getNumericDocValues("ndv");
  for (int i = 0; i < r.maxDoc(); i++) {
    assertEquals(i, ndv.nextDoc());
    assertEquals("doc=" + i + " has wrong numeric doc value", 17, ndv.longValue());
  }
  
  reader.close();
  dir.close();
}