org.apache.lucene.util.IOUtils Java Examples

The following examples show how to use org.apache.lucene.util.IOUtils, Lucene's utility class for safely closing, deleting, and syncing I/O resources. Each example notes its source file, the project it comes from, and that project's license.
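Nearly every example below ends with one of two idioms from this class: IOUtils.close(Closeable...), which attempts to close every argument (null entries are ignored) and rethrows the first exception it hit once all closes have been attempted, and IOUtils.closeWhileHandlingException(Closeable...), which suppresses close failures so an in-flight exception is not masked. Here is a minimal, self-contained sketch of the first idiom, assuming a recent Lucene (8+) on the classpath; the ByteBuffersDirectory and single empty document are purely illustrative:

import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.IOUtils;

public class IOUtilsCloseDemo {
  public static void main(String[] args) throws IOException {
    Directory dir = new ByteBuffersDirectory();
    // a null analyzer is fine here, as in several examples below,
    // because the document contains no analyzed fields
    IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(null));
    writer.addDocument(new Document());
    DirectoryReader reader = DirectoryReader.open(writer);
    // Every argument is closed even if an earlier close() throws;
    // the first exception is rethrown after all have been attempted.
    IOUtils.close(reader, writer, dir);
  }
}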
Example #1
Source File: TestTaxonomyFacetAssociations.java    From lucene-solr with Apache License 2.0
public void testMixedTypesInSameIndexField() throws Exception {
  Directory dir = newDirectory();
  Directory taxoDir = newDirectory();
  
  TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
  FacetsConfig config = new FacetsConfig();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  Document doc = new Document();
  doc.add(new IntAssociationFacetField(14, "a", "x"));
  doc.add(new FloatAssociationFacetField(55.0f, "b", "y"));
  expectThrows(IllegalArgumentException.class, () -> {
    writer.addDocument(config.build(taxoWriter, doc));
  });
  writer.close();
  IOUtils.close(taxoWriter, dir, taxoDir);
}
 
Example #2
Source File: KoreanTokenizerFactory.java    From lucene-solr with Apache License 2.0
@Override
public void inform(ResourceLoader loader) throws IOException {
  if (userDictionaryPath != null) {
    try (InputStream stream = loader.openResource(userDictionaryPath)) {
      String encoding = userDictionaryEncoding;
      if (encoding == null) {
        encoding = IOUtils.UTF_8;
      }
      CharsetDecoder decoder = Charset.forName(encoding).newDecoder()
        .onMalformedInput(CodingErrorAction.REPORT)
        .onUnmappableCharacter(CodingErrorAction.REPORT);
      Reader reader = new InputStreamReader(stream, decoder);
      userDictionary = UserDictionary.open(reader);
    }
  } else {
    userDictionary = null;
  }
}
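IOUtils.UTF_8 used above is simply the string constant "UTF-8" (the name of StandardCharsets.UTF_8). When no user-supplied encoding can override it, the same strict decoder can be built directly from the charset. A hypothetical helper, not part of any library, showing the equivalent construction; CodingErrorAction.REPORT makes malformed bytes fail fast instead of being silently replaced:

import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.StandardCharsets;

public class StrictUtf8 {
  // Illustrative helper: equivalent to the decoder built in inform()
  // when userDictionaryEncoding is null.
  static Reader strictUtf8Reader(InputStream stream) {
    CharsetDecoder decoder = StandardCharsets.UTF_8.newDecoder()
        .onMalformedInput(CodingErrorAction.REPORT)
        .onUnmappableCharacter(CodingErrorAction.REPORT);
    return new InputStreamReader(stream, decoder);
  }
}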
 
Example #3
Source File: BlockTermsWriter.java    From lucene-solr with Apache License 2.0
@Override
public void close() throws IOException {
  if (out != null) {
    try {
      final long dirStart = out.getFilePointer();
      
      out.writeVInt(fields.size());
      for(FieldMetaData field : fields) {
        out.writeVInt(field.fieldInfo.number);
        out.writeVLong(field.numTerms);
        out.writeVLong(field.termsStartPointer);
        if (field.fieldInfo.getIndexOptions() != IndexOptions.DOCS) {
          out.writeVLong(field.sumTotalTermFreq);
        }
        out.writeVLong(field.sumDocFreq);
        out.writeVInt(field.docCount);
      }
      writeTrailer(dirStart);
      CodecUtil.writeFooter(out);
    } finally {
      IOUtils.close(out, postingsWriter, termsIndexWriter);
      out = null;
    }
  }
}
 
Example #4
Source File: RandomIndexWriter.java    From lucene-solr with Apache License 2.0
/**
 * Close this writer.
 * @see IndexWriter#close()
 */
@Override
public void close() throws IOException {
  boolean success = false;
  try {
    if (w.isClosed() == false) {
      LuceneTestCase.maybeChangeLiveIndexWriterConfig(r, config);
    }
    // if someone isn't using getReader() API, we want to be sure to
    // forceMerge since presumably they might open a reader on the dir.
    if (getReaderCalled == false && r.nextInt(8) == 2 && w.isClosed() == false) {
      doRandomForceMerge();
      if (config.getCommitOnClose() == false) {
        // index may have changed, must commit the changes, or otherwise they are discarded by the call to close()
        w.commit();
      }
    }
    success = true;
  } finally {
    if (success) {
      IOUtils.close(w, analyzer);
    } else {
      IOUtils.closeWhileHandlingException(w, analyzer);
    }
  }
}
 
Example #5
Source File: SuggestUtils.java    From Elasticsearch with Apache License 2.0
/** NOTE: this method closes the TokenStream, even on exception, which is awkward
 *  because really the caller who called {@link Analyzer#tokenStream} should close it,
 *  but when trying that there are recursion issues when we try to use the same
 *  TokenStream twice in the same recursion... */
public static int analyze(TokenStream stream, TokenConsumer consumer) throws IOException {
    int numTokens = 0;
    boolean success = false;
    try {
        stream.reset();
        consumer.reset(stream);
        while (stream.incrementToken()) {
            consumer.nextToken();
            numTokens++;
        }
        consumer.end();
        success = true;
    } finally {
        if (success) {
            stream.close();
        } else {
            IOUtils.closeWhileHandlingException(stream);
        }
    }
    return numTokens;
}
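The success flag (set only after consumer.end() completes; without that assignment the happy-path branch below would be dead code) is what makes this idiom work: on success the stream is closed with a plain close() whose failure is worth propagating, while on failure IOUtils.closeWhileHandlingException keeps a secondary close() problem from masking the original exception. A generic sketch of the pattern; the helper and its names are illustrative, not part of any library:

import java.io.Closeable;
import java.io.IOException;
import org.apache.lucene.util.IOUtils;

public class SuccessFlagDemo {
  // Illustrative helper: run some work against a resource, then close it
  // with the IOUtils variant appropriate to how the try block ended.
  static void useAndClose(Closeable resource, Runnable work) throws IOException {
    boolean success = false;
    try {
      work.run();
      success = true;
    } finally {
      if (success) {
        IOUtils.close(resource);                       // propagate close() failures
      } else {
        IOUtils.closeWhileHandlingException(resource); // suppress them; keep the original exception
      }
    }
  }
}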
 
Example #6
Source File: TestOrdinalMappingLeafReader.java    From lucene-solr with Apache License 2.0
@Test
public void testTaxonomyMergeUtils() throws Exception {
  Directory srcIndexDir = newDirectory();
  Directory srcTaxoDir = newDirectory();
  buildIndexWithFacets(srcIndexDir, srcTaxoDir, true);
  
  Directory targetIndexDir = newDirectory();
  Directory targetTaxoDir = newDirectory();
  buildIndexWithFacets(targetIndexDir, targetTaxoDir, false);
  
  IndexWriter destIndexWriter = new IndexWriter(targetIndexDir, newIndexWriterConfig(null));
  DirectoryTaxonomyWriter destTaxoWriter = new DirectoryTaxonomyWriter(targetTaxoDir);
  try {
    TaxonomyMergeUtils.merge(srcIndexDir, srcTaxoDir, new MemoryOrdinalMap(), destIndexWriter, destTaxoWriter, facetConfig);
  } finally {
    IOUtils.close(destIndexWriter, destTaxoWriter);
  }
  verifyResults(targetIndexDir, targetTaxoDir);
  
  IOUtils.close(targetIndexDir, targetTaxoDir, srcIndexDir, srcTaxoDir);
}
 
Example #7
Source File: TestDuelingAnalyzers.java    From lucene-solr with Apache License 2.0
public void testLetterAsciiHuge() throws Exception {
  Random random = random();
  int maxLength = 8192; // CharTokenizer.IO_BUFFER_SIZE*2
  MockAnalyzer left = new MockAnalyzer(random, jvmLetter, false);
  left.setMaxTokenLength(255); // match CharTokenizer's max token length
  Analyzer right = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer tokenizer = new LetterTokenizer(newAttributeFactory());
      return new TokenStreamComponents(tokenizer, tokenizer);
    }
  };
  int numIterations = atLeast(10);
  for (int i = 0; i < numIterations; i++) {
    String s = TestUtil.randomSimpleString(random, maxLength);
    assertEquals(s, left.tokenStream("foo", newStringReader(s)), 
                 right.tokenStream("foo", newStringReader(s)));
  }
  IOUtils.close(left, right);
}
 
Example #8
Source File: TestDirectSpellChecker.java    From lucene-solr with Apache License 2.0
public void testTransposition() throws Exception {
  DirectSpellChecker spellChecker = new DirectSpellChecker();
  Directory dir = newDirectory();
  Analyzer analyzer = new MockAnalyzer(random(), MockTokenizer.SIMPLE, true);
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir, analyzer);

  for (int i = 0; i < 20; i++) {
    Document doc = new Document();
    doc.add(newTextField("numbers", English.intToEnglish(i), Field.Store.NO));
    writer.addDocument(doc);
  }

  IndexReader ir = writer.getReader();

  SuggestWord[] similar = spellChecker.suggestSimilar(new Term(
      "numbers", "fvie"), 1, ir,
      SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX);
  assertEquals(1, similar.length);
  assertEquals("five", similar[0].string);
  
  IOUtils.close(ir, writer, dir, analyzer);
}
 
Example #9
Source File: TestTaxonomyFacetCounts.java    From lucene-solr with Apache License 2.0
public void testDetectMultiValuedField() throws Exception {
  Directory dir = newDirectory();
  Directory taxoDir = newDirectory();
  TaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir, IndexWriterConfig.OpenMode.CREATE);
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);
  FacetsConfig config = new FacetsConfig();

  Document doc = new Document();
  doc.add(newTextField("field", "text", Field.Store.NO));
  doc.add(new FacetField("a", "path"));
  doc.add(new FacetField("a", "path2"));
  expectThrows(IllegalArgumentException.class, () -> {
    config.build(taxoWriter, doc);
  });

  writer.close();
  IOUtils.close(taxoWriter, dir, taxoDir);
}
 
Example #10
Source File: TestDuelingAnalyzers.java    From lucene-solr with Apache License 2.0
public void testLetterAscii() throws Exception {
  Random random = random();
  Analyzer left = new MockAnalyzer(random, jvmLetter, false);
  Analyzer right = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer tokenizer = new LetterTokenizer(newAttributeFactory());
      return new TokenStreamComponents(tokenizer, tokenizer);
    }
  };
  for (int i = 0; i < 200; i++) {
    String s = TestUtil.randomSimpleString(random);
    assertEquals(s, left.tokenStream("foo", newStringReader(s)), 
                 right.tokenStream("foo", newStringReader(s)));
  }
  IOUtils.close(left, right);
}
 
Example #11
Source File: TestTermAutomatonQuery.java    From lucene-solr with Apache License 2.0
public void testOneTermDoesNotExist() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter w = new RandomIndexWriter(random(), dir);
  Document doc = new Document();
  doc.add(newTextField("field", "x y z", Field.Store.NO));
  w.addDocument(doc);

  IndexReader r = w.getReader();
  IndexSearcher s = newSearcher(r);

  TokenStream ts = new CannedTokenStream(new Token[] {
      token("a", 1, 1),
      token("x", 1, 1),
    });

  TermAutomatonQuery q = new TokenStreamToTermAutomatonQuery().toQuery("field", ts);
  // System.out.println("DOT: " + q.toDot());
  assertEquals(0, s.search(q, 1).totalHits.value);

  IOUtils.close(w, r, dir);
}
 
Example #12
Source File: MergePolicy.java    From lucene-solr with Apache License 2.0
/**
 * Sets the merge readers for this merge.
 */
void initMergeReaders(IOUtils.IOFunction<SegmentCommitInfo, MergeReader> readerFactory) throws IOException {
  assert mergeReaders.isEmpty() : "merge readers must be empty";
  assert mergeCompleted.isDone() == false : "merge is already done";
  final ArrayList<MergeReader> readers = new ArrayList<>(segments.size());
  try {
    for (final SegmentCommitInfo info : segments) {
      // Hold onto the "live" reader; we will use this to
      // commit merged deletes
      readers.add(readerFactory.apply(info));
    }
  } finally {
    // always assign here, even on exception, so the caller can close whatever readers were opened
    this.mergeReaders = List.copyOf(readers); // copy so that mergeReaders is an immutable list
  }
}
 
Example #13
Source File: TestLatLonShape.java    From lucene-solr with Apache License 2.0
public void testLUCENE8454() throws Exception {
  Directory dir = newDirectory();
  RandomIndexWriter writer = new RandomIndexWriter(random(), dir);

  Polygon poly = new Polygon(new double[] {-1.490648725633769E-132d, 90d, 90d, -1.490648725633769E-132d},
      new double[] {0d, 0d, 180d, 0d});

  Document document = new Document();
  addPolygonsToDoc(FIELDNAME, document, poly);
  writer.addDocument(document);

  ///// search //////
  IndexReader reader = writer.getReader();
  writer.close();
  IndexSearcher searcher = newSearcher(reader);

  // search a bbox in the hole
  Query q = LatLonShape.newBoxQuery(FIELDNAME, QueryRelation.DISJOINT,-29.46555603761226d, 0.0d, 8.381903171539307E-8d, 0.9999999403953552d);
  assertEquals(1, searcher.count(q));

  IOUtils.close(reader, dir);
}
 
Example #14
Source File: PerfRunData.java    From lucene-solr with Apache License 2.0
private Directory createDirectory(boolean eraseIndex, String dirName,
    String dirParam) throws IOException {
  String dirImpl = config.get(dirParam, DEFAULT_DIRECTORY);
  if ("FSDirectory".equals(dirImpl)) {
    Path workDir = Paths.get(config.get("work.dir", "work"));
    Path indexDir = workDir.resolve(dirName);
    if (eraseIndex && Files.exists(indexDir)) {
      IOUtils.rm(indexDir);
    }
    Files.createDirectories(indexDir);
    return FSDirectory.open(indexDir);
  }

  if ("RAMDirectory".equals(dirImpl)) {
    throw new IOException("RAMDirectory has been removed, use ByteBuffersDirectory.");
  }

  if ("ByteBuffersDirectory".equals(dirImpl)) {
    return new ByteBuffersDirectory();
  }

  throw new IOException("Directory type not supported: " + dirImpl);
}
 
Example #15
Source File: SolrResourceLoader.java    From lucene-solr with Apache License 2.0
private static URLClassLoader addURLsToClassLoader(final URLClassLoader oldLoader, List<URL> urls) {
  if (urls.size() == 0) {
    return oldLoader;
  }

  List<URL> allURLs = new ArrayList<>();
  allURLs.addAll(Arrays.asList(oldLoader.getURLs()));
  allURLs.addAll(urls);
  for (URL url : urls) {
    if (log.isDebugEnabled()) {
      log.debug("Adding '{}' to classloader", url);
    }
  }

  ClassLoader oldParent = oldLoader.getParent();
  IOUtils.closeWhileHandlingException(oldLoader);
  return URLClassLoader.newInstance(allURLs.toArray(new URL[allURLs.size()]), oldParent);
}
 
Example #16
Source File: TestAddIndexes.java    From lucene-solr with Apache License 2.0
public void testIllegalIndexSortChange1() throws Exception {
  Directory dir1 = newDirectory();
  IndexWriterConfig iwc1 = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc1.setIndexSort(new Sort(new SortField("foo", SortField.Type.INT)));
  RandomIndexWriter w1 = new RandomIndexWriter(random(), dir1, iwc1);
  w1.addDocument(new Document());
  w1.commit();
  w1.addDocument(new Document());
  w1.commit();
  // so the index sort is in fact burned into the index:
  w1.forceMerge(1);
  w1.close();

  Directory dir2 = newDirectory();
  IndexWriterConfig iwc2 = newIndexWriterConfig(new MockAnalyzer(random()));
  iwc2.setIndexSort(new Sort(new SortField("foo", SortField.Type.STRING)));
  RandomIndexWriter w2 = new RandomIndexWriter(random(), dir2, iwc2);
  String message = expectThrows(IllegalArgumentException.class, () -> {
      w2.addIndexes(dir1);
    }).getMessage();
  assertEquals("cannot change index sort from <int: \"foo\"> to <string: \"foo\">", message);
  IOUtils.close(dir1, w2, dir2);
}
 
Example #17
Source File: DiskDocValuesConsumer.java    From incubator-retired-blur with Apache License 2.0
@Override
public void close() throws IOException {
  boolean success = false;
  try {
    if (meta != null) {
      meta.writeVInt(-1); // write EOF marker
    }
    success = true;
  } finally {
    if (success) {
      IOUtils.close(data, meta);
    } else {
      IOUtils.closeWhileHandlingException(data, meta);
    }
  }
}
 
Example #18
Source File: TestIndexWriterFromReader.java    From lucene-solr with Apache License 2.0
public void testInvalidOpenMode() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, newIndexWriterConfig());
  w.addDocument(new Document());
  w.commit();

  DirectoryReader r = DirectoryReader.open(w);
  assertEquals(1, r.maxDoc());
  w.close();

  IndexWriterConfig iwc = newIndexWriterConfig();
  iwc.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
  iwc.setIndexCommit(r.getIndexCommit());
  IllegalArgumentException expected = expectThrows(IllegalArgumentException.class, () -> {
    new IndexWriter(dir, iwc);
  });
  assertEquals("cannot use IndexWriterConfig.setIndexCommit() with OpenMode.CREATE", expected.getMessage());

  IOUtils.close(r, dir);
}
 
Example #19
Source File: IndexAndTaxonomyRevisionTest.java    From lucene-solr with Apache License 2.0
@Test
public void testSegmentsFileLast() throws Exception {
  Directory indexDir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter indexWriter = new IndexWriter(indexDir, conf);
  
  Directory taxoDir = newDirectory();
  SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
  try {
    indexWriter.addDocument(newDocument(taxoWriter));
    indexWriter.commit();
    taxoWriter.commit();
    Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
    Map<String,List<RevisionFile>> sourceFiles = rev.getSourceFiles();
    assertEquals(2, sourceFiles.size());
    for (List<RevisionFile> files : sourceFiles.values()) {
      String lastFile = files.get(files.size() - 1).fileName;
      assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS));
    }
    indexWriter.close();
  } finally {
    IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
  }
}
 
Example #20
Source File: SolrSnapshotMetaDataManager.java    From lucene-solr with Apache License 2.0
private synchronized void persist() throws IOException {
  String fileName = SNAPSHOTS_PREFIX + nextWriteGen;
  IndexOutput out = dir.createOutput(fileName, IOContext.DEFAULT);
  boolean success = false;
  try {
    CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
    out.writeVInt(nameToDetailsMapping.size());
    for(Entry<String,SnapshotMetaData> ent : nameToDetailsMapping.entrySet()) {
      out.writeString(ent.getKey());
      out.writeString(ent.getValue().getIndexDirPath());
      out.writeVLong(ent.getValue().getGenerationNumber());
    }
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
      IOUtils.deleteFilesIgnoringExceptions(dir, fileName);
    } else {
      IOUtils.close(out);
    }
  }

  dir.sync(Collections.singletonList(fileName));

  if (nextWriteGen > 0) {
    String lastSaveFile = SNAPSHOTS_PREFIX + (nextWriteGen-1);
    // exception OK: likely it didn't exist
    IOUtils.deleteFilesIgnoringExceptions(dir, lastSaveFile);
  }

  nextWriteGen++;
}
 
Example #21
Source File: TestFacetsConfig.java    From lucene-solr with Apache License 2.0
public void testAddSameDocTwice() throws Exception {
  // LUCENE-5367: this was a problem with the previous code, making sure it
  // works with the new code.
  Directory indexDir = newDirectory(), taxoDir = newDirectory();
  IndexWriter indexWriter = new IndexWriter(indexDir, newIndexWriterConfig(new MockAnalyzer(random())));
  DirectoryTaxonomyWriter taxoWriter = new DirectoryTaxonomyWriter(taxoDir);
  FacetsConfig facetsConfig = new FacetsConfig();
  Document doc = new Document();
  doc.add(new FacetField("a", "b"));
  doc = facetsConfig.build(taxoWriter, doc);
  // these two addDocument() used to fail
  indexWriter.addDocument(doc);
  indexWriter.addDocument(doc);
  indexWriter.close();
  IOUtils.close(taxoWriter);
  
  DirectoryReader indexReader = DirectoryReader.open(indexDir);
  DirectoryTaxonomyReader taxoReader = new DirectoryTaxonomyReader(taxoDir);
  IndexSearcher searcher = newSearcher(indexReader);
  FacetsCollector fc = new FacetsCollector();
  searcher.search(new MatchAllDocsQuery(), fc);
  
  Facets facets = getTaxonomyFacetCounts(taxoReader, facetsConfig, fc);
  FacetResult res = facets.getTopChildren(10, "a");
  assertEquals(1, res.labelValues.length);
  assertEquals(2, res.labelValues[0].value);
  IOUtils.close(indexReader, taxoReader);
  
  IOUtils.close(indexDir, taxoDir);
}
 
Example #22
Source File: CopyJob.java    From lucene-solr with Apache License 2.0
/** Transfers whatever tmp files were already copied in this previous job and cancels the previous job */
public synchronized void transferAndCancel(CopyJob prevJob) throws IOException {
  synchronized(prevJob) {
    dest.message("CopyJob: now transfer prevJob " + prevJob);
    try {
      _transferAndCancel(prevJob);
    } catch (Throwable t) {
      dest.message("xfer: exc during transferAndCancel");
      cancel("exc during transferAndCancel", t);
      throw IOUtils.rethrowAlways(t);
    }
  }
}
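throw IOUtils.rethrowAlways(t) looks odd but is deliberate: rethrowAlways never returns normally. In current Lucene versions it rethrows IOException, RuntimeException, and Error as-is and wraps anything else in a RuntimeException; its declared return type of Error exists only so callers can prefix the call with throw and satisfy the compiler's flow analysis. A minimal sketch of the pattern, illustrative only:

import java.io.IOException;
import java.io.InputStream;
import org.apache.lucene.util.IOUtils;

public class RethrowDemo {
  // Catch everything, then rethrow without losing the original exception
  // type. The "throw" tells javac this branch never completes normally,
  // even though rethrowAlways itself does the actual throwing.
  static int readOneByte(InputStream in) throws IOException {
    try {
      return in.read();
    } catch (Throwable t) {
      throw IOUtils.rethrowAlways(t);
    }
  }
}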
 
Example #23
Source File: TestIndexSorting.java    From lucene-solr with Apache License 2.0
public void testIndexSortOnSparseField() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig iwc = new IndexWriterConfig(new MockAnalyzer(random()));
  SortField sortField = new SortField("sparse", SortField.Type.INT, false);
  sortField.setMissingValue(Integer.MIN_VALUE);
  Sort indexSort = new Sort(sortField);
  iwc.setIndexSort(indexSort);
  IndexWriter w = new IndexWriter(dir, iwc);
  for (int i = 0; i < 128; i++) {
    Document doc = new Document();
    if (i < 64) {
      doc.add(new NumericDocValuesField("sparse", i));
    }
    w.addDocument(doc);
  }
  w.commit();
  w.forceMerge(1);
  DirectoryReader r = DirectoryReader.open(w);
  assertEquals(1, r.leaves().size());
  LeafReader leafReader = r.leaves().get(0).reader();
  NumericDocValues sparseValues = leafReader.getNumericDocValues("sparse");
  for(int docID = 0; docID < 128; docID++) {
    if (docID >= 64) {
      assertTrue(sparseValues.advanceExact(docID));
      assertEquals(docID-64, (int) sparseValues.longValue());
    } else {
      assertFalse(sparseValues.advanceExact(docID));
    }
  }
  IOUtils.close(r, w, dir);
}
 
Example #24
Source File: TestNumericDocValuesUpdates.java    From lucene-solr with Apache License 2.0
public void testIOContext() throws Exception {
  // LUCENE-5591: make sure we pass an IOContext with an approximate
  // segmentSize in FlushInfo
  Directory dir = newDirectory();
  IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // we want a single large enough segment so that a doc-values update writes a large file
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
  conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
  IndexWriter writer = new IndexWriter(dir, conf);
  for (int i = 0; i < 100; i++) {
    writer.addDocument(doc(i));
  }
  writer.commit();
  writer.close();
  
  NRTCachingDirectory cachingDir = new NRTCachingDirectory(dir, 100, 1/(1024.*1024.));
  conf = newIndexWriterConfig(new MockAnalyzer(random()));
  // we want a single large enough segment so that a doc-values update writes a large file
  conf.setMergePolicy(NoMergePolicy.INSTANCE);
  conf.setMaxBufferedDocs(Integer.MAX_VALUE); // manually flush
  conf.setRAMBufferSizeMB(IndexWriterConfig.DISABLE_AUTO_FLUSH);
  writer = new IndexWriter(cachingDir, conf);
  writer.updateNumericDocValue(new Term("id", "doc-0"), "val", 100L);
  DirectoryReader reader = DirectoryReader.open(writer); // flush
  assertEquals(0, cachingDir.listCachedFiles().length);
  
  IOUtils.close(reader, writer, cachingDir);
}
 
Example #25
Source File: TestCustomTermFreq.java    From lucene-solr with Apache License 2.0
public void testInvalidProx() throws Exception {
  Directory dir = newDirectory();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new MockAnalyzer(random())));

  Document doc = new Document();
  FieldType fieldType = new FieldType(TextField.TYPE_NOT_STORED);
  Field field = new Field("field",
                          new CannedTermFreqs(new String[] {"foo", "bar", "foo", "bar"},
                                              new int[] {42, 128, 17, 100}),
                          fieldType);
  doc.add(field);
  Exception e = expectThrows(IllegalStateException.class, () -> {w.addDocument(doc);});
  assertEquals("field \"field\": cannot index positions while using custom TermFrequencyAttribute", e.getMessage());
  IOUtils.close(w, dir);
}
 
Example #26
Source File: CheckIndex.java    From lucene-solr with Apache License 2.0
/**
 * Test field infos.
 * @lucene.experimental
 */
public static Status.FieldInfoStatus testFieldInfos(CodecReader reader, PrintStream infoStream, boolean failFast) throws IOException {
  long startNS = System.nanoTime();
  final Status.FieldInfoStatus status = new Status.FieldInfoStatus();
  
  try {
    // Test Field Infos
    if (infoStream != null) {
      infoStream.print("    test: field infos.........");
    }
    FieldInfos fieldInfos = reader.getFieldInfos();
    for (FieldInfo f : fieldInfos) {
      f.checkConsistency();
    }
    msg(infoStream, String.format(Locale.ROOT, "OK [%d fields] [took %.3f sec]", fieldInfos.size(), nsToSec(System.nanoTime()-startNS)));
    status.totFields = fieldInfos.size();
  } catch (Throwable e) {
    if (failFast) {
      throw IOUtils.rethrowAlways(e);
    }
    msg(infoStream, "ERROR [" + String.valueOf(e.getMessage()) + "]");
    status.error = e;
    if (infoStream != null) {
      e.printStackTrace(infoStream);
    }
  }
  
  return status;
}
 
Example #27
Source File: FileSystemUtils.java    From Elasticsearch with Apache License 2.0
/**
 * Deletes all subdirectories in the given path recursively
 * @throws java.lang.IllegalArgumentException if the given path is not a directory
 */
public static void deleteSubDirectories(Path... paths) throws IOException {
    for (Path path : paths) {
        try (DirectoryStream<Path> stream = Files.newDirectoryStream(path)) {
            for (Path subPath : stream) {
                if (Files.isDirectory(subPath)) {
                    IOUtils.rm(subPath);
                }
            }
        }
    }
}
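IOUtils.rm, used above to prune each subdirectory, accepts any number of Path arguments and deletes files or whole directory trees, collecting per-file failures into a single IOException rather than stopping at the first one. A throwaway usage sketch (names are illustrative; Files.writeString requires Java 11+):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.lucene.util.IOUtils;

public class RmDemo {
  public static void main(String[] args) throws IOException {
    Path root = Files.createTempDirectory("rm-demo");
    Files.createDirectories(root.resolve("a/b/c"));
    Files.writeString(root.resolve("a/b/c/data.txt"), "payload");
    IOUtils.rm(root); // removes the whole tree, root included
  }
}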
 
Example #28
Source File: FsBlobContainer.java    From Elasticsearch with Apache License 2.0
@Override
public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
    final Path file = path.resolve(blobName);
    try (OutputStream outputStream = Files.newOutputStream(file)) {
        Streams.copy(inputStream, outputStream, new byte[blobStore.bufferSizeInBytes()]);
    }
    IOUtils.fsync(file, false);
    IOUtils.fsync(path, true);
}
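The two fsync calls are not redundant: the first flushes the blob's contents to stable storage, while the second (with isDir = true) flushes the parent directory's metadata so the newly created file name itself survives a crash. A standalone sketch of the same write-then-sync sequence, with illustrative file names:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import org.apache.lucene.util.IOUtils;

public class FsyncDemo {
  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("fsync-demo");
    Path blob = dir.resolve("blob.bin");
    Files.write(blob, new byte[] {1, 2, 3});
    IOUtils.fsync(blob, false); // sync the file's contents
    IOUtils.fsync(dir, true);   // sync the directory entry for the new file
  }
}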
 
Example #29
Source File: SegmentInfos.java    From lucene-solr with Apache License 2.0
final void rollbackCommit(Directory dir) {
  if (pendingCommit) {
    pendingCommit = false;
    
    // we try to clean up our pending_segments_N

    // Must carefully compute fileName from "generation"
    // since lastGeneration isn't incremented:
    final String pending = IndexFileNames.fileNameFromGeneration(IndexFileNames.PENDING_SEGMENTS, "", generation);

    // Suppress so we keep throwing the original exception
    // in our caller
    IOUtils.deleteFilesIgnoringExceptions(dir, pending);
  }
}
 
Example #30
Source File: FSTTermsReader.java    From lucene-solr with Apache License 2.0
public FSTTermsReader(SegmentReadState state, PostingsReaderBase postingsReader) throws IOException {
  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, FSTTermsWriter.TERMS_EXTENSION);

  this.postingsReader = postingsReader;
  final IndexInput in = state.directory.openInput(termsFileName, state.context);

  boolean success = false;
  try {
    CodecUtil.checkIndexHeader(in, FSTTermsWriter.TERMS_CODEC_NAME,
                                     FSTTermsWriter.TERMS_VERSION_START,
                                     FSTTermsWriter.TERMS_VERSION_CURRENT,
                                     state.segmentInfo.getId(), state.segmentSuffix);
    CodecUtil.checksumEntireFile(in);
    this.postingsReader.init(in, state);
    seekDir(in);

    final FieldInfos fieldInfos = state.fieldInfos;
    final int numFields = in.readVInt();
    for (int i = 0; i < numFields; i++) {
      int fieldNumber = in.readVInt();
      FieldInfo fieldInfo = fieldInfos.fieldInfo(fieldNumber);
      long numTerms = in.readVLong();
      long sumTotalTermFreq = in.readVLong();
      // if frequencies are omitted, sumTotalTermFreq=sumDocFreq and we only write one value
      long sumDocFreq = fieldInfo.getIndexOptions() == IndexOptions.DOCS ? sumTotalTermFreq : in.readVLong();
      int docCount = in.readVInt();
      TermsReader current = new TermsReader(fieldInfo, in, numTerms, sumTotalTermFreq, sumDocFreq, docCount);
      TermsReader previous = fields.put(fieldInfo.name, current);
      checkFieldSummary(state.segmentInfo, in, current, previous);
    }
    success = true;
  } finally {
    if (success) {
      IOUtils.close(in);
    } else {
      IOUtils.closeWhileHandlingException(in);
    }
  }
}