org.apache.lucene.index.IndexFileNames Java Examples

The following examples show how to use org.apache.lucene.index.IndexFileNames. They are drawn from real open-source projects; the source file, project, and license are noted above each example.
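
Most of the examples revolve around a few static helpers: segmentFileName, fileNameFromGeneration, stripSegmentName, and the SEGMENTS/PENDING_SEGMENTS name prefixes. As a quick orientation, here is a minimal, illustrative sketch (not taken from any project below); the expected outputs in the comments reflect Lucene's standard naming scheme and should be read as assumptions, not guarantees.

import org.apache.lucene.index.IndexFileNames;

public class IndexFileNamesDemo {
  public static void main(String[] args) {
    // segment name + "." + extension; an empty suffix adds nothing
    System.out.println(IndexFileNames.segmentFileName("_0", "", "si"));            // _0.si
    // a non-empty suffix is spliced in before the extension
    System.out.println(IndexFileNames.segmentFileName("_0", "Lucene50_0", "tim")); // _0_Lucene50_0.tim
    // generation-stamped names encode the generation in base 36 (Character.MAX_RADIX)
    System.out.println(IndexFileNames.fileNameFromGeneration("_0", "liv", 35));    // _0_z.liv
    // every commit point's file name starts with the "segments" prefix
    System.out.println(IndexFileNames.SEGMENTS + "_1");                            // segments_1
  }
}
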
Example #1
Source File: FixedGapTermsIndexWriter.java    From lucene-solr with Apache License 2.0
public FixedGapTermsIndexWriter(SegmentWriteState state, int termIndexInterval) throws IOException {
  if (termIndexInterval <= 0) {
    throw new IllegalArgumentException("invalid termIndexInterval: " + termIndexInterval);
  }
  this.termIndexInterval = termIndexInterval;
  final String indexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
  out = state.directory.createOutput(indexFileName, state.context);
  boolean success = false;
  try {
    CodecUtil.writeIndexHeader(out, CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    out.writeVInt(termIndexInterval);
    out.writeVInt(PackedInts.VERSION_CURRENT);
    out.writeVInt(BLOCKSIZE);
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example #2
Source File: IndexAndTaxonomyRevisionTest.java    From lucene-solr with Apache License 2.0
@Test
public void testSegmentsFileLast() throws Exception {
  Directory indexDir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter indexWriter = new IndexWriter(indexDir, conf);
  
  Directory taxoDir = newDirectory();
  SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
  try {
    indexWriter.addDocument(newDocument(taxoWriter));
    indexWriter.commit();
    taxoWriter.commit();
    Revision rev = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
    Map<String,List<RevisionFile>> sourceFiles = rev.getSourceFiles();
    assertEquals(2, sourceFiles.size());
    for (List<RevisionFile> files : sourceFiles.values()) {
      String lastFile = files.get(files.size() - 1).fileName;
      assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS));
    }
    indexWriter.close();
  } finally {
    IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
  }
}
 
Example #3
Source File: FSTTermsWriter.java    From lucene-solr with Apache License 2.0
public FSTTermsWriter(SegmentWriteState state, PostingsWriterBase postingsWriter) throws IOException {
  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);

  this.postingsWriter = postingsWriter;
  this.fieldInfos = state.fieldInfos;
  this.out = state.directory.createOutput(termsFileName, state.context);
  this.maxDoc = state.segmentInfo.maxDoc();

  boolean success = false;
  try {
    CodecUtil.writeIndexHeader(out, TERMS_CODEC_NAME, TERMS_VERSION_CURRENT,
                                      state.segmentInfo.getId(), state.segmentSuffix);   

    this.postingsWriter.init(out, state); 
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example #4
Source File: IndexReplicationHandler.java    From lucene-solr with Apache License 2.0
/**
 * Verifies that the last file in the list is segments_N and fails otherwise.
 * It also removes the file from the list and returns it, because it must be
 * handled last, after all other files. This guarantees that if a reader sees
 * the new segments_N, all other segment files are already on stable storage.
 * <p>
 * The code fails rather than moving the segments_N file to the end because a
 * misplaced segments_N indicates an error in the Revision implementation.
 */
public static String getSegmentsFile(List<String> files, boolean allowEmpty) {
  if (files.isEmpty()) {
    if (allowEmpty) {
      return null;
    } else {
      throw new IllegalStateException("empty list of files not allowed");
    }
  }
  
  String segmentsFile = files.remove(files.size() - 1);
  if (!segmentsFile.startsWith(IndexFileNames.SEGMENTS)) {
    throw new IllegalStateException("last file to copy+sync must be segments_N but got " + segmentsFile
        + "; check your Revision implementation!");
  }
  return segmentsFile;
}
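
A hypothetical call against the method above (illustrative, not from the source; assumes java.util imports). Note that the method mutates the list it is given:

// The Revision contract puts segments_N last in each file list.
List<String> files = new ArrayList<>(Arrays.asList("_0.cfe", "_0.cfs", "_0.si", "segments_2"));
String segmentsFile = IndexReplicationHandler.getSegmentsFile(files, false);
// segmentsFile == "segments_2"; files now holds only the segment files, which
// must be copied and synced before the new segments_2 becomes visible.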
 
Example #5
Source File: IndexRevisionTest.java    From lucene-solr with Apache License 2.0
@Test
public void testSegmentsFileLast() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter writer = new IndexWriter(dir, conf);
  try {
    writer.addDocument(new Document());
    writer.commit();
    Revision rev = new IndexRevision(writer);
    @SuppressWarnings("unchecked")
    Map<String, List<RevisionFile>> sourceFiles = rev.getSourceFiles();
    assertEquals(1, sourceFiles.size());
    List<RevisionFile> files = sourceFiles.values().iterator().next();
    String lastFile = files.get(files.size() - 1).fileName;
    assertTrue(lastFile.startsWith(IndexFileNames.SEGMENTS));
    writer.close();
  } finally {
    IOUtils.close(dir);
  }
}
 
Example #6
Source File: IndexRevisionTest.java    From lucene-solr with Apache License 2.0
@Test
public void testRevisionRelease() throws Exception {
  Directory dir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter writer = new IndexWriter(dir, conf);
  try {
    writer.addDocument(new Document());
    writer.commit();
    Revision rev1 = new IndexRevision(writer);
    // releasing that revision should not delete the files
    rev1.release();
    assertTrue(slowFileExists(dir, IndexFileNames.SEGMENTS + "_1"));
    
    rev1 = new IndexRevision(writer); // create revision again, so the files are snapshotted
    writer.addDocument(new Document());
    writer.commit();
    assertNotNull(new IndexRevision(writer));
    rev1.release(); // this release should trigger the delete of segments_1
    assertFalse(slowFileExists(dir, IndexFileNames.SEGMENTS + "_1"));
  } finally {
    IOUtils.close(writer, dir);
  }
}
 
Example #7
Source File: RAMOnlyPostingsFormat.java    From lucene-solr with Apache License 2.0
@Override
public FieldsProducer fieldsProducer(SegmentReadState readState)
  throws IOException {

  // Load our ID:
  final String idFileName = IndexFileNames.segmentFileName(readState.segmentInfo.name, readState.segmentSuffix, ID_EXTENSION);
  IndexInput in = readState.directory.openInput(idFileName, readState.context);
  boolean success = false;
  final int id;
  try {
    CodecUtil.checkHeader(in, RAM_ONLY_NAME, VERSION_START, VERSION_LATEST);
    id = in.readVInt();
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(in);
    } else {
      IOUtils.close(in);
    }
  }
  
  synchronized(state) {
    return state.get(id);
  }
}
 
Example #8
Source File: BlockTermsWriter.java    From lucene-solr with Apache License 2.0
public BlockTermsWriter(TermsIndexWriterBase termsIndexWriter,
    SegmentWriteState state, PostingsWriterBase postingsWriter)
    throws IOException {
  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);
  this.termsIndexWriter = termsIndexWriter;
  maxDoc = state.segmentInfo.maxDoc();
  out = state.directory.createOutput(termsFileName, state.context);
  boolean success = false;
  try {
    fieldInfos = state.fieldInfos;
    CodecUtil.writeIndexHeader(out, CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    currentField = null;
    this.postingsWriter = postingsWriter;
    // segment = state.segmentName;
    
    //System.out.println("BTW.init seg=" + state.segmentName);
    
    postingsWriter.init(out, state); // have consumer write its format/header
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example #9
Source File: SimpleTextPointsWriter.java    From lucene-solr with Apache License 2.0
@Override
public void close() throws IOException {
  if (dataOut != null) {
    dataOut.close();
    dataOut = null;

    // Write index file
    String fileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, SimpleTextPointsFormat.POINT_INDEX_EXTENSION);
    try (IndexOutput indexOut = writeState.directory.createOutput(fileName, writeState.context)) {
      int count = indexFPs.size();
      write(indexOut, FIELD_COUNT);
      write(indexOut, Integer.toString(count));
      newline(indexOut);
      for(Map.Entry<String,Long> ent : indexFPs.entrySet()) {
        write(indexOut, FIELD_FP_NAME);
        write(indexOut, ent.getKey());
        newline(indexOut);
        write(indexOut, FIELD_FP);
        write(indexOut, Long.toString(ent.getValue()));
        newline(indexOut);
      }
      SimpleTextUtil.writeChecksum(indexOut, scratch);
    }
  }
}
 
Example #10
Source File: CompletionFieldsConsumer.java    From lucene-solr with Apache License 2.0
CompletionFieldsConsumer(String codecName, PostingsFormat delegatePostingsFormat, SegmentWriteState state) throws IOException {
  this.codecName = codecName;
  this.delegatePostingsFormatName = delegatePostingsFormat.getName();
  this.state = state;
  String dictFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, DICT_EXTENSION);
  boolean success = false;
  try {
    this.delegateFieldsConsumer = delegatePostingsFormat.fieldsConsumer(state);
    dictOut = state.directory.createOutput(dictFile, state.context);
    CodecUtil.writeIndexHeader(dictOut, codecName, COMPLETION_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    success = true;
  } finally {
    if (success == false) {
      IOUtils.closeWhileHandlingException(dictOut, delegateFieldsConsumer);
    }
  }
}
 
Example #11
Source File: Completion090PostingsFormat.java    From Elasticsearch with Apache License 2.0
public CompletionFieldsConsumer(SegmentWriteState state) throws IOException {
    this.delegatesFieldsConsumer = delegatePostingsFormat.fieldsConsumer(state);
    String suggestFSTFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
    IndexOutput output = null;
    boolean success = false;
    try {
        output = state.directory.createOutput(suggestFSTFile, state.context);
        CodecUtil.writeHeader(output, CODEC_NAME, SUGGEST_VERSION_CURRENT);
        /*
         * we write the delegate postings format name so we can load it
         * without getting an instance in the ctor
         */
        output.writeString(delegatePostingsFormat.getName());
        output.writeString(writeProvider.getName());
        this.suggestFieldsConsumer = writeProvider.consumer(output);
        success = true;
    } finally {
        if (!success) {
            IOUtils.closeWhileHandlingException(output);
        }
    }
}
 
Example #12
Source File: Lucene50LiveDocsFormat.java    From lucene-solr with Apache License 2.0
@Override
public void writeLiveDocs(Bits bits, Directory dir, SegmentCommitInfo info, int newDelCount, IOContext context) throws IOException {
  long gen = info.getNextDelGen();
  String name = IndexFileNames.fileNameFromGeneration(info.info.name, EXTENSION, gen);
  int delCount = 0;
  try (IndexOutput output = dir.createOutput(name, context)) {
    CodecUtil.writeIndexHeader(output, CODEC_NAME, VERSION_CURRENT, info.info.getId(), Long.toString(gen, Character.MAX_RADIX));
    final int longCount = FixedBitSet.bits2words(bits.length());
    for (int i = 0; i < longCount; ++i) {
      long currentBits = 0;
      for (int j = i << 6, end = Math.min(j + 63, bits.length() - 1); j <= end; ++j) {
        if (bits.get(j)) {
          currentBits |= 1L << j; // shift count is masked to 6 bits, i.e. this sets bit j mod 64
        } else {
          delCount += 1;
        }
      }
      output.writeLong(currentBits);
    }
    CodecUtil.writeFooter(output);
  }
  if (delCount != info.getDelCount() + newDelCount) {
    throw new CorruptIndexException("bits.deleted=" + delCount + 
        " info.delcount=" + info.getDelCount() + " newdelcount=" + newDelCount, name);
  }
}
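
For context, a hedged sketch (my reading of Lucene's naming scheme, not code from this source file) of the name computed above; the deletion generation is rendered in base 36, matching the Long.toString(gen, Character.MAX_RADIX) suffix written into the header:

// Illustrative only: EXTENSION is the live-docs extension ("liv") in this format.
String name = IndexFileNames.fileNameFromGeneration("_0", "liv", 35);
// assumed result: "_0_z.liv" (35 in base 36 is "z")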
 
Example #13
Source File: Lucene80NormsConsumer.java    From lucene-solr with Apache License 2.0
Lucene80NormsConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
  boolean success = false;
  try {
    String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
    data = state.directory.createOutput(dataName, state.context);
    CodecUtil.writeIndexHeader(data, dataCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
    meta = state.directory.createOutput(metaName, state.context);
    CodecUtil.writeIndexHeader(meta, metaCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    maxDoc = state.segmentInfo.maxDoc();
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
 
Example #14
Source File: Lucene80DocValuesConsumer.java    From lucene-solr with Apache License 2.0
/** expert: Creates a new writer */
public Lucene80DocValuesConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
  boolean success = false;
  try {
    this.state = state;
    String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
    data = state.directory.createOutput(dataName, state.context);
    CodecUtil.writeIndexHeader(data, dataCodec, Lucene80DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
    meta = state.directory.createOutput(metaName, state.context);
    CodecUtil.writeIndexHeader(meta, metaCodec, Lucene80DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    maxDoc = state.segmentInfo.maxDoc();
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
 
Example #15
Source File: BlockDirectory.java    From lucene-solr with Apache License 2.0
/**
 * Determine whether write caching should be used for a particular
 * file/context.
 */
boolean useWriteCache(String name, IOContext context) {
  if (!blockCacheWriteEnabled || name.startsWith(IndexFileNames.PENDING_SEGMENTS)) {
    // for safety, don't bother caching pending commits.
    // the cache does support renaming (renameCacheFile), but that's a scary optimization.
    return false;
  }
  if (blockCacheFileTypes != null && !isCachableFile(name)) {
    return false;
  }
  switch (context.context) {
    case MERGE: {
      // we currently don't cache any merge context writes
      return false;
    }
    default: {
      return true;
    }
  }
}
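
The PENDING_SEGMENTS check above skips files written during a two-phase commit. A quick illustrative check, assuming the prefix constant is "pending_segments":

// Illustrative only: pending commits are written as pending_segments_N and
// renamed to segments_N once the commit succeeds, so they are never cached.
boolean pending = "pending_segments_1".startsWith(IndexFileNames.PENDING_SEGMENTS); // assumed true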
 
Example #16
Source File: DiskDocValuesConsumer.java    From incubator-retired-blur with Apache License 2.0
public DiskDocValuesConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
  boolean success = false;
  try {
    String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
    data = state.directory.createOutput(dataName, state.context);
    CodecUtil.writeHeader(data, dataCodec, DiskDocValuesFormat.VERSION_CURRENT);
    String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
    meta = state.directory.createOutput(metaName, state.context);
    CodecUtil.writeHeader(meta, metaCodec, DiskDocValuesFormat.VERSION_CURRENT);
    maxDoc = state.segmentInfo.getDocCount();
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
 
Example #17
Source File: Lucene.java    From crate with Apache License 2.0
/**
 * This method removes all Lucene files from the given directory. It first deletes all commit points
 * (segments_N files) so that broken commits or corrupted indices cannot be opened later. If any of
 * these files can't be deleted, the operation fails.
 */
public static void cleanLuceneIndex(Directory directory) throws IOException {
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (final String file : directory.listAll()) {
            if (file.startsWith(IndexFileNames.SEGMENTS) || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) {
                directory.deleteFile(file); // remove all segments_N files
            }
        }
    }
    try (IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)
            .setSoftDeletesField(Lucene.SOFT_DELETES_FIELD)
            .setMergePolicy(NoMergePolicy.INSTANCE) // no merges
            .setCommitOnClose(false) // no commits
            .setOpenMode(IndexWriterConfig.OpenMode.CREATE))) { // force creation - don't append...
        // do nothing; closing the writer kicks off IndexFileDeleter, which removes all pending files
    }
}
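
To make the matching explicit, here is an illustrative check of which directory entries the deletion loop above targets, assuming SEGMENTS is "segments" and OLD_SEGMENTS_GEN is "segments.gen":

// Illustrative only, not from the source:
assert "segments_1".startsWith(IndexFileNames.SEGMENTS);       // commit point -> deleted
assert "segments.gen".equals(IndexFileNames.OLD_SEGMENTS_GEN); // legacy gen file -> deleted
assert !"_0.cfs".startsWith(IndexFileNames.SEGMENTS);          // segment data -> left to IndexFileDeleter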
 
Example #18
Source File: Blur022SegmentInfoReader.java    From incubator-retired-blur with Apache License 2.0
@Override
public SegmentInfo read(Directory dir, String segment, IOContext context) throws IOException {
  final String fileName = IndexFileNames.segmentFileName(segment, "", Blur022SegmentInfoFormat.SI_EXTENSION);
  final IndexInput input = dir.openInput(fileName, context);
  boolean success = false;
  try {
    CodecUtil.checkHeader(input, Blur022SegmentInfoFormat.CODEC_NAME, Blur022SegmentInfoFormat.VERSION_START,
        Blur022SegmentInfoFormat.VERSION_CURRENT);
    final String version = input.readString();
    final int docCount = input.readInt();
    if (docCount < 0) {
      throw new CorruptIndexException("invalid docCount: " + docCount + " (resource=" + input + ")");
    }
    final boolean isCompoundFile = input.readByte() == SegmentInfo.YES;
    final Map<String, String> diagnostics = input.readStringStringMap();
    final Map<String, String> attributes = input.readStringStringMap();
    final Set<String> files = input.readStringSet();

    if (input.getFilePointer() != input.length()) {
      throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read "
          + input.getFilePointer() + " vs size " + input.length() + " (resource: " + input + ")");
    }

    final SegmentInfo si = new SegmentInfo(dir, version, segment, docCount, isCompoundFile, null, diagnostics,
        Collections.unmodifiableMap(attributes));
    si.setFiles(files);

    success = true;

    return si;

  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(input);
    } else {
      input.close();
    }
  }
}
 
Example #19
Source File: Store.java    From crate with Apache License 2.0
/**
 * This method deletes every file in this store that is not contained in the given source meta data or is a
 * legacy checksum file. After the delete it pulls the latest metadata snapshot from the store and compares it
 * to the given snapshot. If the snapshots are inconsistent an illegal state exception is thrown.
 *
 * @param reason         the reason for this cleanup operation logged for each deleted file
 * @param sourceMetaData the metadata used for cleanup. all files in this metadata should be kept around.
 * @throws IOException           if an IOException occurs
 * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup.
 */
public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException {
    metadataLock.writeLock().lock();
    try (Lock writeLock = directory.obtainLock(IndexWriter.WRITE_LOCK_NAME)) {
        for (String existingFile : directory.listAll()) {
            if (Store.isAutogenerated(existingFile) || sourceMetaData.contains(existingFile)) {
                continue; // don't delete snapshot file, or the checksums file (note, this is extra protection since the Store won't delete checksum)
            }
            try {
                directory.deleteFile(reason, existingFile);
                // FNF should not happen since we hold a write lock?
            } catch (IOException ex) {
                if (existingFile.startsWith(IndexFileNames.SEGMENTS)
                        || existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)
                        || existingFile.startsWith(CORRUPTED)) {
                    // TODO do we need to also fail this if we can't delete the pending commit file?
                    // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around?
                    throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex);
                }
                logger.debug(() -> new ParameterizedMessage("failed to delete file [{}]", existingFile), ex);
                // ignore, we don't really care, will get deleted later on
            }
        }
        directory.syncMetaData();
        final Store.MetadataSnapshot metadataOrEmpty = getMetadata(null);
        verifyAfterCleanup(sourceMetaData, metadataOrEmpty);
    } finally {
        metadataLock.writeLock().unlock();
    }
}
 
Example #20
Source File: DiskDocValuesProducer.java    From incubator-retired-blur with Apache License 2.0
DiskDocValuesProducer(SegmentReadState state, String dataCodec, String dataExtension, String metaCodec,
    String metaExtension) throws IOException {
  String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
  // read in the entries from the metadata file.
  IndexInput in = state.directory.openInput(metaName, state.context);
  boolean success = false;
  try {
    CodecUtil.checkHeader(in, metaCodec, DiskDocValuesFormat.VERSION_START, DiskDocValuesFormat.VERSION_START);
    numerics = new ConcurrentHashMap<Integer, NumericEntry>();
    ords = new ConcurrentHashMap<Integer, NumericEntry>();
    ordIndexes = new ConcurrentHashMap<Integer, NumericEntry>();
    binaries = new ConcurrentHashMap<Integer, BinaryEntry>();
    _binaryDocValuesCache = new ConcurrentHashMap<Integer, BinaryDocValues>();
    _numericDocValuesCache = new ConcurrentHashMap<Integer, NumericDocValues>();
    _sortedDocValuesCache = new ConcurrentHashMap<Integer, SortedDocValues>();
    _sortedSetDocValuesCache = new ConcurrentHashMap<Integer, SortedSetDocValues>();
    readFields(in, state.fieldInfos);
    success = true;
  } finally {
    if (success) {
      IOUtils.close(in);
    } else {
      IOUtils.closeWhileHandlingException(in);
    }
  }

  String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
  data = state.directory.openInput(dataName, state.context);
  CodecUtil.checkHeader(data, dataCodec, DiskDocValuesFormat.VERSION_START, DiskDocValuesFormat.VERSION_START);
}
 
Example #21
Source File: Store.java    From crate with Apache License 2.0
private int numSegmentFiles() { // only for asserts
    int count = 0;
    for (StoreFileMetaData file : this) {
        if (file.name().startsWith(IndexFileNames.SEGMENTS)) {
            count++;
        }
    }
    return count;
}
 
Example #22
Source File: Lucene80NormsProducer.java    From lucene-solr with Apache License 2.0
Lucene80NormsProducer(SegmentReadState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
  maxDoc = state.segmentInfo.maxDoc();
  String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
  int version = -1;

  // read in the entries from the metadata file.
  try (ChecksumIndexInput in = state.directory.openChecksumInput(metaName, state.context)) {
    Throwable priorE = null;
    try {
      version = CodecUtil.checkIndexHeader(in, metaCodec, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
      readFields(in, state.fieldInfos);
    } catch (Throwable exception) {
      priorE = exception;
    } finally {
      CodecUtil.checkFooter(in, priorE);
    }
  }

  String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
  data = state.directory.openInput(dataName, state.context);
  boolean success = false;
  try {
    final int version2 = CodecUtil.checkIndexHeader(data, dataCodec, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    if (version != version2) {
      throw new CorruptIndexException("Format versions mismatch: meta=" + version + ",data=" + version2, data);
    }

    // NOTE: data file is too costly to verify checksum against all the bytes on open,
    // but for now we at least verify proper structure of the checksum footer: which looks
    // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
    // such as file truncation.
    CodecUtil.retrieveChecksum(data);

    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this.data);
    }
  }
}
 
Example #23
Source File: IndexAndTaxonomyRevisionTest.java    From lucene-solr with Apache License 2.0
@Test
public void testRevisionRelease() throws Exception {
  Directory indexDir = newDirectory();
  IndexWriterConfig conf = new IndexWriterConfig(null);
  conf.setIndexDeletionPolicy(new SnapshotDeletionPolicy(conf.getIndexDeletionPolicy()));
  IndexWriter indexWriter = new IndexWriter(indexDir, conf);
  
  Directory taxoDir = newDirectory();
  SnapshotDirectoryTaxonomyWriter taxoWriter = new SnapshotDirectoryTaxonomyWriter(taxoDir);
  try {
    indexWriter.addDocument(newDocument(taxoWriter));
    indexWriter.commit();
    taxoWriter.commit();
    Revision rev1 = new IndexAndTaxonomyRevision(indexWriter, taxoWriter);
    // releasing that revision should not delete the files
    rev1.release();
    assertTrue(slowFileExists(indexDir, IndexFileNames.SEGMENTS + "_1"));
    assertTrue(slowFileExists(taxoDir, IndexFileNames.SEGMENTS + "_1"));
    
    rev1 = new IndexAndTaxonomyRevision(indexWriter, taxoWriter); // create revision again, so the files are snapshotted
    indexWriter.addDocument(newDocument(taxoWriter));
    indexWriter.commit();
    taxoWriter.commit();
    assertNotNull(new IndexAndTaxonomyRevision(indexWriter, taxoWriter));
    rev1.release(); // this release should trigger the delete of segments_1
    assertFalse(slowFileExists(indexDir, IndexFileNames.SEGMENTS + "_1"));
    indexWriter.close();
  } finally {
    IOUtils.close(indexWriter, taxoWriter, taxoDir, indexDir);
  }
}
 
Example #24
Source File: Lucene50CompoundReader.java    From lucene-solr with Apache License 2.0
/** Returns the length of a file in the directory.
 * @throws IOException if the file does not exist */
@Override
public long fileLength(String name) throws IOException {
  ensureOpen();
  FileEntry e = entries.get(IndexFileNames.stripSegmentName(name));
  if (e == null)
    throw new FileNotFoundException(name);
  return e.length;
}
 
Example #25
Source File: Lucene50CompoundReader.java    From lucene-solr with Apache License 2.0
@Override
public IndexInput openInput(String name, IOContext context) throws IOException {
  ensureOpen();
  final String id = IndexFileNames.stripSegmentName(name);
  final FileEntry entry = entries.get(id);
  if (entry == null) {
    String datFileName = IndexFileNames.segmentFileName(segmentName, "", Lucene50CompoundFormat.DATA_EXTENSION);
    throw new FileNotFoundException("No sub-file with id " + id + " found in compound file \"" + datFileName + "\" (fileName=" + name + " files: " + entries.keySet() + ")");
  }
  return handle.slice(name, entry.offset, entry.length);
}
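
Both compound-reader methods above key the entry map by the stripped file name. A hedged sketch of what stripSegmentName does, as I understand it (the per-segment prefix is removed, leaving the entry id):

// Illustrative only: assumed result is ".fdt" for a file named "_0.fdt".
String id = IndexFileNames.stripSegmentName("_0.fdt");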
 
Example #26
Source File: VariableGapTermsIndexWriter.java    From lucene-solr with Apache License 2.0
public VariableGapTermsIndexWriter(SegmentWriteState state, IndexTermSelector policy) throws IOException {
  final String indexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
  out = state.directory.createOutput(indexFileName, state.context);
  boolean success = false;
  try {
    fieldInfos = state.fieldInfos;
    this.policy = policy;
    CodecUtil.writeIndexHeader(out, CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example #27
Source File: Lucene50FieldInfosFormat.java    From lucene-solr with Apache License 2.0
@Override
public void write(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, FieldInfos infos, IOContext context) throws IOException {
  final String fileName = IndexFileNames.segmentFileName(segmentInfo.name, segmentSuffix, EXTENSION);
  try (IndexOutput output = directory.createOutput(fileName, context)) {
    CodecUtil.writeIndexHeader(output, Lucene50FieldInfosFormat.CODEC_NAME, Lucene50FieldInfosFormat.FORMAT_CURRENT, segmentInfo.getId(), segmentSuffix);
    output.writeVInt(infos.size());
    for (FieldInfo fi : infos) {
      fi.checkConsistency();

      output.writeString(fi.name);
      output.writeVInt(fi.number);

      byte bits = 0x0;
      if (fi.hasVectors()) bits |= STORE_TERMVECTOR;
      if (fi.omitsNorms()) bits |= OMIT_NORMS;
      if (fi.hasPayloads()) bits |= STORE_PAYLOADS;
      output.writeByte(bits);

      output.writeByte(indexOptionsByte(fi.getIndexOptions()));

      // pack the DV type and hasNorms in one byte
      output.writeByte(docValuesByte(fi.getDocValuesType()));
      output.writeLong(fi.getDocValuesGen());
      output.writeMapOfStrings(fi.attributes());
    }
    CodecUtil.writeFooter(output);
  }
}
 
Example #28
Source File: LocalReplicatorTest.java    From lucene-solr with Apache License 2.0
@Test
public void testRevisionRelease() throws Exception {
  replicator.publish(createRevision(1));
  assertTrue(slowFileExists(sourceDir, IndexFileNames.SEGMENTS + "_1"));
  replicator.publish(createRevision(2));
  // now the files of revision 1 can be deleted
  assertTrue(slowFileExists(sourceDir, IndexFileNames.SEGMENTS + "_2"));
  assertFalse("segments_1 should not be found in index directory after revision is released", slowFileExists(sourceDir, IndexFileNames.SEGMENTS + "_1"));
}
 
Example #29
Source File: Lucene50LiveDocsFormat.java    From lucene-solr with Apache License 2.0
@Override
public Bits readLiveDocs(Directory dir, SegmentCommitInfo info, IOContext context) throws IOException {
  long gen = info.getDelGen();
  String name = IndexFileNames.fileNameFromGeneration(info.info.name, EXTENSION, gen);
  final int length = info.info.maxDoc();
  try (ChecksumIndexInput input = dir.openChecksumInput(name, context)) {
    Throwable priorE = null;
    try {
      CodecUtil.checkIndexHeader(input, CODEC_NAME, VERSION_START, VERSION_CURRENT, 
                                   info.info.getId(), Long.toString(gen, Character.MAX_RADIX));
      long data[] = new long[FixedBitSet.bits2words(length)];
      for (int i = 0; i < data.length; i++) {
        data[i] = input.readLong();
      }
      FixedBitSet fbs = new FixedBitSet(data, length);
      if (fbs.length() - fbs.cardinality() != info.getDelCount()) {
        throw new CorruptIndexException("bits.deleted=" + (fbs.length() - fbs.cardinality()) + 
                                        " info.delcount=" + info.getDelCount(), input);
      }
      return fbs.asReadOnlyBits();
    } catch (Throwable exception) {
      priorE = exception;
    } finally {
      CodecUtil.checkFooter(input, priorE);
    }
  }
  // unreachable: the try block either returned or checkFooter re-threw the pending exception
  throw new AssertionError();
}
 
Example #30
Source File: Lucene60FieldInfosFormat.java    From lucene-solr with Apache License 2.0
@Override
public void write(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, FieldInfos infos, IOContext context) throws IOException {
  final String fileName = IndexFileNames.segmentFileName(segmentInfo.name, segmentSuffix, EXTENSION);
  try (IndexOutput output = directory.createOutput(fileName, context)) {
    CodecUtil.writeIndexHeader(output, Lucene60FieldInfosFormat.CODEC_NAME, Lucene60FieldInfosFormat.FORMAT_CURRENT, segmentInfo.getId(), segmentSuffix);
    output.writeVInt(infos.size());
    for (FieldInfo fi : infos) {
      fi.checkConsistency();

      output.writeString(fi.name);
      output.writeVInt(fi.number);

      byte bits = 0x0;
      if (fi.hasVectors()) bits |= STORE_TERMVECTOR;
      if (fi.omitsNorms()) bits |= OMIT_NORMS;
      if (fi.hasPayloads()) bits |= STORE_PAYLOADS;
      if (fi.isSoftDeletesField()) bits |= SOFT_DELETES_FIELD;
      output.writeByte(bits);

      output.writeByte(indexOptionsByte(fi.getIndexOptions()));

      // pack the DV type and hasNorms in one byte
      output.writeByte(docValuesByte(fi.getDocValuesType()));
      output.writeLong(fi.getDocValuesGen());
      output.writeMapOfStrings(fi.attributes());
      output.writeVInt(fi.getPointDimensionCount());
      if (fi.getPointDimensionCount() != 0) {
        output.writeVInt(fi.getPointIndexDimensionCount());
        output.writeVInt(fi.getPointNumBytes());
      }
    }
    CodecUtil.writeFooter(output);
  }
}