Java Code Examples for org.apache.lucene.util.IOUtils#closeWhileHandlingException()

The following examples show how to use org.apache.lucene.util.IOUtils#closeWhileHandlingException(). Each example is taken from an open-source project; the source file and originating project are noted above it.
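IOUtils.closeWhileHandlingException closes its Closeable arguments (null arguments are ignored) and swallows any exception thrown while closing. The idiom that recurs throughout the examples below is a success flag: on the normal path resources are closed with IOUtils.close, which propagates close failures, while on the failure path closeWhileHandlingException is used so that a secondary close failure cannot mask the exception already in flight. A minimal sketch of the pattern, assuming an existing Directory (the method name and file name are hypothetical):

import java.io.IOException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.util.IOUtils;

static void writeFileSafely(Directory dir) throws IOException {
  IndexOutput out = dir.createOutput("example.dat", IOContext.DEFAULT); // hypothetical file name
  boolean success = false;
  try {
    out.writeInt(42); // any write here may throw
    success = true;
  } finally {
    if (success) {
      IOUtils.close(out);                       // normal path: propagate close failures
    } else {
      IOUtils.closeWhileHandlingException(out); // failure path: keep the original exception
    }
  }
}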
Example 1
Source File: Lucene50RWPostingsFormat.java    From lucene-solr with Apache License 2.0
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  PostingsWriterBase postingsWriter = new Lucene50PostingsWriter(state);
  boolean success = false;
  try {
    FieldsConsumer ret = new BlockTreeTermsWriter(state, 
                                                  postingsWriter,
                                                  BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE, 
                                                  BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
    success = true;
    return ret;
  } finally {
    if (!success) {
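      // the BlockTreeTermsWriter constructor threw: close the postings writer without masking that exception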
      IOUtils.closeWhileHandlingException(postingsWriter);
    }
  }
}
 
Example 2
Source File: HyphenationCompoundWordTokenFilterFactory.java    From lucene-solr with Apache License 2.0
@Override
public void inform(ResourceLoader loader) throws IOException {
  InputStream stream = null;
  try {
    if (dictFile != null) // the dictionary file is optional
      dictionary = getWordSet(loader, dictFile, false);
    // TODO: Broken, because we cannot resolve real system id
    // ResourceLoader should also supply method like ClassLoader to get resource URL
    stream = loader.openResource(hypFile);
    final InputSource is = new InputSource(stream);
    is.setEncoding(encoding); // if it's null let xml parser decide
    is.setSystemId(hypFile);
    hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
  } finally {
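    // always close the stream; any parse exception from getHyphenationTree stays the primary failure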
    IOUtils.closeWhileHandlingException(stream);
  }
}
 
Example 3
Source File: CompletionFieldsConsumer.java    From lucene-solr with Apache License 2.0
CompletionFieldsConsumer(String codecName, PostingsFormat delegatePostingsFormat, SegmentWriteState state) throws IOException {
  this.codecName = codecName;
  this.delegatePostingsFormatName = delegatePostingsFormat.getName();
  this.state = state;
  String dictFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, DICT_EXTENSION);
  boolean success = false;
  try {
    this.delegateFieldsConsumer = delegatePostingsFormat.fieldsConsumer(state);
    dictOut = state.directory.createOutput(dictFile, state.context);
    CodecUtil.writeIndexHeader(dictOut, codecName, COMPLETION_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    success = true;
  } finally {
    if (success == false) {
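      // constructor failed part-way: close whatever was opened, ignoring secondary close errors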
      IOUtils.closeWhileHandlingException(dictOut, delegateFieldsConsumer);
    }
  }
}
 
Example 4
Source File: Lucene80DocValuesConsumer.java    From lucene-solr with Apache License 2.0
/** expert: Creates a new writer */
public Lucene80DocValuesConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
  boolean success = false;
  try {
    this.state = state;
    String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
    data = state.directory.createOutput(dataName, state.context);
    CodecUtil.writeIndexHeader(data, dataCodec, Lucene80DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
    meta = state.directory.createOutput(metaName, state.context);
    CodecUtil.writeIndexHeader(meta, metaCodec, Lucene80DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    maxDoc = state.segmentInfo.maxDoc();
    success = true;
  } finally {
    if (!success) {
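      // closing this consumer releases the data and meta outputs opened above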
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
 
Example 5
Source File: BlockTreeOrdsPostingsFormat.java    From lucene-solr with Apache License 2.0
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  PostingsWriterBase postingsWriter = new Lucene84PostingsWriter(state);

  boolean success = false;
  try {
    FieldsConsumer ret = new OrdsBlockTreeTermsWriter(state, 
                                                      postingsWriter,
                                                      minTermBlockSize, 
                                                      maxTermBlockSize);
    success = true;
    return ret;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(postingsWriter);
    }
  }
}
 
Example 6
Source File: TranslogWriter.java    From Elasticsearch with Apache License 2.0
public static TranslogWriter create(Type type, ShardId shardId, String translogUUID, long fileGeneration, Path file, Callback<ChannelReference> onClose, int bufferSize, ChannelFactory channelFactory) throws IOException {
    final BytesRef ref = new BytesRef(translogUUID);
    final int headerLength = getHeaderLength(ref.length);
    final FileChannel channel = channelFactory.open(file);
    try {
        // This OutputStreamDataOutput is intentionally not closed because
        // closing it will close the FileChannel
        final OutputStreamDataOutput out = new OutputStreamDataOutput(java.nio.channels.Channels.newOutputStream(channel));
        CodecUtil.writeHeader(out, TRANSLOG_CODEC, VERSION);
        out.writeInt(ref.length);
        out.writeBytes(ref.bytes, ref.offset, ref.length);
        channel.force(true);
        writeCheckpoint(headerLength, 0, file.getParent(), fileGeneration, StandardOpenOption.WRITE);
        final TranslogWriter writer = type.create(shardId, fileGeneration, new ChannelReference(file, fileGeneration, channel, onClose), bufferSize);
        return writer;
    } catch (Throwable throwable) {
        // if we fail to bake the file generation into the checkpoint, we stick with the file; once we
        // recover and that file exists, we remove it. We only apply this logic to checkpoint.generation+1;
        // any other file with a higher generation is an error condition
        IOUtils.closeWhileHandlingException(channel);
        throw throwable;
    }
}
 
Example 7
Source File: PKIndexSplitter.java    From lucene-solr with Apache License 2.0
private void createIndex(IndexWriterConfig config, Directory target, DirectoryReader reader, Query preserveFilter, boolean negateFilter) throws IOException {
  boolean success = false;
  final IndexWriter w = new IndexWriter(target, config);
  try {
    final IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setQueryCache(null);
    preserveFilter = searcher.rewrite(preserveFilter);
    final Weight preserveWeight = searcher.createWeight(preserveFilter, ScoreMode.COMPLETE_NO_SCORES, 1);
    final List<LeafReaderContext> leaves = reader.leaves();
    final CodecReader[] subReaders = new CodecReader[leaves.size()];
    int i = 0;
    for (final LeafReaderContext ctx : leaves) {
      subReaders[i++] = new DocumentFilteredLeafIndexReader(ctx, preserveWeight, negateFilter);
    }
    w.addIndexes(subReaders);
    success = true;
  } finally {
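    // close the writer either way; suppress close exceptions only when already failing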
    if (success) {
      w.close();
    } else {
      IOUtils.closeWhileHandlingException(w);
    }
  }
}
 
Example 8
Source File: TestPostingsOffsets.java    From lucene-solr with Apache License 2.0
private void checkTokens(Token[] tokens) throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
  boolean success = false;
  try {
    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    // store some term vectors for the checkindex cross-check
    ft.setStoreTermVectors(true);
    ft.setStoreTermVectorPositions(true);
    ft.setStoreTermVectorOffsets(true);
   
    Document doc = new Document();
    doc.add(new Field("body", new CannedTokenStream(tokens), ft));
    riw.addDocument(doc);
    riw.close();
    success = true;
  } finally {
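    // riw is already closed on the success path; on failure close both writer and directory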
    if (success) {
      IOUtils.close(dir);
    } else {
      IOUtils.closeWhileHandlingException(riw, dir);
    }
  }
}
 
Example 9
Source File: TestPostingsOffsets.java    From lucene-solr with Apache License 2.0
private void checkTokens(Token[] field1, Token[] field2) throws IOException {
  Directory dir = newDirectory();
  RandomIndexWriter riw = new RandomIndexWriter(random(), dir, iwc);
  boolean success = false;
  try {
    FieldType ft = new FieldType(TextField.TYPE_NOT_STORED);
    ft.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS);
    // store some term vectors for the checkindex cross-check
    ft.setStoreTermVectors(true);
    ft.setStoreTermVectorPositions(true);
    ft.setStoreTermVectorOffsets(true);
   
    Document doc = new Document();
    doc.add(new Field("body", new CannedTokenStream(field1), ft));
    doc.add(new Field("body", new CannedTokenStream(field2), ft));
    riw.addDocument(doc);
    riw.close();
    success = true;
  } finally {
    if (success) {
      IOUtils.close(dir);
    } else {
      IOUtils.closeWhileHandlingException(riw, dir);
    }
  }
}
 
Example 10
Source File: TestTransactions.java    From lucene-solr with Apache License 2.0
@Override
public void doWork() throws Throwable {
  IndexReader r1 = null, r2 = null;
  synchronized(lock) {
    try {
      r1 = DirectoryReader.open(dir1);
      r2 = DirectoryReader.open(dir2);
    } catch (Exception e) {
      // can be rethrown as RuntimeException if it happens during a close listener
      if (!e.getMessage().contains("on purpose")) {
        throw e;
      }
      // release resources
      IOUtils.closeWhileHandlingException(r1, r2);
      return;
    }
  }
  if (r1.numDocs() != r2.numDocs()) {
    throw new RuntimeException("doc counts differ: r1=" + r1.numDocs() + " r2=" + r2.numDocs());
  }
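  // normal path: close both readers, ignoring exceptions thrown during close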
  IOUtils.closeWhileHandlingException(r1, r2);
}
 
Example 11
Source File: Completion090PostingsFormat.java    From Elasticsearch with Apache License 2.0
public CompletionFieldsProducer(SegmentReadState state) throws IOException {
    String suggestFSTFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
    IndexInput input = state.directory.openInput(suggestFSTFile, state.context);
    version = CodecUtil.checkHeader(input, CODEC_NAME, SUGGEST_CODEC_VERSION, SUGGEST_VERSION_CURRENT);
    FieldsProducer delegateProducer = null;
    boolean success = false;
    try {
        PostingsFormat delegatePostingsFormat = PostingsFormat.forName(input.readString());
        String providerName = input.readString();
        CompletionLookupProvider completionLookupProvider = providers.get(providerName);
        if (completionLookupProvider == null) {
            throw new IllegalStateException("no provider with name [" + providerName + "] registered");
        }
        // TODO: we could clone the ReadState and make it always forward IOContext.MERGE to prevent unnecessary heap usage?
        delegateProducer = delegatePostingsFormat.fieldsProducer(state);
        /*
         * If we are merging we don't load the FSTs at all such that we
         * don't consume so much memory during merge
         */
        if (state.context.context != Context.MERGE) {
            // TODO: maybe we can do this in a fully lazy fashion based on some configuration
            // eventually we should have some kind of circuit breaker that prevents us from going OOM here
            // with some configuration
            this.lookupFactory = completionLookupProvider.load(input);
        } else {
            this.lookupFactory = null;
        }
        this.delegateProducer = delegateProducer;
        success = true;
    } finally {
        if (!success) {
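            // delegateProducer may still be null here; closeWhileHandlingException ignores null arguments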
            IOUtils.closeWhileHandlingException(delegateProducer, input);
        } else {
            IOUtils.close(input);
        }
    }
}
 
Example 12
Source File: PerFieldDocValuesFormat.java    From lucene-solr with Apache License 2.0
public FieldsReader(final SegmentReadState readState) throws IOException {

      // Init each unique format:
      boolean success = false;
      try {
        // Read field name -> format name
        for (FieldInfo fi : readState.fieldInfos) {
          if (fi.getDocValuesType() != DocValuesType.NONE) {
            final String fieldName = fi.name;
            final String formatName = fi.getAttribute(PER_FIELD_FORMAT_KEY);
            // a null formatName means the field is in fieldInfos, but has no docvalues
            if (formatName != null) {
              final String suffix = fi.getAttribute(PER_FIELD_SUFFIX_KEY);
              if (suffix == null) {
                throw new IllegalStateException("missing attribute: " + PER_FIELD_SUFFIX_KEY + " for field: " + fieldName);
              }
              DocValuesFormat format = DocValuesFormat.forName(formatName);
              String segmentSuffix = getFullSegmentSuffix(readState.segmentSuffix, getSuffix(formatName, suffix));
              if (!formats.containsKey(segmentSuffix)) {
                formats.put(segmentSuffix, format.fieldsProducer(new SegmentReadState(readState, segmentSuffix)));
              }
              fields.put(fieldName, formats.get(segmentSuffix));
            }
          }
        }
        success = true;
      } finally {
        if (!success) {
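          // close every producer opened so far; the formats map only holds successfully opened ones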
          IOUtils.closeWhileHandlingException(formats.values());
        }
      }
    }
 
Example 13
Source File: BlobTransferTarget.java    From crate with Apache License 2.0
public void stopRecovery() {
    synchronized (lock) {
        recoveryActive = false;
        for (UUID finishedUpload : finishedUploads) {
            LOGGER.debug("finished transfer and recovery for {}, removing state", finishedUpload);
            BlobTransferStatus transferStatus = activeTransfers.remove(finishedUpload);
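            // transferStatus may be null if no transfer is registered; closeWhileHandlingException ignores null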
            IOUtils.closeWhileHandlingException(transferStatus);
        }
    }
}
 
Example 14
Source File: HandleTrackingFS.java    From lucene-solr with Apache License 2.0
/**
 * Helper method, to deal with onOpen() throwing exception
 */
final void callOpenHook(Path path, Closeable stream) throws IOException {
  boolean success = false;
  try {
    onOpen(path, stream);
    success = true;
  } finally {
    if (!success) {
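      // the onOpen hook threw: close the just-opened stream without masking the hook's exception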
      IOUtils.closeWhileHandlingException(stream);
    }
  }
}
 
Example 15
Source File: SolrSnapshotMetaDataManager.java    From lucene-solr with Apache License 2.0
private synchronized void persist() throws IOException {
  String fileName = SNAPSHOTS_PREFIX + nextWriteGen;
  IndexOutput out = dir.createOutput(fileName, IOContext.DEFAULT);
  boolean success = false;
  try {
    CodecUtil.writeHeader(out, CODEC_NAME, VERSION_CURRENT);
    out.writeVInt(nameToDetailsMapping.size());
    for(Entry<String,SnapshotMetaData> ent : nameToDetailsMapping.entrySet()) {
      out.writeString(ent.getKey());
      out.writeString(ent.getValue().getIndexDirPath());
      out.writeVLong(ent.getValue().getGenerationNumber());
    }
    success = true;
  } finally {
    if (!success) {
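      // abort: close the partial output, then best-effort delete the incomplete snapshot file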
      IOUtils.closeWhileHandlingException(out);
      IOUtils.deleteFilesIgnoringExceptions(dir, fileName);
    } else {
      IOUtils.close(out);
    }
  }

  dir.sync(Collections.singletonList(fileName));

  if (nextWriteGen > 0) {
    String lastSaveFile = SNAPSHOTS_PREFIX + (nextWriteGen-1);
    // exception OK: likely it didn't exist
    IOUtils.deleteFilesIgnoringExceptions(dir, lastSaveFile);
  }

  nextWriteGen++;
}
 
Example 16
Source File: Blur022SegmentInfoReader.java    From incubator-retired-blur with Apache License 2.0
@Override
public SegmentInfo read(Directory dir, String segment, IOContext context) throws IOException {
  final String fileName = IndexFileNames.segmentFileName(segment, "", Blur022SegmentInfoFormat.SI_EXTENSION);
  final IndexInput input = dir.openInput(fileName, context);
  boolean success = false;
  try {
    CodecUtil.checkHeader(input, Blur022SegmentInfoFormat.CODEC_NAME, Blur022SegmentInfoFormat.VERSION_START,
        Blur022SegmentInfoFormat.VERSION_CURRENT);
    final String version = input.readString();
    final int docCount = input.readInt();
    if (docCount < 0) {
      throw new CorruptIndexException("invalid docCount: " + docCount + " (resource=" + input + ")");
    }
    final boolean isCompoundFile = input.readByte() == SegmentInfo.YES;
    final Map<String, String> diagnostics = input.readStringStringMap();
    final Map<String, String> attributes = input.readStringStringMap();
    final Set<String> files = input.readStringSet();

    if (input.getFilePointer() != input.length()) {
      throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read "
          + input.getFilePointer() + " vs size " + input.length() + " (resource: " + input + ")");
    }

    final SegmentInfo si = new SegmentInfo(dir, version, segment, docCount, isCompoundFile, null, diagnostics,
        Collections.unmodifiableMap(attributes));
    si.setFiles(files);

    success = true;

    return si;

  } finally {
    if (!success) {
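      // read failed: close the input while preserving the original exception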
      IOUtils.closeWhileHandlingException(input);
    } else {
      input.close();
    }
  }
}
 
Example 17
Source File: PropertiesSettingsLoader.java    From Elasticsearch with Apache License 2.0
@Override
public Map<String, String> load(byte[] source) throws IOException {
    Properties props = new NoDuplicatesProperties();
    StreamInput stream = StreamInput.wrap(source);
    try {
        props.load(stream);
        Map<String, String> result = newHashMap();
        for (Map.Entry entry : props.entrySet()) {
            result.put((String) entry.getKey(), (String) entry.getValue());
        }
        return result;
    } finally {
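        // note that close failures are suppressed even on the success path here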
        IOUtils.closeWhileHandlingException(stream);
    }
}
 
Example 18
Source File: TestIndexTooManyDocs.java    From lucene-solr with Apache License 2.0
public void testIndexTooManyDocs() throws IOException, InterruptedException {
  Directory dir = newDirectory();
  int numMaxDoc = 25;
  IndexWriterConfig config = new IndexWriterConfig();
  config.setRAMBufferSizeMB(0.000001); // force lots of small segments and lots of concurrent deletes
  IndexWriter writer = new IndexWriter(dir, config);
  try {
    IndexWriter.setMaxDocs(numMaxDoc);
    int numThreads = 5 + random().nextInt(5);
    Thread[] threads = new Thread[numThreads];
    CountDownLatch latch = new CountDownLatch(numThreads);
    CountDownLatch indexingDone = new CountDownLatch(numThreads - 2);
    AtomicBoolean done = new AtomicBoolean(false);
    for (int i = 0; i < numThreads; i++) {
      if (i >= 2) {
        threads[i] = new Thread(() -> {
          latch.countDown();
          try {
            try {
              latch.await();
            } catch (InterruptedException e) {
              throw new AssertionError(e);
            }
            for (int d = 0; d < 100; d++) {
              Document doc = new Document();
              String id = Integer.toString(random().nextInt(numMaxDoc * 2));
              doc.add(new StringField("id", id, Field.Store.NO));
              try {
                Term t = new Term("id", id);
                if (random().nextInt(5) == 0) {
                  writer.deleteDocuments(new TermQuery(t));
                }
                writer.updateDocument(t, doc);
              } catch (IOException e) {
                throw new AssertionError(e);
              } catch (IllegalArgumentException e) {
                assertEquals("number of documents in the index cannot exceed " + IndexWriter.getActualMaxDocs(), e.getMessage());
              }
            }
          } finally {
            indexingDone.countDown();
          }
        });
      } else {
        threads[i] = new Thread(() -> {
          try {
            latch.countDown();
            latch.await();
            DirectoryReader open = DirectoryReader.open(writer, true, true);
            while (done.get() == false) {
              DirectoryReader directoryReader = DirectoryReader.openIfChanged(open);
              if (directoryReader != null) {
                open.close();
                open = directoryReader;
              }
            }
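            // best-effort close of the last reader snapshot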
            IOUtils.closeWhileHandlingException(open);
          } catch (Exception e) {
            throw new AssertionError(e);
          }
        });
      }
      threads[i].start();
    }

    indexingDone.await();
    done.set(true);


    for (int i = 0; i < numThreads; i++) {
      threads[i].join();
    }
    writer.close();
    dir.close();
  } finally {
    IndexWriter.setMaxDocs(IndexWriter.MAX_DOCS);
  }
}
 
Example 19
Source File: VersionBlockTreeTermsWriter.java    From lucene-solr with Apache License 2.0
/** Create a new writer.  The number of items (terms or
 *  sub-blocks) per block will aim to be between
 *  minItemsPerBlock and maxItemsPerBlock, though in some
 *  cases the blocks may be smaller than the min. */
public VersionBlockTreeTermsWriter(
                                   SegmentWriteState state,
                                   PostingsWriterBase postingsWriter,
                                   int minItemsInBlock,
                                   int maxItemsInBlock)
  throws IOException
{
  BlockTreeTermsWriter.validateSettings(minItemsInBlock, maxItemsInBlock);
  maxDoc = state.segmentInfo.maxDoc();

  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);
  out = state.directory.createOutput(termsFileName, state.context);
  boolean success = false;
  IndexOutput indexOut = null;
  try {
    fieldInfos = state.fieldInfos;
    this.minItemsInBlock = minItemsInBlock;
    this.maxItemsInBlock = maxItemsInBlock;
    CodecUtil.writeIndexHeader(out, TERMS_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);   

    //DEBUG = state.segmentName.equals("_4a");

    final String termsIndexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
    indexOut = state.directory.createOutput(termsIndexFileName, state.context);
    CodecUtil.writeIndexHeader(indexOut, TERMS_INDEX_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix); 

    this.postingsWriter = postingsWriter;
    // segment = state.segmentInfo.name;

    // System.out.println("BTW.init seg=" + state.segmentName);

    postingsWriter.init(out, state);                          // have consumer write its format/header
    success = true;
  } finally {
    if (!success) {
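      // indexOut may still be null if the second createOutput failed; null arguments are ignored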
      IOUtils.closeWhileHandlingException(out, indexOut);
    }
  }
  this.indexOut = indexOut;
}
 
Example 20
Source File: Lucene84PostingsReader.java    From lucene-solr with Apache License 2.0
/** Sole constructor. */
public Lucene84PostingsReader(SegmentReadState state) throws IOException {
  boolean success = false;
  IndexInput docIn = null;
  IndexInput posIn = null;
  IndexInput payIn = null;
  
  // NOTE: these data files are too costly to verify checksum against all the bytes on open,
  // but for now we at least verify proper structure of the checksum footer: which looks
  // for FOOTER_MAGIC + algorithmID. This is cheap and can detect some forms of corruption
  // such as file truncation.
  
  String docName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, Lucene84PostingsFormat.DOC_EXTENSION);
  try {
    docIn = state.directory.openInput(docName, state.context);
    version = CodecUtil.checkIndexHeader(docIn, DOC_CODEC, VERSION_START, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    CodecUtil.retrieveChecksum(docIn);

    if (state.fieldInfos.hasProx()) {
      String proxName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, Lucene84PostingsFormat.POS_EXTENSION);
      posIn = state.directory.openInput(proxName, state.context);
      CodecUtil.checkIndexHeader(posIn, POS_CODEC, version, version, state.segmentInfo.getId(), state.segmentSuffix);
      CodecUtil.retrieveChecksum(posIn);

      if (state.fieldInfos.hasPayloads() || state.fieldInfos.hasOffsets()) {
        String payName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, Lucene84PostingsFormat.PAY_EXTENSION);
        payIn = state.directory.openInput(payName, state.context);
        CodecUtil.checkIndexHeader(payIn, PAY_CODEC, version, version, state.segmentInfo.getId(), state.segmentSuffix);
        CodecUtil.retrieveChecksum(payIn);
      }
    }

    this.docIn = docIn;
    this.posIn = posIn;
    this.payIn = payIn;
    success = true;
  } finally {
    if (!success) {
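      // close whichever inputs were opened; unopened (null) inputs are ignored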
      IOUtils.closeWhileHandlingException(docIn, posIn, payIn);
    }
  }
}