org.apache.lucene.index.SegmentWriteState Java Examples

The following examples show how to use org.apache.lucene.index.SegmentWriteState. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example #1
Source File: Completion090PostingsFormat.java    From Elasticsearch with Apache License 2.0 6 votes vote down vote up
/**
 * Creates the suggest-FST output file for this segment and wires up both the
 * delegate postings consumer and the suggester's own consumer.
 *
 * @param state per-segment write state (directory, segment name/suffix, IO context)
 * @throws IOException if the output file cannot be created or written
 */
public CompletionFieldsConsumer(SegmentWriteState state) throws IOException {
    this.delegatesFieldsConsumer = delegatePostingsFormat.fieldsConsumer(state);
    String suggestFSTFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, EXTENSION);
    IndexOutput output = null;
    boolean success = false;
    try {
        output = state.directory.createOutput(suggestFSTFile, state.context);
        CodecUtil.writeHeader(output, CODEC_NAME, SUGGEST_VERSION_CURRENT);
        /*
         * we write the delegate postings format name so we can load it
         * without getting an instance in the ctor
         */
        output.writeString(delegatePostingsFormat.getName());
        output.writeString(writeProvider.getName());
        this.suggestFieldsConsumer = writeProvider.consumer(output);
        success = true;
    } finally {
        if (!success) {
            // Also close the delegate consumer: it was opened before the try
            // block and would otherwise leak its open files on failure here.
            IOUtils.closeWhileHandlingException(output, delegatesFieldsConsumer);
        }
    }
}
 
Example #2
Source File: Lucene80DocValuesConsumer.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/** expert: Creates a new writer */
public Lucene80DocValuesConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
  boolean success = false;
  try {
    this.state = state;
    String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
    data = state.directory.createOutput(dataName, state.context);
    CodecUtil.writeIndexHeader(data, dataCodec, Lucene80DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
    meta = state.directory.createOutput(metaName, state.context);
    CodecUtil.writeIndexHeader(meta, metaCodec, Lucene80DocValuesFormat.VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    maxDoc = state.segmentInfo.maxDoc();
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
 
Example #3
Source File: CompletionFieldsConsumer.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Opens the completion dictionary output for this segment and the delegate
 * postings consumer; on any failure both are closed before rethrowing.
 */
CompletionFieldsConsumer(String codecName, PostingsFormat delegatePostingsFormat, SegmentWriteState state) throws IOException {
  this.codecName = codecName;
  this.delegatePostingsFormatName = delegatePostingsFormat.getName();
  this.state = state;
  String dictFile = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, DICT_EXTENSION);
  boolean success = false;
  try {
    this.delegateFieldsConsumer = delegatePostingsFormat.fieldsConsumer(state);
    dictOut = state.directory.createOutput(dictFile, state.context);
    CodecUtil.writeIndexHeader(dictOut, codecName, COMPLETION_VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    success = true;
  } finally {
    if (success == false) {
      // Close both resources acquired inside the try; nulls are tolerated.
      IOUtils.closeWhileHandlingException(dictOut, delegateFieldsConsumer);
    }
  }
}
 
Example #4
Source File: Lucene80NormsConsumer.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Opens the norms data and metadata outputs for this segment and writes
 * their index headers. On failure, close() (via 'this') releases whatever
 * was already opened.
 */
Lucene80NormsConsumer(SegmentWriteState state, String dataCodec, String dataExtension, String metaCodec, String metaExtension) throws IOException {
  boolean success = false;
  try {
    String dataName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, dataExtension);
    data = state.directory.createOutput(dataName, state.context);
    CodecUtil.writeIndexHeader(data, dataCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    String metaName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, metaExtension);
    meta = state.directory.createOutput(metaName, state.context);
    CodecUtil.writeIndexHeader(meta, metaCodec, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    maxDoc = state.segmentInfo.maxDoc();
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(this);
    }
  }
}
 
Example #5
Source File: IDVersionPostingsFormat.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Builds the terms writer for this segment, wrapping an ID/version-aware
 * postings writer. The postings writer is closed if wrapping fails.
 */
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  PostingsWriterBase postingsWriter = new IDVersionPostingsWriter(state.liveDocs);
  boolean wrapped = false;
  try {
    FieldsConsumer termsWriter =
        new VersionBlockTreeTermsWriter(state, postingsWriter, minTermsInBlock, maxTermsInBlock);
    wrapped = true;
    return termsWriter;
  } finally {
    if (wrapped == false) {
      IOUtils.closeWhileHandlingException(postingsWriter);
    }
  }
}
 
Example #6
Source File: Lucene84PostingsFormat.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Creates the block-tree terms writer over a Lucene84 postings writer.
 * If constructing the terms writer throws, the postings writer is closed
 * so no segment files leak.
 */
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  PostingsWriterBase postingsWriter = new Lucene84PostingsWriter(state);
  boolean wrapped = false;
  try {
    FieldsConsumer termsWriter =
        new BlockTreeTermsWriter(state, postingsWriter, minTermBlockSize, maxTermBlockSize);
    wrapped = true;
    return termsWriter;
  } finally {
    if (wrapped == false) {
      IOUtils.closeWhileHandlingException(postingsWriter);
    }
  }
}
 
Example #7
Source File: Lucene50RWPostingsFormat.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Creates a block-tree terms writer (default block sizes) over a Lucene50
 * postings writer, closing the postings writer if wrapping fails.
 */
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  PostingsWriterBase postingsWriter = new Lucene50PostingsWriter(state);
  boolean wrapped = false;
  try {
    FieldsConsumer termsWriter =
        new BlockTreeTermsWriter(state,
                                 postingsWriter,
                                 BlockTreeTermsWriter.DEFAULT_MIN_BLOCK_SIZE,
                                 BlockTreeTermsWriter.DEFAULT_MAX_BLOCK_SIZE);
    wrapped = true;
    return termsWriter;
  } finally {
    if (wrapped == false) {
      IOUtils.closeWhileHandlingException(postingsWriter);
    }
  }
}
 
Example #8
Source File: BlockTermsWriter.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Opens the terms output for this segment, writes its header, and lets the
 * postings writer append its own header. The terms index writer is owned by
 * the caller and is not closed here on failure.
 */
public BlockTermsWriter(TermsIndexWriterBase termsIndexWriter,
    SegmentWriteState state, PostingsWriterBase postingsWriter)
    throws IOException {
  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);
  this.termsIndexWriter = termsIndexWriter;
  maxDoc = state.segmentInfo.maxDoc();
  // 'out' is opened before the try so the finally can close it on any failure below.
  out = state.directory.createOutput(termsFileName, state.context);
  boolean success = false;
  try {
    fieldInfos = state.fieldInfos;
    CodecUtil.writeIndexHeader(out, CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    currentField = null;
    this.postingsWriter = postingsWriter;
    // segment = state.segmentName;
    
    //System.out.println("BTW.init seg=" + state.segmentName);
    
    postingsWriter.init(out, state); // have consumer write its format/header
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example #9
Source File: FixedGapTermsIndexWriter.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Opens the terms-index output for this segment and writes its header plus
 * the interval/packed-ints/blocksize preamble.
 *
 * @param termIndexInterval every Nth term is indexed; must be positive
 * @throws IllegalArgumentException if {@code termIndexInterval <= 0}
 */
public FixedGapTermsIndexWriter(SegmentWriteState state, int termIndexInterval) throws IOException {
  if (termIndexInterval <= 0) {
    throw new IllegalArgumentException("invalid termIndexInterval: " + termIndexInterval);
  }
  this.termIndexInterval = termIndexInterval;
  final String indexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
  out = state.directory.createOutput(indexFileName, state.context);
  boolean success = false;
  try {
    CodecUtil.writeIndexHeader(out, CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    out.writeVInt(termIndexInterval);
    out.writeVInt(PackedInts.VERSION_CURRENT);
    out.writeVInt(BLOCKSIZE);
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example #10
Source File: STUniformSplitRot13PostingsFormat.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Builds a single-term (ST) uniform-split terms writer whose write hooks are
 * instrumented: each override records calls so tests can verify the block
 * encoder was actually invoked.
 */
protected FieldsConsumer createFieldsConsumer(SegmentWriteState segmentWriteState, PostingsWriterBase postingsWriter) throws IOException {
  return new STUniformSplitTermsWriter(postingsWriter, segmentWriteState,
      UniformSplitTermsWriter.DEFAULT_TARGET_NUM_BLOCK_LINES,
      UniformSplitTermsWriter.DEFAULT_DELTA_NUM_LINES,
      getBlockEncoder()
  ) {
    @Override
    protected void writeDictionary(IndexDictionary.Builder dictionaryBuilder) throws IOException {
      recordBlockEncodingCall();
      super.writeDictionary(dictionaryBuilder);
      recordDictionaryEncodingCall();
    }
    @Override
    protected void writeEncodedFieldsMetadata(ByteBuffersDataOutput fieldsOutput) throws IOException {
      // NOTE(review): unlike the non-ST variant (Example #12), this override
      // also records a block-encoding call before delegating — confirm the
      // asymmetry is intentional.
      recordBlockEncodingCall();
      super.writeEncodedFieldsMetadata(fieldsOutput);
      recordFieldsMetadataEncodingCall();
    }
  };
}
 
Example #11
Source File: BlockTreeOrdsPostingsFormat.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Creates the ords-aware block-tree terms writer over a Lucene84 postings
 * writer; the postings writer is closed if wrapping fails.
 */
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  PostingsWriterBase postingsWriter = new Lucene84PostingsWriter(state);
  boolean wrapped = false;
  try {
    FieldsConsumer termsWriter =
        new OrdsBlockTreeTermsWriter(state, postingsWriter, minTermBlockSize, maxTermBlockSize);
    wrapped = true;
    return termsWriter;
  } finally {
    if (wrapped == false) {
      IOUtils.closeWhileHandlingException(postingsWriter);
    }
  }
}
 
Example #12
Source File: UniformSplitRot13PostingsFormat.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Builds a uniform-split terms writer whose write hooks are instrumented so
 * tests can verify the dictionary and fields-metadata encoders were invoked.
 */
protected FieldsConsumer createFieldsConsumer(SegmentWriteState segmentWriteState, PostingsWriterBase postingsWriter) throws IOException {
  return new UniformSplitTermsWriter(postingsWriter, segmentWriteState,
      UniformSplitTermsWriter.DEFAULT_TARGET_NUM_BLOCK_LINES,
      UniformSplitTermsWriter.DEFAULT_DELTA_NUM_LINES,
      getBlockEncoder()
  ) {
    @Override
    protected void writeDictionary(IndexDictionary.Builder dictionaryBuilder) throws IOException {
      recordBlockEncodingCall();
      super.writeDictionary(dictionaryBuilder);
      recordDictionaryEncodingCall();
    }
    @Override
    protected void writeEncodedFieldsMetadata(ByteBuffersDataOutput fieldsOutput) throws IOException {
      super.writeEncodedFieldsMetadata(fieldsOutput);
      recordFieldsMetadataEncodingCall();
    }
  };
}
 
Example #13
Source File: Lucene60PointsWriter.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/** Full constructor */
public Lucene60PointsWriter(SegmentWriteState writeState, int maxPointsInLeafNode, double maxMBSortInHeap) throws IOException {
  // Callers must only use this writer when at least one field has point values.
  assert writeState.fieldInfos.hasPointValues();
  this.writeState = writeState;
  this.maxPointsInLeafNode = maxPointsInLeafNode;
  this.maxMBSortInHeap = maxMBSortInHeap;
  String dataFileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name,
                                                       writeState.segmentSuffix,
                                                       Lucene60PointsFormat.DATA_EXTENSION);
  dataOut = writeState.directory.createOutput(dataFileName, writeState.context);
  boolean success = false;
  try {
    CodecUtil.writeIndexHeader(dataOut,
                               Lucene60PointsFormat.DATA_CODEC_NAME,
                               Lucene60PointsFormat.DATA_VERSION_CURRENT,
                               writeState.segmentInfo.getId(),
                               writeState.segmentSuffix);
    success = true;
  } finally {
    if (success == false) {
      // Header write failed: close the data output so the file handle is released.
      IOUtils.closeWhileHandlingException(dataOut);
    }
  }
}
 
Example #14
Source File: FSTTermsWriter.java    From lucene-solr with Apache License 2.0 6 votes vote down vote up
/**
 * Opens the FST terms output for this segment, writes its header, and lets
 * the postings writer append its own header into the same output.
 */
public FSTTermsWriter(SegmentWriteState state, PostingsWriterBase postingsWriter) throws IOException {
  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);

  this.postingsWriter = postingsWriter;
  this.fieldInfos = state.fieldInfos;
  this.out = state.directory.createOutput(termsFileName, state.context);
  this.maxDoc = state.segmentInfo.maxDoc();

  boolean success = false;
  try {
    CodecUtil.writeIndexHeader(out, TERMS_CODEC_NAME, TERMS_VERSION_CURRENT,
                                      state.segmentInfo.getId(), state.segmentSuffix);   

    this.postingsWriter.init(out, state); 
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example #15
Source File: CrankyDocValuesFormat.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Returns a wrapping consumer that randomly misbehaves; about 1 in 100 calls
 * fail immediately with a fake IOException to exercise caller error handling.
 */
@Override
public DocValuesConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  final boolean failNow = random.nextInt(100) == 0;
  if (failNow) {
    throw new IOException("Fake IOException from DocValuesFormat.fieldsConsumer()");
  }
  DocValuesConsumer delegateConsumer = delegate.fieldsConsumer(state);
  return new CrankyDocValuesConsumer(delegateConsumer, random);
}
 
Example #16
Source File: Completion090PostingsFormat.java    From Elasticsearch with Apache License 2.0 5 votes vote down vote up
/**
 * Creates the completion fields consumer for this segment.
 *
 * @throws UnsupportedOperationException if no delegate PostingsFormat was
 *         configured at construction time
 */
@Override
public CompletionFieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
    if (delegatePostingsFormat == null) {
        throw new UnsupportedOperationException("Error - " + getClass().getName()
                + " has been constructed without a choice of PostingsFormat");
    }
    // writeProvider is read inside the consumer's constructor; it must be set.
    assert writeProvider != null;
    return new CompletionFieldsConsumer(state);
}
 
Example #17
Source File: TestGeo3DPoint.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Returns the default codec, except when it is Lucene84: then a FilterCodec
 * with a randomized Lucene86 points format (random leaf size and sort-heap
 * budget) is used to exercise BKD parameters.
 */
private static Codec getCodec() {
  if (Codec.getDefault().getName().equals("Lucene84")) {
    int maxPointsInLeafNode = TestUtil.nextInt(random(), 16, 2048);
    double maxMBSortInHeap = 3.0 + (3*random().nextDouble());
    if (VERBOSE) {
      // Fixed: the message previously said "Lucene60PointsFormat", but the
      // codec below actually installs the Lucene86 points writer/reader.
      System.out.println("TEST: using Lucene86PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap);
    }

    return new FilterCodec("Lucene84", Codec.getDefault()) {
      @Override
      public PointsFormat pointsFormat() {
        return new PointsFormat() {
          @Override
          public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException {
            return new Lucene86PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap);
          }

          @Override
          public PointsReader fieldsReader(SegmentReadState readState) throws IOException {
            return new Lucene86PointsReader(readState);
          }
        };
      }
    };
  } else {
    return Codec.getDefault();
  }
}
 
Example #18
Source File: FSTPostingsFormat.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Creates the FST terms writer over a Lucene84 postings writer, closing the
 * postings writer if wrapping fails.
 */
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  PostingsWriterBase postingsWriter = new Lucene84PostingsWriter(state);
  boolean wrapped = false;
  try {
    FieldsConsumer termsWriter = new FSTTermsWriter(state, postingsWriter);
    wrapped = true;
    return termsWriter;
  } finally {
    if (wrapped == false) {
      IOUtils.closeWhileHandlingException(postingsWriter);
    }
  }
}
 
Example #19
Source File: VariableGapTermsIndexWriter.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Opens the terms-index output for this segment and writes its header;
 * the given policy decides which terms get indexed.
 */
public VariableGapTermsIndexWriter(SegmentWriteState state, IndexTermSelector policy) throws IOException {
  final String indexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
  out = state.directory.createOutput(indexFileName, state.context);
  boolean success = false;
  try {
    fieldInfos = state.fieldInfos;
    this.policy = policy;
    CodecUtil.writeIndexHeader(out, CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    }
  }
}
 
Example #20
Source File: MtasCodecPostingsFormat.java    From mtas with Apache License 2.0 5 votes vote down vote up
/**
 * Returns the fields consumer for this segment: when a delegate postings
 * format is configured, its consumer is wrapped in an MtasFieldsConsumer;
 * otherwise the delegate codec's postings format is used directly.
 */
@Override
public final FieldsConsumer fieldsConsumer(SegmentWriteState state)
    throws IOException {
  if (delegatePostingsFormat == null) {
    // No explicit delegate: resolve the postings format via the delegate codec.
    PostingsFormat codecPostingsFormat = Codec.forName(delegateCodecName).postingsFormat();
    return codecPostingsFormat.fieldsConsumer(state);
  }
  return new MtasFieldsConsumer(
      delegatePostingsFormat.fieldsConsumer(state), state, getName(),
      delegatePostingsFormat.getName());
}
 
Example #21
Source File: OrdsBlockTreeTermsWriter.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/** Create a new writer.  The number of items (terms or
 *  sub-blocks) per block will aim to be between
 *  minItemsPerBlock and maxItemsPerBlock, though in some
 *  cases the blocks may be smaller than the min. */
public OrdsBlockTreeTermsWriter(
                                SegmentWriteState state,
                                PostingsWriterBase postingsWriter,
                                int minItemsInBlock,
                                int maxItemsInBlock)
  throws IOException
{
  // Rejects invalid block-size combinations before any file is opened.
  BlockTreeTermsWriter.validateSettings(minItemsInBlock, maxItemsInBlock);

  maxDoc = state.segmentInfo.maxDoc();

  // 'out' is opened before the try so the finally can always close it on failure.
  final String termsFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_EXTENSION);
  out = state.directory.createOutput(termsFileName, state.context);
  boolean success = false;
  IndexOutput indexOut = null;
  try {
    fieldInfos = state.fieldInfos;
    this.minItemsInBlock = minItemsInBlock;
    this.maxItemsInBlock = maxItemsInBlock;
    CodecUtil.writeIndexHeader(out, TERMS_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);

    // Second output: the terms index file.
    final String termsIndexFileName = IndexFileNames.segmentFileName(state.segmentInfo.name, state.segmentSuffix, TERMS_INDEX_EXTENSION);
    indexOut = state.directory.createOutput(termsIndexFileName, state.context);
    CodecUtil.writeIndexHeader(indexOut, TERMS_INDEX_CODEC_NAME, VERSION_CURRENT, state.segmentInfo.getId(), state.segmentSuffix);

    this.postingsWriter = postingsWriter;
    // segment = state.segmentInfo.name;

    // System.out.println("BTW.init seg=" + state.segmentName);

    postingsWriter.init(out, state);                          // have consumer write its format/header
    success = true;
  } finally {
    if (!success) {
      // Closes both outputs; indexOut may still be null here, which is tolerated.
      IOUtils.closeWhileHandlingException(out, indexOut);
    }
  }
  // Only publish the index output to the field after full success.
  this.indexOut = indexOut;
}
 
Example #22
Source File: UniformSplitPostingsFormat.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a uniform-split terms writer over a Lucene84 postings writer;
 * the postings writer is closed if the terms writer cannot be built.
 */
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  PostingsWriterBase postingsWriter = new Lucene84PostingsWriter(state);
  boolean wrapped = false;
  try {
    FieldsConsumer uniformSplitWriter =
        createUniformSplitTermsWriter(postingsWriter, state, targetNumBlockLines, deltaNumLines, blockEncoder);
    wrapped = true;
    return uniformSplitWriter;
  } finally {
    if (wrapped == false) {
      IOUtils.closeWhileHandlingException(postingsWriter);
    }
  }
}
 
Example #23
Source File: TestLucene86PointsFormat.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Test fixture: half the time uses the stock Lucene86 codec, half the time
 * impersonates it with a points format whose leaf size and sort-heap budget
 * are randomized.
 */
public TestLucene86PointsFormat() {
  // standard issue
  Codec defaultCodec = new Lucene86Codec();
  if (random().nextBoolean()) {
    // randomize parameters
    maxPointsInLeafNode = TestUtil.nextInt(random(), 50, 500);
    double maxMBSortInHeap = 3.0 + (3*random().nextDouble());
    if (VERBOSE) {
      // Fixed: the message previously said "Lucene60PointsFormat", but the
      // impersonating codec below wires in the Lucene86 points writer/reader.
      System.out.println("TEST: using Lucene86PointsFormat with maxPointsInLeafNode=" + maxPointsInLeafNode + " and maxMBSortInHeap=" + maxMBSortInHeap);
    }

    // sneaky impersonation!
    codec = new FilterCodec(defaultCodec.getName(), defaultCodec) {
      @Override
      public PointsFormat pointsFormat() {
        return new PointsFormat() {
          @Override
          public PointsWriter fieldsWriter(SegmentWriteState writeState) throws IOException {
            return new Lucene86PointsWriter(writeState, maxPointsInLeafNode, maxMBSortInHeap);
          }

          @Override
          public PointsReader fieldsReader(SegmentReadState readState) throws IOException {
            return new Lucene86PointsReader(readState);
          }
        };
      }
    };
  } else {
    // standard issue
    codec = defaultCodec;
    maxPointsInLeafNode = BKDWriter.DEFAULT_MAX_POINTS_IN_LEAF_NODE;
  }
}
 
Example #24
Source File: LuceneFixedGap.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a BlockTermsWriter over a fixed-gap terms index and a Lucene84
 * postings writer. Cleanup is two-staged: if building the index writer
 * fails, only the postings writer is closed; if building the terms writer
 * fails, both are closed.
 */
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  PostingsWriterBase docs = new Lucene84PostingsWriter(state);

  // TODO: should we make the terms index more easily
  // pluggable?  Ie so that this codec would record which
  // index impl was used, and switch on loading?
  // Or... you must make a new Codec for this?
  TermsIndexWriterBase indexWriter;
  boolean success = false;
  try {
    indexWriter = new FixedGapTermsIndexWriter(state, termIndexInterval);
    success = true;
  } finally {
    if (!success) {
      docs.close();
    }
  }

  success = false;
  try {
    // Must use BlockTermsWriter (not BlockTree) because
    // BlockTree doesn't support ords (yet)...
    FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, docs);
    success = true;
    return ret;
  } finally {
    if (!success) {
      // Nested try/finally ensures indexWriter closes even if docs.close() throws.
      try {
        docs.close();
      } finally {
        indexWriter.close();
      }
    }
  }
}
 
Example #25
Source File: CompletionPostingsFormat.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Creates the completion fields consumer for this segment.
 *
 * @throws UnsupportedOperationException if no delegate PostingsFormat is
 *         available from {@code delegatePostingsFormat()}
 */
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  PostingsFormat delegatePostingsFormat = delegatePostingsFormat();
  if (delegatePostingsFormat == null) {
    throw new UnsupportedOperationException("Error - " + getClass().getName()
        + " has been constructed without a choice of PostingsFormat");
  }
  return new CompletionFieldsConsumer(getName(), delegatePostingsFormat, state);
}
 
Example #26
Source File: LuceneVarGapDocFreqInterval.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Creates a BlockTermsWriter over a variable-gap terms index (term selection
 * driven by doc-freq threshold / interval) and a Lucene84 postings writer.
 * Cleanup is two-staged: index-writer failure closes only the postings
 * writer; terms-writer failure closes both.
 */
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  PostingsWriterBase docs = new Lucene84PostingsWriter(state);

  // TODO: should we make the terms index more easily
  // pluggable?  Ie so that this codec would record which
  // index impl was used, and switch on loading?
  // Or... you must make a new Codec for this?
  TermsIndexWriterBase indexWriter;
  boolean success = false;
  try {
    indexWriter = new VariableGapTermsIndexWriter(state, new VariableGapTermsIndexWriter.EveryNOrDocFreqTermSelector(docFreqThreshold, termIndexInterval));
    success = true;
  } finally {
    if (!success) {
      docs.close();
    }
  }

  success = false;
  try {
    // Must use BlockTermsWriter (not BlockTree) because
    // BlockTree doesn't support ords (yet)...
    FieldsConsumer ret = new BlockTermsWriter(indexWriter, state, docs);
    success = true;
    return ret;
  } finally {
    if (!success) {
      // Nested try/finally ensures indexWriter closes even if docs.close() throws.
      try {
        docs.close();
      } finally {
        indexWriter.close();
      }
    }
  }
}
 
Example #27
Source File: RAMOnlyPostingsFormat.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Assigns this segment a fresh numeric id, persists it in a small id file,
 * registers an in-memory RAMPostings under that id in the shared state map,
 * and returns a consumer that fills it.
 */
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState writeState) throws IOException {
  final int id = nextID.getAndIncrement();

  // TODO -- ok to do this up front instead of
  // on close....?  should be ok?
  // Write our ID:
  final String idFileName = IndexFileNames.segmentFileName(writeState.segmentInfo.name, writeState.segmentSuffix, ID_EXTENSION);
  IndexOutput out = writeState.directory.createOutput(idFileName, writeState.context);
  boolean success = false;
  try {
    CodecUtil.writeHeader(out, RAM_ONLY_NAME, VERSION_LATEST);
    out.writeVInt(id);
    success = true;
  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(out);
    } else {
      // The id file is fully written here, so close it normally.
      IOUtils.close(out);
    }
  }
  
  final RAMPostings postings = new RAMPostings();
  final RAMFieldsConsumer consumer = new RAMFieldsConsumer(writeState, postings);

  // 'state' is shared across threads, so guard the registration.
  synchronized(state) {
    state.put(id, postings);
  }
  return consumer;
}
 
Example #28
Source File: CrankyNormsFormat.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Returns a wrapping norms consumer that randomly misbehaves; about 1 in 100
 * calls fail immediately with a fake IOException.
 */
@Override
public NormsConsumer normsConsumer(SegmentWriteState state) throws IOException {
  final boolean failNow = random.nextInt(100) == 0;
  if (failNow) {
    throw new IOException("Fake IOException from NormsFormat.normsConsumer()");
  }
  NormsConsumer delegateConsumer = delegate.normsConsumer(state);
  return new CrankyNormsConsumer(delegateConsumer, random);
}
 
Example #29
Source File: CrankyPostingsFormat.java    From lucene-solr with Apache License 2.0 5 votes vote down vote up
/**
 * Returns a wrapping fields consumer that randomly misbehaves; about 1 in
 * 100 calls fail immediately with a fake IOException.
 */
@Override
public FieldsConsumer fieldsConsumer(SegmentWriteState state) throws IOException {
  final boolean failNow = random.nextInt(100) == 0;
  if (failNow) {
    throw new IOException("Fake IOException from PostingsFormat.fieldsConsumer()");
  }
  FieldsConsumer delegateConsumer = delegate.fieldsConsumer(state);
  return new CrankyFieldsConsumer(delegateConsumer, random);
}
 
Example #30
Source File: PerFieldPostingsFormat.java    From lucene-solr with Apache License 2.0 4 votes vote down vote up
/**
 * Groups the given indexed fields by the PostingsFormat instance that will
 * write them. Each distinct format gets its own suffix (derived from a
 * per-format-name counter) and its own sub-SegmentWriteState; the chosen
 * format name and suffix are recorded as attributes on each FieldInfo.
 *
 * @param indexedFieldNames names of all fields being written
 * @return map from PostingsFormat to the group of fields it writes
 * @throws IllegalStateException if a field resolves to a null format, or the
 *         suffix bookkeeping is inconsistent
 */
private Map<PostingsFormat, FieldsGroup> buildFieldsGroupMapping(Iterable<String> indexedFieldNames) {
  // Maps a PostingsFormat instance to the suffix it should use
  Map<PostingsFormat,FieldsGroup.Builder> formatToGroupBuilders = new HashMap<>();

  // Holds last suffix of each PostingFormat name
  Map<String,Integer> suffixes = new HashMap<>();

  // Assign field -> PostingsFormat
  for(String field : indexedFieldNames) {
    FieldInfo fieldInfo = writeState.fieldInfos.fieldInfo(field);
    // TODO: This should check current format from the field attribute?
    final PostingsFormat format = getPostingsFormatForField(field);

    if (format == null) {
      throw new IllegalStateException("invalid null PostingsFormat for field=\"" + field + "\"");
    }
    String formatName = format.getName();

    FieldsGroup.Builder groupBuilder = formatToGroupBuilders.get(format);
    if (groupBuilder == null) {
      // First time we are seeing this format; create a new instance

      // bump the suffix
      Integer suffix = suffixes.get(formatName);
      if (suffix == null) {
        suffix = 0;
      } else {
        suffix = suffix + 1;
      }
      suffixes.put(formatName, suffix);

      // Each group writes under its own segment suffix so two instances of
      // the same format name cannot collide on file names.
      String segmentSuffix = getFullSegmentSuffix(field,
                                                  writeState.segmentSuffix,
                                                  getSuffix(formatName, Integer.toString(suffix)));
      groupBuilder = new FieldsGroup.Builder(suffix, new SegmentWriteState(writeState, segmentSuffix));
      formatToGroupBuilders.put(format, groupBuilder);
    } else {
      // we've already seen this format, so just grab its suffix
      if (!suffixes.containsKey(formatName)) {
        throw new IllegalStateException("no suffix for format name: " + formatName + ", expected: " + groupBuilder.suffix);
      }
    }

    groupBuilder.addField(field);

    // Persist the decision so readers can reconstruct field -> format/suffix.
    fieldInfo.putAttribute(PER_FIELD_FORMAT_KEY, formatName);
    fieldInfo.putAttribute(PER_FIELD_SUFFIX_KEY, Integer.toString(groupBuilder.suffix));
  }

  // Presized for the default 0.75 load factor to avoid rehashing.
  Map<PostingsFormat,FieldsGroup> formatToGroups = new HashMap<>((int) (formatToGroupBuilders.size() / 0.75f) + 1);
  formatToGroupBuilders.forEach((postingsFormat, builder) -> formatToGroups.put(postingsFormat, builder.build()));
  return formatToGroups;
}