Java Code Examples for org.apache.lucene.index.FieldInfo#hasPayloads()

The following examples show how to use org.apache.lucene.index.FieldInfo#hasPayloads(). All of them are taken from the lucene-solr project; the source file for each example is noted above it.
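FieldInfo#hasPayloads() reports whether at least one term of the field was indexed with a payload in the segment that the FieldInfo describes. Because FieldInfos are per segment, checking a whole index means visiting every leaf reader. The sketch below is a minimal, hypothetical helper (the Directory dir, the field name, and the method name are assumptions for illustration, not part of any example on this page):

import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.Directory;

public class HasPayloadsCheck {

  /** Hypothetical helper: true if any segment indexed payloads for the given field. */
  static boolean fieldHasPayloads(Directory dir, String fieldName) throws IOException {
    try (DirectoryReader reader = DirectoryReader.open(dir)) {
      for (LeafReaderContext ctx : reader.leaves()) {
        // Per-segment field metadata; null if this segment has no such field.
        FieldInfo fi = ctx.reader().getFieldInfos().fieldInfo(fieldName);
        if (fi != null && fi.hasPayloads()) {
          return true;
        }
      }
      return false;
    }
  }
}

The postings readers in the examples below consult the same flag to decide which PostingsEnum implementation to return and whether to allocate buffers for payload bytes.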
Example 1
Source File: Lucene84PostingsReader.java    From lucene-solr with Apache License 2.0
@Override
public ImpactsEnum impacts(FieldInfo fieldInfo, BlockTermState state, int flags) throws IOException {
  if (state.docFreq <= BLOCK_SIZE) {
    // no skip data
    return new SlowImpactsEnum(postings(fieldInfo, state, null, flags));
  }

  final boolean indexHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
  final boolean indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  final boolean indexHasPayloads = fieldInfo.hasPayloads();

  if (indexHasPositions == false || PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS) == false) {
    return new BlockImpactsDocsEnum(fieldInfo, (IntBlockTermState) state);
  }

  if (indexHasPositions &&
      PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS) &&
      (indexHasOffsets == false || PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS) == false) &&
      (indexHasPayloads == false || PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS) == false)) {
    return new BlockImpactsPostingsEnum(fieldInfo, (IntBlockTermState) state);
  }

  return new BlockImpactsEverythingEnum(fieldInfo, (IntBlockTermState) state, flags);
}
 
Example 2
Source File: Lucene50PostingsReader.java    From lucene-solr with Apache License 2.0
@Override
public ImpactsEnum impacts(FieldInfo fieldInfo, BlockTermState state, int flags) throws IOException {
  if (state.docFreq <= BLOCK_SIZE || version < Lucene50PostingsFormat.VERSION_IMPACT_SKIP_DATA) {
    // no skip data
    return new SlowImpactsEnum(postings(fieldInfo, state, null, flags));
  }

  final boolean indexHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
  final boolean indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  final boolean indexHasPayloads = fieldInfo.hasPayloads();

  if (indexHasPositions &&
      PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS) &&
      (indexHasOffsets == false || PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS) == false) &&
      (indexHasPayloads == false || PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS) == false)) {
    return new BlockImpactsPostingsEnum(fieldInfo, (IntBlockTermState) state);
  }

  return new BlockImpactsEverythingEnum(fieldInfo, (IntBlockTermState) state, flags);
}
 
Example 3
Source File: Lucene84PostingsReader.java    From lucene-solr with Apache License 2.0
public BlockImpactsPostingsEnum(FieldInfo fieldInfo, IntBlockTermState termState) throws IOException {
  indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  indexHasPayloads = fieldInfo.hasPayloads();

  this.docIn = Lucene84PostingsReader.this.docIn.clone();

  this.posIn = Lucene84PostingsReader.this.posIn.clone();

  docFreq = termState.docFreq;
  docTermStartFP = termState.docStartFP;
  posTermStartFP = termState.posStartFP;
  payTermStartFP = termState.payStartFP;
  totalTermFreq = termState.totalTermFreq;
  docIn.seek(docTermStartFP);
  posPendingFP = posTermStartFP;
  posPendingCount = 0;
  if (termState.totalTermFreq < BLOCK_SIZE) {
    lastPosBlockFP = posTermStartFP;
  } else if (termState.totalTermFreq == BLOCK_SIZE) {
    lastPosBlockFP = -1;
  } else {
    lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset;
  }

  doc = -1;
  accum = 0;
  docUpto = 0;
  docBufferUpto = BLOCK_SIZE;

  skipper = new Lucene84ScoreSkipReader(docIn.clone(),
      MAX_SKIP_LEVELS,
      true,
      indexHasOffsets,
      indexHasPayloads);
  skipper.init(docTermStartFP+termState.skipOffset, docTermStartFP, posTermStartFP, payTermStartFP, docFreq);
}
 
Example 4
Source File: Lucene50PostingsReader.java    From lucene-solr with Apache License 2.0
public BlockImpactsPostingsEnum(FieldInfo fieldInfo, IntBlockTermState termState) throws IOException {
  indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  indexHasPayloads = fieldInfo.hasPayloads();

  this.docIn = Lucene50PostingsReader.this.docIn.clone();

  encoded = new byte[MAX_ENCODED_SIZE];

  this.posIn = Lucene50PostingsReader.this.posIn.clone();

  docFreq = termState.docFreq;
  docTermStartFP = termState.docStartFP;
  posTermStartFP = termState.posStartFP;
  payTermStartFP = termState.payStartFP;
  totalTermFreq = termState.totalTermFreq;
  docIn.seek(docTermStartFP);
  posPendingFP = posTermStartFP;
  posPendingCount = 0;
  if (termState.totalTermFreq < BLOCK_SIZE) {
    lastPosBlockFP = posTermStartFP;
  } else if (termState.totalTermFreq == BLOCK_SIZE) {
    lastPosBlockFP = -1;
  } else {
    lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset;
  }

  doc = -1;
  accum = 0;
  docUpto = 0;
  docBufferUpto = BLOCK_SIZE;

  skipper = new Lucene50ScoreSkipReader(version,
      docIn.clone(),
      MAX_SKIP_LEVELS,
      true,
      indexHasOffsets,
      indexHasPayloads);
  skipper.init(docTermStartFP+termState.skipOffset, docTermStartFP, posTermStartFP, payTermStartFP, docFreq);
}
 
Example 5
Source File: Lucene50PostingsReader.java    From lucene-solr with Apache License 2.0
public EverythingEnum(FieldInfo fieldInfo) throws IOException {
  indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  indexHasPayloads = fieldInfo.hasPayloads();

  this.startDocIn = Lucene50PostingsReader.this.docIn;
  this.docIn = null;
  this.posIn = Lucene50PostingsReader.this.posIn.clone();
  if (indexHasOffsets || indexHasPayloads) {
    this.payIn = Lucene50PostingsReader.this.payIn.clone();
  } else {
    this.payIn = null;
  }
  encoded = new byte[MAX_ENCODED_SIZE];
  if (indexHasOffsets) {
    offsetStartDeltaBuffer = new int[MAX_DATA_SIZE];
    offsetLengthBuffer = new int[MAX_DATA_SIZE];
  } else {
    offsetStartDeltaBuffer = null;
    offsetLengthBuffer = null;
    startOffset = -1;
    endOffset = -1;
  }

  if (indexHasPayloads) {
    payloadLengthBuffer = new int[MAX_DATA_SIZE];
    payloadBytes = new byte[128];
    payload = new BytesRef();
  } else {
    payloadLengthBuffer = null;
    payloadBytes = null;
    payload = null;
  }
}
 
Example 6
Source File: Lucene50PostingsReader.java    From lucene-solr with Apache License 2.0
public BlockDocsEnum(FieldInfo fieldInfo) throws IOException {
  this.startDocIn = Lucene50PostingsReader.this.docIn;
  this.docIn = null;
  indexHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
  indexHasPos = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
  indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  indexHasPayloads = fieldInfo.hasPayloads();
  encoded = new byte[MAX_ENCODED_SIZE];    
}
 
Example 7
Source File: Lucene50PostingsReader.java    From lucene-solr with Apache License 2.0
@Override
public void decodeTerm(DataInput in, FieldInfo fieldInfo, BlockTermState _termState, boolean absolute)
  throws IOException {
  final IntBlockTermState termState = (IntBlockTermState) _termState;
  final boolean fieldHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
  final boolean fieldHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  final boolean fieldHasPayloads = fieldInfo.hasPayloads();

  if (absolute) {
    termState.docStartFP = 0;
    termState.posStartFP = 0;
    termState.payStartFP = 0;
  }

  termState.docStartFP += in.readVLong();
  if (fieldHasPositions) {
    termState.posStartFP += in.readVLong();
    if (fieldHasOffsets || fieldHasPayloads) {
      termState.payStartFP += in.readVLong();
    }
  }
  if (termState.docFreq == 1) {
    termState.singletonDocID = in.readVInt();
  } else {
    termState.singletonDocID = -1;
  }
  if (fieldHasPositions) {
    if (termState.totalTermFreq > BLOCK_SIZE) {
      termState.lastPosBlockOffset = in.readVLong();
    } else {
      termState.lastPosBlockOffset = -1;
    }
  }
  if (termState.docFreq > BLOCK_SIZE) {
    termState.skipOffset = in.readVLong();
  } else {
    termState.skipOffset = -1;
  }
}
 
Example 8
Source File: Lucene60FieldInfosFormat.java    From lucene-solr with Apache License 2.0
@Override
public void write(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, FieldInfos infos, IOContext context) throws IOException {
  final String fileName = IndexFileNames.segmentFileName(segmentInfo.name, segmentSuffix, EXTENSION);
  try (IndexOutput output = directory.createOutput(fileName, context)) {
    CodecUtil.writeIndexHeader(output, Lucene60FieldInfosFormat.CODEC_NAME, Lucene60FieldInfosFormat.FORMAT_CURRENT, segmentInfo.getId(), segmentSuffix);
    output.writeVInt(infos.size());
    for (FieldInfo fi : infos) {
      fi.checkConsistency();

      output.writeString(fi.name);
      output.writeVInt(fi.number);

      byte bits = 0x0;
      if (fi.hasVectors()) bits |= STORE_TERMVECTOR;
      if (fi.omitsNorms()) bits |= OMIT_NORMS;
      if (fi.hasPayloads()) bits |= STORE_PAYLOADS;
      if (fi.isSoftDeletesField()) bits |= SOFT_DELETES_FIELD;
      output.writeByte(bits);

      output.writeByte(indexOptionsByte(fi.getIndexOptions()));

      // pack the DV type and hasNorms in one byte
      output.writeByte(docValuesByte(fi.getDocValuesType()));
      output.writeLong(fi.getDocValuesGen());
      output.writeMapOfStrings(fi.attributes());
      output.writeVInt(fi.getPointDimensionCount());
      if (fi.getPointDimensionCount() != 0) {
        output.writeVInt(fi.getPointIndexDimensionCount());
        output.writeVInt(fi.getPointNumBytes());
      }
    }
    CodecUtil.writeFooter(output);
  }
}
 
Example 9
Source File: DocumentField.java    From lucene-solr with Apache License 2.0
static DocumentField of(FieldInfo finfo, IndexableField field, IndexReader reader, int docId)
    throws IOException {

  Objects.requireNonNull(finfo);
  Objects.requireNonNull(reader);

  DocumentField dfield = new DocumentField();

  dfield.name = finfo.name;
  dfield.idxOptions = finfo.getIndexOptions();
  dfield.hasTermVectors = finfo.hasVectors();
  dfield.hasPayloads = finfo.hasPayloads();
  dfield.hasNorms = finfo.hasNorms();

  if (finfo.hasNorms()) {
    NumericDocValues norms = MultiDocValues.getNormValues(reader, finfo.name);
    if (norms.advanceExact(docId)) {
      dfield.norm = norms.longValue();
    }
  }

  dfield.dvType = finfo.getDocValuesType();

  dfield.pointDimensionCount = finfo.getPointDimensionCount();
  dfield.pointNumBytes = finfo.getPointNumBytes();

  if (field != null) {
    dfield.isStored = field.fieldType().stored();
    dfield.stringValue = field.stringValue();
    if (field.binaryValue() != null) {
      dfield.binaryValue = BytesRef.deepCopyOf(field.binaryValue());
    }
    dfield.numericValue = field.numericValue();
  }

  return dfield;
}
 
Example 10
Source File: Lucene84PostingsReader.java    From lucene-solr with Apache License 2.0
public BlockImpactsDocsEnum(FieldInfo fieldInfo, IntBlockTermState termState) throws IOException {
  indexHasFreqs = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
  final boolean indexHasPositions = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
  final boolean indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  final boolean indexHasPayloads = fieldInfo.hasPayloads();

  this.docIn = Lucene84PostingsReader.this.docIn.clone();

  docFreq = termState.docFreq;
  docIn.seek(termState.docStartFP);

  doc = -1;
  accum = 0;
  blockUpto = 0;
  docBufferUpto = BLOCK_SIZE;

  skipper = new Lucene84ScoreSkipReader(docIn.clone(),
      MAX_SKIP_LEVELS,
      indexHasPositions,
      indexHasOffsets,
      indexHasPayloads);
  skipper.init(termState.docStartFP+termState.skipOffset, termState.docStartFP, termState.posStartFP, termState.payStartFP, docFreq);

  // We set the last element of docBuffer to NO_MORE_DOCS, it helps save conditionals in advance()
  docBuffer[BLOCK_SIZE] = NO_MORE_DOCS;
  this.isFreqsRead = true;
  if (indexHasFreqs == false) {
    Arrays.fill(freqBuffer, 1L);
  }
}
 
Example 11
Source File: Lucene84PostingsReader.java    From lucene-solr with Apache License 2.0
public EverythingEnum(FieldInfo fieldInfo) throws IOException {
  indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  indexHasPayloads = fieldInfo.hasPayloads();

  this.startDocIn = Lucene84PostingsReader.this.docIn;
  this.docIn = null;
  this.posIn = Lucene84PostingsReader.this.posIn.clone();
  if (indexHasOffsets || indexHasPayloads) {
    this.payIn = Lucene84PostingsReader.this.payIn.clone();
  } else {
    this.payIn = null;
  }
  if (indexHasOffsets) {
    offsetStartDeltaBuffer = new long[BLOCK_SIZE];
    offsetLengthBuffer = new long[BLOCK_SIZE];
  } else {
    offsetStartDeltaBuffer = null;
    offsetLengthBuffer = null;
    startOffset = -1;
    endOffset = -1;
  }

  if (indexHasPayloads) {
    payloadLengthBuffer = new long[BLOCK_SIZE];
    payloadBytes = new byte[128];
    payload = new BytesRef();
  } else {
    payloadLengthBuffer = null;
    payloadBytes = null;
    payload = null;
  }

  // We set the last element of docBuffer to NO_MORE_DOCS, it helps save conditionals in advance()
  docBuffer[BLOCK_SIZE] = NO_MORE_DOCS;
}
 
Example 12
Source File: CollapsingQParserPlugin.java    From lucene-solr with Apache License 2.0
ReaderWrapper(LeafReader leafReader, String field) {
  super(leafReader);

  // TODO can we just do "field" and not bother with the other fields?
  List<FieldInfo> newInfos = new ArrayList<>(in.getFieldInfos().size());
  for (FieldInfo fieldInfo : in.getFieldInfos()) {
    if (fieldInfo.name.equals(field)) {
      FieldInfo f = new FieldInfo(fieldInfo.name,
          fieldInfo.number,
          fieldInfo.hasVectors(),
          fieldInfo.hasNorms(),
          fieldInfo.hasPayloads(),
          fieldInfo.getIndexOptions(),
          DocValuesType.NONE,
          fieldInfo.getDocValuesGen(),
          fieldInfo.attributes(),
          fieldInfo.getPointDimensionCount(),
          fieldInfo.getPointIndexDimensionCount(),
          fieldInfo.getPointNumBytes(),
          fieldInfo.isSoftDeletesField());
      newInfos.add(f);
    } else {
      newInfos.add(fieldInfo);
    }
  }
  FieldInfos infos = new FieldInfos(newInfos.toArray(new FieldInfo[newInfos.size()]));
  this.fieldInfos = infos;
}
 
Example 13
Source File: Lucene84PostingsReader.java    From lucene-solr with Apache License 2.0
public BlockDocsEnum(FieldInfo fieldInfo) throws IOException {
  this.startDocIn = Lucene84PostingsReader.this.docIn;
  this.docIn = null;
  indexHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
  indexHasPos = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
  indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  indexHasPayloads = fieldInfo.hasPayloads();
  // We set the last element of docBuffer to NO_MORE_DOCS, it helps save conditionals in advance()
  docBuffer[BLOCK_SIZE] = NO_MORE_DOCS;
}
 
Example 14
Source File: PerFieldMergeState.java    From lucene-solr with Apache License 2.0
FilterFieldInfos(FieldInfos src, Collection<String> filterFields) {
  // Copy all the input FieldInfo objects since the field numbering must be kept consistent
  super(toArray(src));

  boolean hasVectors = false;
  boolean hasProx = false;
  boolean hasPayloads = false;
  boolean hasOffsets = false;
  boolean hasFreq = false;
  boolean hasNorms = false;
  boolean hasDocValues = false;
  boolean hasPointValues = false;

  this.filteredNames = new HashSet<>(filterFields);
  this.filtered = new ArrayList<>(filterFields.size());
  for (FieldInfo fi : src) {
    if (this.filteredNames.contains(fi.name)) {
      this.filtered.add(fi);
      hasVectors |= fi.hasVectors();
      hasProx |= fi.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
      hasFreq |= fi.getIndexOptions() != IndexOptions.DOCS;
      hasOffsets |= fi.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
      hasNorms |= fi.hasNorms();
      hasDocValues |= fi.getDocValuesType() != DocValuesType.NONE;
      hasPayloads |= fi.hasPayloads();
      hasPointValues |= (fi.getPointDimensionCount() != 0);
    }
  }

  this.filteredHasVectors = hasVectors;
  this.filteredHasProx = hasProx;
  this.filteredHasPayloads = hasPayloads;
  this.filteredHasOffsets = hasOffsets;
  this.filteredHasFreq = hasFreq;
  this.filteredHasNorms = hasNorms;
  this.filteredHasDocValues = hasDocValues;
  this.filteredHasPointValues = hasPointValues;
}
 
Example 15
Source File: PushPostingsWriterBase.java    From lucene-solr with Apache License 2.0
/**
 * Sets the current field for writing; called when writing
 * switches to another field. */
@Override
public void setField(FieldInfo fieldInfo) {
  this.fieldInfo = fieldInfo;
  indexOptions = fieldInfo.getIndexOptions();

  writeFreqs = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
  writePositions = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
  writeOffsets = indexOptions.compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;        
  writePayloads = fieldInfo.hasPayloads();

  if (writeFreqs == false) {
    enumFlags = 0;
  } else if (writePositions == false) {
    enumFlags = PostingsEnum.FREQS;
  } else if (writeOffsets == false) {
    if (writePayloads) {
      enumFlags = PostingsEnum.PAYLOADS;
    } else {
      enumFlags = PostingsEnum.POSITIONS;
    }
  } else {
    if (writePayloads) {
      enumFlags = PostingsEnum.PAYLOADS | PostingsEnum.OFFSETS;
    } else {
      enumFlags = PostingsEnum.OFFSETS;
    }
  }
}
 
Example 16
Source File: Lucene50FieldInfosFormat.java    From lucene-solr with Apache License 2.0
@Override
public void write(Directory directory, SegmentInfo segmentInfo, String segmentSuffix, FieldInfos infos, IOContext context) throws IOException {
  final String fileName = IndexFileNames.segmentFileName(segmentInfo.name, segmentSuffix, EXTENSION);
  try (IndexOutput output = directory.createOutput(fileName, context)) {
    CodecUtil.writeIndexHeader(output, Lucene50FieldInfosFormat.CODEC_NAME, Lucene50FieldInfosFormat.FORMAT_CURRENT, segmentInfo.getId(), segmentSuffix);
    output.writeVInt(infos.size());
    for (FieldInfo fi : infos) {
      fi.checkConsistency();

      output.writeString(fi.name);
      output.writeVInt(fi.number);

      byte bits = 0x0;
      if (fi.hasVectors()) bits |= STORE_TERMVECTOR;
      if (fi.omitsNorms()) bits |= OMIT_NORMS;
      if (fi.hasPayloads()) bits |= STORE_PAYLOADS;
      output.writeByte(bits);

      output.writeByte(indexOptionsByte(fi.getIndexOptions()));

      // pack the DV type and hasNorms in one byte
      output.writeByte(docValuesByte(fi.getDocValuesType()));
      output.writeLong(fi.getDocValuesGen());
      output.writeMapOfStrings(fi.attributes());
    }
    CodecUtil.writeFooter(output);
  }
}
 
Example 17
Source File: Lucene50PostingsReader.java    From lucene-solr with Apache License 2.0
public BlockImpactsEverythingEnum(FieldInfo fieldInfo, IntBlockTermState termState, int flags) throws IOException {
  indexHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
  indexHasPos = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
  indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  indexHasPayloads = fieldInfo.hasPayloads();
  
  needsPositions = PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS);
  needsOffsets = PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS);
  needsPayloads = PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS);
  
  this.docIn = Lucene50PostingsReader.this.docIn.clone();
  
  encoded = new byte[MAX_ENCODED_SIZE];

  if (indexHasPos && needsPositions) {
    this.posIn = Lucene50PostingsReader.this.posIn.clone();
  } else {
    this.posIn = null;
  }
  
  if ((indexHasOffsets && needsOffsets) || (indexHasPayloads && needsPayloads)) {
    this.payIn = Lucene50PostingsReader.this.payIn.clone();
  } else {
    this.payIn = null;
  }
  
  if (indexHasOffsets) {
    offsetStartDeltaBuffer = new int[MAX_DATA_SIZE];
    offsetLengthBuffer = new int[MAX_DATA_SIZE];
  } else {
    offsetStartDeltaBuffer = null;
    offsetLengthBuffer = null;
    startOffset = -1;
    endOffset = -1;
  }

  if (indexHasPayloads) {
    payloadLengthBuffer = new int[MAX_DATA_SIZE];
    payloadBytes = new byte[128];
    payload = new BytesRef();
  } else {
    payloadLengthBuffer = null;
    payloadBytes = null;
    payload = null;
  }

  docFreq = termState.docFreq;
  docTermStartFP = termState.docStartFP;
  posTermStartFP = termState.posStartFP;
  payTermStartFP = termState.payStartFP;
  totalTermFreq = termState.totalTermFreq;
  docIn.seek(docTermStartFP);
  posPendingFP = posTermStartFP;
  payPendingFP = payTermStartFP;
  posPendingCount = 0;
  if (termState.totalTermFreq < BLOCK_SIZE) {
    lastPosBlockFP = posTermStartFP;
  } else if (termState.totalTermFreq == BLOCK_SIZE) {
    lastPosBlockFP = -1;
  } else {
    lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset;
  }

  doc = -1;
  accum = 0;
  docUpto = 0;
  posDocUpTo = 0;
  isFreqsRead = true;
  docBufferUpto = BLOCK_SIZE;

  skipper = new Lucene50ScoreSkipReader(version,
      docIn.clone(),
      MAX_SKIP_LEVELS,
      indexHasPos,
      indexHasOffsets,
      indexHasPayloads);
  skipper.init(docTermStartFP+termState.skipOffset, docTermStartFP, posTermStartFP, payTermStartFP, docFreq);

  if (indexHasFreq == false) {
    Arrays.fill(freqBuffer, 1);
  }
}
 
Example 18
Source File: Lucene84PostingsReader.java    From lucene-solr with Apache License 2.0
public BlockImpactsEverythingEnum(FieldInfo fieldInfo, IntBlockTermState termState, int flags) throws IOException {
  indexHasFreq = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0;
  indexHasPos = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0;
  indexHasOffsets = fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0;
  indexHasPayloads = fieldInfo.hasPayloads();
  
  needsPositions = PostingsEnum.featureRequested(flags, PostingsEnum.POSITIONS);
  needsOffsets = PostingsEnum.featureRequested(flags, PostingsEnum.OFFSETS);
  needsPayloads = PostingsEnum.featureRequested(flags, PostingsEnum.PAYLOADS);
  
  this.docIn = Lucene84PostingsReader.this.docIn.clone();

  if (indexHasPos && needsPositions) {
    this.posIn = Lucene84PostingsReader.this.posIn.clone();
  } else {
    this.posIn = null;
  }
  
  if ((indexHasOffsets && needsOffsets) || (indexHasPayloads && needsPayloads)) {
    this.payIn = Lucene84PostingsReader.this.payIn.clone();
  } else {
    this.payIn = null;
  }
  
  if (indexHasOffsets) {
    offsetStartDeltaBuffer = new long[BLOCK_SIZE];
    offsetLengthBuffer = new long[BLOCK_SIZE];
  } else {
    offsetStartDeltaBuffer = null;
    offsetLengthBuffer = null;
    startOffset = -1;
    endOffset = -1;
  }

  if (indexHasPayloads) {
    payloadLengthBuffer = new long[BLOCK_SIZE];
    payloadBytes = new byte[128];
    payload = new BytesRef();
  } else {
    payloadLengthBuffer = null;
    payloadBytes = null;
    payload = null;
  }

  docFreq = termState.docFreq;
  docTermStartFP = termState.docStartFP;
  posTermStartFP = termState.posStartFP;
  payTermStartFP = termState.payStartFP;
  totalTermFreq = termState.totalTermFreq;
  docIn.seek(docTermStartFP);
  posPendingFP = posTermStartFP;
  payPendingFP = payTermStartFP;
  posPendingCount = 0;
  if (termState.totalTermFreq < BLOCK_SIZE) {
    lastPosBlockFP = posTermStartFP;
  } else if (termState.totalTermFreq == BLOCK_SIZE) {
    lastPosBlockFP = -1;
  } else {
    lastPosBlockFP = posTermStartFP + termState.lastPosBlockOffset;
  }

  doc = -1;
  accum = 0;
  docUpto = 0;
  posDocUpTo = 0;
  isFreqsRead = true;
  docBufferUpto = BLOCK_SIZE;

  skipper = new Lucene84ScoreSkipReader(docIn.clone(),
      MAX_SKIP_LEVELS,
      indexHasPos,
      indexHasOffsets,
      indexHasPayloads);
  skipper.init(docTermStartFP+termState.skipOffset, docTermStartFP, posTermStartFP, payTermStartFP, docFreq);

  if (indexHasFreq == false) {
    for (int i = 0; i < ForUtil.BLOCK_SIZE; ++i) {
      freqBuffer[i] = 1;
    }
  }
}
 
Example 19
Source File: Lucene50PostingsReader.java    From lucene-solr with Apache License 2.0
public boolean canReuse(IndexInput docIn, FieldInfo fieldInfo) {
  return docIn == startDocIn &&
    indexHasFreq == (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS) >= 0) &&
    indexHasPos == (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS) >= 0) &&
    indexHasPayloads == fieldInfo.hasPayloads();
}
 
Example 20
Source File: Lucene50PostingsReader.java    From lucene-solr with Apache License 2.0
public boolean canReuse(IndexInput docIn, FieldInfo fieldInfo) {
  return docIn == startDocIn &&
    indexHasOffsets == (fieldInfo.getIndexOptions().compareTo(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) >= 0) &&
    indexHasPayloads == fieldInfo.hasPayloads();
}