Java Code Examples for org.apache.lucene.store.IndexInput#readByte()

The following examples show how to use org.apache.lucene.store.IndexInput#readByte(). Each example is taken from an open-source project; the originating project and source file are noted above each snippet.
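
Before the project examples, a minimal standalone sketch of the basic call pattern may be helpful (the directory path, file name, and class name below are hypothetical, chosen for illustration only): open a Directory, obtain an IndexInput for a file, and read a single byte from it. readByte() returns a signed Java byte and advances the file pointer by one.

import java.io.IOException;
import java.nio.file.Paths;

import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;

public class ReadByteSketch {
  public static void main(String[] args) throws IOException {
    // hypothetical index directory and file name, for illustration only
    try (Directory dir = FSDirectory.open(Paths.get("/path/to/index"));
         IndexInput in = dir.openInput("example.bin", IOContext.READONCE)) {
      byte b = in.readByte();      // read one byte; the file pointer advances by one
      int unsigned = b & 0xFF;     // mask to interpret the byte as an unsigned value 0..255
      System.out.println("first byte = " + unsigned
          + ", file pointer = " + in.getFilePointer());
    }
  }
}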
Example 1
Source File: SimpleTextBKDReader.java    From lucene-solr with Apache License 2.0
private void visitCompressedDocValues(int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in, int[] docIDs, int count, IntersectVisitor visitor, int compressedDim) throws IOException {
  // the byte at `compressedByteOffset` is compressed using run-length compression,
  // other suffix bytes are stored verbatim
  final int compressedByteOffset = compressedDim * bytesPerDim + commonPrefixLengths[compressedDim];
  commonPrefixLengths[compressedDim]++;
  int i;
  for (i = 0; i < count; ) {
    scratchPackedValue[compressedByteOffset] = in.readByte();
    final int runLen = Byte.toUnsignedInt(in.readByte());
    for (int j = 0; j < runLen; ++j) {
      for (int dim = 0; dim < numDims; dim++) {
        int prefix = commonPrefixLengths[dim];
        in.readBytes(scratchPackedValue, dim*bytesPerDim + prefix, bytesPerDim - prefix);
      }
      visitor.visit(docIDs[i+j], scratchPackedValue);
    }
    i += runLen;
  }
  if (i != count) {
    throw new CorruptIndexException("Sub blocks do not add up to the expected count: " + count + " != " + i, in);
  }
}
 
Example 2
Source File: CodecInfo.java    From mtas with Apache License 2.0
/**
 * Instantiates a new index doc, reading its metadata from the "doc" index input.
 *
 * @param ref
 *          file pointer of the doc entry, or null to read from the current position
 * @throws IOException
 *           Signals that an I/O exception has occurred.
 */
public IndexDoc(Long ref) throws IOException {
  try {
    IndexInput inIndexDoc = indexInputList.get("doc");
    if (ref != null) {
      inIndexDoc.seek(ref);
    }
    docId = inIndexDoc.readVInt(); // docId
    fpIndexObjectId = inIndexDoc.readVLong(); // ref indexObjectId
    fpIndexObjectPosition = inIndexDoc.readVLong(); // ref
                                                    // indexObjectPosition
    fpIndexObjectParent = inIndexDoc.readVLong(); // ref indexObjectParent
    smallestObjectFilepointer = inIndexDoc.readVLong(); // offset
    objectRefApproxQuotient = inIndexDoc.readVInt(); // slope
    objectRefApproxOffset = inIndexDoc.readZLong(); // offset
    storageFlags = inIndexDoc.readByte(); // flag
    size = inIndexDoc.readVInt(); // number of objects
    minPosition = inIndexDoc.readVInt(); // minimum position
    maxPosition = inIndexDoc.readVInt(); // maximum position
  } catch (Exception e) {
    throw new IOException(e);
  }
}
 
Example 3
Source File: BKDReader.java    From lucene-solr with Apache License 2.0
private void visitCompressedDocValues(int[] commonPrefixLengths, byte[] scratchPackedValue, IndexInput in, BKDReaderDocIDSetIterator scratchIterator, int count, IntersectVisitor visitor, int compressedDim) throws IOException {
  // the byte at `compressedByteOffset` is compressed using run-length compression,
  // other suffix bytes are stored verbatim
  final int compressedByteOffset = compressedDim * bytesPerDim + commonPrefixLengths[compressedDim];
  commonPrefixLengths[compressedDim]++;
  int i;
  for (i = 0; i < count; ) {
    scratchPackedValue[compressedByteOffset] = in.readByte();
    final int runLen = Byte.toUnsignedInt(in.readByte());
    for (int j = 0; j < runLen; ++j) {
      for (int dim = 0; dim < numDataDims; dim++) {
        int prefix = commonPrefixLengths[dim];
        in.readBytes(scratchPackedValue, dim*bytesPerDim + prefix, bytesPerDim - prefix);
      }
      visitor.visit(scratchIterator.docIDs[i+j], scratchPackedValue);
    }
    i += runLen;
  }
  if (i != count) {
    throw new CorruptIndexException("Sub blocks do not add up to the expected count: " + count + " != " + i, in);
  }
}
 
Example 4
Source File: DocIdsWriter.java    From lucene-solr with Apache License 2.0
/** Read {@code count} integers and feed the result directly to {@link IntersectVisitor#visit(int)}. */
static void readInts(IndexInput in, int count, IntersectVisitor visitor) throws IOException {
  final int bpv = in.readByte();
  switch (bpv) {
    case 0:
      readDeltaVInts(in, count, visitor);
      break;
    case 32:
      readInts32(in, count, visitor);
      break;
    case 24:
      readInts24(in, count, visitor);
      break;
    default:
      throw new IOException("Unsupported number of bits per value: " + bpv);
  }
}
 
Example 5
Source File: DocIdsWriter.java    From lucene-solr with Apache License 2.0
/** Read {@code count} integers into {@code docIDs}. */
static void readInts(IndexInput in, int count, int[] docIDs) throws IOException {
  final int bpv = in.readByte();
  switch (bpv) {
    case 0:
      readDeltaVInts(in, count, docIDs);
      break;
    case 32:
      readInts32(in, count, docIDs);
      break;
    case 24:
      readInts24(in, count, docIDs);
      break;
    default:
      throw new IOException("Unsupported number of bits per value: " + bpv);
  }
}
 
Example 6
Source File: CodecUtil.java    From lucene-solr with Apache License 2.0
/** Retrieves the full index header from the provided {@link IndexInput}.
 *  This throws {@link CorruptIndexException} if this file does
 * not appear to be an index file. */
public static byte[] readIndexHeader(IndexInput in) throws IOException {
  in.seek(0);
  final int actualHeader = in.readInt();
  if (actualHeader != CODEC_MAGIC) {
    throw new CorruptIndexException("codec header mismatch: actual header=" + actualHeader + " vs expected header=" + CODEC_MAGIC, in);
  }
  String codec = in.readString();
  in.readInt(); // skip the version
  in.seek(in.getFilePointer() + StringHelper.ID_LENGTH); // skip the segment ID
  int suffixLength = in.readByte() & 0xFF;
  byte[] bytes = new byte[headerLength(codec) + StringHelper.ID_LENGTH + 1 + suffixLength];
  in.seek(0);
  in.readBytes(bytes, 0, bytes.length);
  return bytes;
}
 
Example 7
Source File: Lucene80NormsProducer.java    From lucene-solr with Apache License 2.0
private void readFields(IndexInput meta, FieldInfos infos) throws IOException {
  for (int fieldNumber = meta.readInt(); fieldNumber != -1; fieldNumber = meta.readInt()) {
    FieldInfo info = infos.fieldInfo(fieldNumber);
    if (info == null) {
      throw new CorruptIndexException("Invalid field number: " + fieldNumber, meta);
    } else if (!info.hasNorms()) {
      throw new CorruptIndexException("Invalid field: " + info.name, meta);
    }
    NormsEntry entry = new NormsEntry();
    entry.docsWithFieldOffset = meta.readLong();
    entry.docsWithFieldLength = meta.readLong();
    entry.jumpTableEntryCount = meta.readShort();
    entry.denseRankPower = meta.readByte();
    entry.numDocsWithField = meta.readInt();
    entry.bytesPerNorm = meta.readByte();
    switch (entry.bytesPerNorm) {
      case 0: case 1: case 2: case 4: case 8:
        break;
      default:
        throw new CorruptIndexException("Invalid bytesPerValue: " + entry.bytesPerNorm + ", field: " + info.name, meta);
    }
    entry.normsOffset = meta.readLong();
    norms.put(info.number, entry);
  }
}
 
Example 8
Source File: CodecUtil.java    From lucene-solr with Apache License 2.0
/**
 * Expert: verifies the incoming {@link IndexInput} has an index header
 * and that its segment ID matches the expected one, and then copies
 * that index header into the provided {@link DataOutput}.  This is
 * useful when building compound files.
 *
 * @param in Input stream, positioned at the point where the
 *        index header was previously written. Typically this is located
 *        at the beginning of the file.
 * @param out Output stream, where the header will be copied to.
 * @param expectedID Expected segment ID
 * @throws CorruptIndexException If the first four bytes are not
 *         {@link #CODEC_MAGIC}, or if the <code>expectedID</code>
 *         does not match.
 * @throws IOException If there is an I/O error reading from the underlying medium.
 *
 * @lucene.internal 
 */
public static void verifyAndCopyIndexHeader(IndexInput in, DataOutput out, byte[] expectedID) throws IOException {
  // make sure it's large enough to have a header and footer
  if (in.length() < footerLength() + headerLength("")) {
    throw new CorruptIndexException("compound sub-files must have a valid codec header and footer: file is too small (" + in.length() + " bytes)", in);
  }

  int actualHeader = in.readInt();
  if (actualHeader != CODEC_MAGIC) {
    throw new CorruptIndexException("compound sub-files must have a valid codec header and footer: codec header mismatch: actual header=" + actualHeader + " vs expected header=" + CodecUtil.CODEC_MAGIC, in);
  }

  // we can't verify these, so we pass-through:
  String codec = in.readString();
  int version = in.readInt();

  // verify id:
  checkIndexHeaderID(in, expectedID);

  // we can't verify extension either, so we pass-through:
  int suffixLength = in.readByte() & 0xFF;
  byte[] suffixBytes = new byte[suffixLength];
  in.readBytes(suffixBytes, 0, suffixLength);

  // now write the header we just verified
  out.writeInt(CodecUtil.CODEC_MAGIC);
  out.writeString(codec);
  out.writeInt(version);
  out.writeBytes(expectedID, 0, expectedID.length);
  out.writeByte((byte) suffixLength);
  out.writeBytes(suffixBytes, 0, suffixLength);
}
 
Example 9
Source File: SimpleTextBKDReader.java    From lucene-solr with Apache License 2.0
private int readCompressedDim(IndexInput in) throws IOException {
  int compressedDim = in.readByte();
  if (compressedDim < -1 || compressedDim >= numIndexDims) {
    throw new CorruptIndexException("Got compressedDim="+compressedDim, in);
  }
  return compressedDim;
}
 
Example 10
Source File: BKDReader.java    From lucene-solr with Apache License 2.0
private int readCompressedDim(IndexInput in) throws IOException {
  int compressedDim = in.readByte();
  if (compressedDim < -2 || compressedDim >= numDataDims || (version < BKDWriter.VERSION_LOW_CARDINALITY_LEAVES && compressedDim == -2)) {
    throw new CorruptIndexException("Got compressedDim="+compressedDim, in);
  }
  return compressedDim;
}
 
Example 11
Source File: BaseDirectoryTestSuite.java    From incubator-retired-blur with Apache License 2.0
@Test
public void testLongReadAndClone() throws IOException {
  FSDirectory control = FSDirectory.open(fileControl);
  Directory dir = getControlDir(control, directory);
  String name = writeFile(dir,10*1000*1000);
  IndexInput input = dir.openInput(name, IOContext.DEFAULT);
  readFile(input,1000*1000);
  IndexInput clone = input.clone();
  clone.readByte();
  input.close();
}
 
Example 12
Source File: BaseDirectoryTestSuite.java    From incubator-retired-blur with Apache License 2.0
private void testEof(String name, Directory directory, long length) throws IOException {
  IndexInput input = directory.openInput(name, IOContext.DEFAULT);
  try {
    input.seek(length);
    input.readByte();
    fail("should throw eof");
  } catch (IOException e) {
  }
}
 
Example 13
Source File: TestIndexWriterExceptions.java    From lucene-solr with Apache License 2.0
public void testSegmentsChecksumError() throws IOException {
  BaseDirectoryWrapper dir = newDirectory();
  dir.setCheckIndexOnClose(false); // we corrupt the index

  IndexWriter writer = null;

  writer = new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random())));

  // add 100 documents
  for (int i = 0; i < 100; i++) {
    addDoc(writer);
  }

  // close
  writer.close();

  long gen = SegmentInfos.getLastCommitGeneration(dir);
  assertTrue("segment generation should be > 0 but got " + gen, gen > 0);

  final String segmentsFileName = SegmentInfos.getLastCommitSegmentsFileName(dir);
  IndexInput in = dir.openInput(segmentsFileName, newIOContext(random()));
  IndexOutput out = dir.createOutput(IndexFileNames.fileNameFromGeneration(IndexFileNames.SEGMENTS, "", 1+gen), newIOContext(random()));
  out.copyBytes(in, in.length()-1);
  byte b = in.readByte();
  out.writeByte((byte) (1+b));
  out.close();
  in.close();

  expectThrows(CorruptIndexException.class, () -> {
    DirectoryReader.open(dir);
  });

  dir.close();
}
 
Example 14
Source File: BlockDirectoryTest.java    From lucene-solr with Apache License 2.0
private void testEof(String name, Directory directory, long length) throws IOException {
  IndexInput input = directory.openInput(name, new IOContext());
  try {
    input.seek(length);
    try {
      input.readByte();
      fail("should throw eof");
    } catch (IOException e) {
    }
  } finally {
    input.close();
  }
}
 
Example 15
Source File: Blur022SegmentInfoReader.java    From incubator-retired-blur with Apache License 2.0
@Override
public SegmentInfo read(Directory dir, String segment, IOContext context) throws IOException {
  final String fileName = IndexFileNames.segmentFileName(segment, "", Blur022SegmentInfoFormat.SI_EXTENSION);
  final IndexInput input = dir.openInput(fileName, context);
  boolean success = false;
  try {
    CodecUtil.checkHeader(input, Blur022SegmentInfoFormat.CODEC_NAME, Blur022SegmentInfoFormat.VERSION_START,
        Blur022SegmentInfoFormat.VERSION_CURRENT);
    final String version = input.readString();
    final int docCount = input.readInt();
    if (docCount < 0) {
      throw new CorruptIndexException("invalid docCount: " + docCount + " (resource=" + input + ")");
    }
    final boolean isCompoundFile = input.readByte() == SegmentInfo.YES;
    final Map<String, String> diagnostics = input.readStringStringMap();
    final Map<String, String> attributes = input.readStringStringMap();
    final Set<String> files = input.readStringSet();

    if (input.getFilePointer() != input.length()) {
      throw new CorruptIndexException("did not read all bytes from file \"" + fileName + "\": read "
          + input.getFilePointer() + " vs size " + input.length() + " (resource: " + input + ")");
    }

    final SegmentInfo si = new SegmentInfo(dir, version, segment, docCount, isCompoundFile, null, diagnostics,
        Collections.unmodifiableMap(attributes));
    si.setFiles(files);

    success = true;

    return si;

  } finally {
    if (!success) {
      IOUtils.closeWhileHandlingException(input);
    } else {
      input.close();
    }
  }
}
 
Example 16
Source File: DirectPackedReader.java    From incubator-retired-blur with Apache License 2.0
@Override
public long get(int index) {
  final long majorBitPos = (long)index * bitsPerValue;
  final long elementPos = majorBitPos >>> 3;
  try {
    IndexInput indexInput = in.get();
    indexInput.seek(startPointer + elementPos);

    final byte b0 = indexInput.readByte();
    final int bitPos = (int) (majorBitPos & 7);
    if (bitPos + bitsPerValue <= 8) {
      // special case: all bits are in the first byte
      return (b0 & ((1L << (8 - bitPos)) - 1)) >>> (8 - bitPos - bitsPerValue);
    }

    // take bits from the first byte
    int remainingBits = bitsPerValue - 8 + bitPos;
    long result = (b0 & ((1L << (8 - bitPos)) - 1)) << remainingBits;

    // add bits from inner bytes
    while (remainingBits >= 8) {
      remainingBits -= 8;
      result |= (indexInput.readByte() & 0xFFL) << remainingBits;
    }

    // take bits from the last byte
    if (remainingBits > 0) {
      result |= (indexInput.readByte() & 0xFFL) >>> (8 - remainingBits);
    }

    return result;
  } catch (IOException ioe) {
    throw new IllegalStateException("failed", ioe);
  }
}
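
The bit extraction above is easier to follow without the IndexInput indirection. Below is a minimal sketch of the same big-endian extraction applied to a plain byte[] (a standalone helper written for illustration; the method name and signature are assumptions, not part of Lucene or Blur):

// Extracts the index-th value from a big-endian bit-packed array,
// where every value occupies bitsPerValue bits (1..64).
static long getPacked(byte[] packed, int bitsPerValue, int index) {
  final long majorBitPos = (long) index * bitsPerValue;
  int byteIndex = (int) (majorBitPos >>> 3);
  final int bitPos = (int) (majorBitPos & 7);

  final byte b0 = packed[byteIndex];
  if (bitPos + bitsPerValue <= 8) {
    // special case: all bits are in the first byte
    return (b0 & ((1L << (8 - bitPos)) - 1)) >>> (8 - bitPos - bitsPerValue);
  }

  // take bits from the first byte
  int remainingBits = bitsPerValue - 8 + bitPos;
  long result = (b0 & ((1L << (8 - bitPos)) - 1)) << remainingBits;

  // add whole bytes from the middle
  while (remainingBits >= 8) {
    remainingBits -= 8;
    result |= (packed[++byteIndex] & 0xFFL) << remainingBits;
  }

  // take the remaining high bits from the last byte
  if (remainingBits > 0) {
    result |= (packed[++byteIndex] & 0xFFL) >>> (8 - remainingBits);
  }
  return result;
}

For example, with bitsPerValue = 12, value 0 is the first byte followed by the high nibble of the second byte, and value 1 is the low nibble of the second byte followed by the third byte.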
 
Example 17
Source File: BaseCompoundFormatTestCase.java    From lucene-solr with Apache License 2.0
/** This test opens two files from a compound stream and verifies that
 *  their file positions are independent of each other.
 */
public void testRandomAccessClones() throws IOException {
  Directory dir = newDirectory();
  Directory cr = createLargeCFS(dir);
  
  // Open two files
  IndexInput e1 = cr.openInput("_123.f11", newIOContext(random()));
  IndexInput e2 = cr.openInput("_123.f3", newIOContext(random()));
  
  IndexInput a1 = e1.clone();
  IndexInput a2 = e2.clone();
  
  // Seek the first pair
  e1.seek(100);
  a1.seek(100);
  assertEquals(100, e1.getFilePointer());
  assertEquals(100, a1.getFilePointer());
  byte be1 = e1.readByte();
  byte ba1 = a1.readByte();
  assertEquals(be1, ba1);
  
  // Now seek the second pair
  e2.seek(1027);
  a2.seek(1027);
  assertEquals(1027, e2.getFilePointer());
  assertEquals(1027, a2.getFilePointer());
  byte be2 = e2.readByte();
  byte ba2 = a2.readByte();
  assertEquals(be2, ba2);
  
  // Now make sure the first one didn't move
  assertEquals(101, e1.getFilePointer());
  assertEquals(101, a1.getFilePointer());
  be1 = e1.readByte();
  ba1 = a1.readByte();
  assertEquals(be1, ba1);
  
  // Now move the first one again, past the buffer length
  e1.seek(1910);
  a1.seek(1910);
  assertEquals(1910, e1.getFilePointer());
  assertEquals(1910, a1.getFilePointer());
  be1 = e1.readByte();
  ba1 = a1.readByte();
  assertEquals(be1, ba1);
  
  // Now make sure the second set didn't move
  assertEquals(1028, e2.getFilePointer());
  assertEquals(1028, a2.getFilePointer());
  be2 = e2.readByte();
  ba2 = a2.readByte();
  assertEquals(be2, ba2);
  
  // Move the second set back, again cross the buffer size
  e2.seek(17);
  a2.seek(17);
  assertEquals(17, e2.getFilePointer());
  assertEquals(17, a2.getFilePointer());
  be2 = e2.readByte();
  ba2 = a2.readByte();
  assertEquals(be2, ba2);
  
  // Finally, make sure the first set didn't move
  assertEquals(1911, e1.getFilePointer());
  assertEquals(1911, a1.getFilePointer());
  be1 = e1.readByte();
  ba1 = a1.readByte();
  assertEquals(be1, ba1);
  
  e1.close();
  e2.close();
  a1.close();
  a2.close();
  cr.close();
  dir.close();
}
 
Example 18
Source File: BaseCompoundFormatTestCase.java    From lucene-solr with Apache License 2.0
/** This test opens two files from a compound stream and verifies that
 *  their file positions are independent of each other.
 */
public void testRandomAccess() throws IOException {
  Directory dir = newDirectory();
  Directory cr = createLargeCFS(dir);
  
  // Open two files
  IndexInput e1 = dir.openInput("_123.f11", newIOContext(random()));
  IndexInput e2 = dir.openInput("_123.f3", newIOContext(random()));
  
  IndexInput a1 = cr.openInput("_123.f11", newIOContext(random()));
  IndexInput a2 = cr.openInput("_123.f3", newIOContext(random()));
  
  // Seek the first pair
  e1.seek(100);
  a1.seek(100);
  assertEquals(100, e1.getFilePointer());
  assertEquals(100, a1.getFilePointer());
  byte be1 = e1.readByte();
  byte ba1 = a1.readByte();
  assertEquals(be1, ba1);
  
  // Now seek the second pair
  e2.seek(1027);
  a2.seek(1027);
  assertEquals(1027, e2.getFilePointer());
  assertEquals(1027, a2.getFilePointer());
  byte be2 = e2.readByte();
  byte ba2 = a2.readByte();
  assertEquals(be2, ba2);
  
  // Now make sure the first one didn't move
  assertEquals(101, e1.getFilePointer());
  assertEquals(101, a1.getFilePointer());
  be1 = e1.readByte();
  ba1 = a1.readByte();
  assertEquals(be1, ba1);
  
  // Now move the first one again, past the buffer length
  e1.seek(1910);
  a1.seek(1910);
  assertEquals(1910, e1.getFilePointer());
  assertEquals(1910, a1.getFilePointer());
  be1 = e1.readByte();
  ba1 = a1.readByte();
  assertEquals(be1, ba1);
  
  // Now make sure the second set didn't move
  assertEquals(1028, e2.getFilePointer());
  assertEquals(1028, a2.getFilePointer());
  be2 = e2.readByte();
  ba2 = a2.readByte();
  assertEquals(be2, ba2);
  
  // Move the second set back, again cross the buffer size
  e2.seek(17);
  a2.seek(17);
  assertEquals(17, e2.getFilePointer());
  assertEquals(17, a2.getFilePointer());
  be2 = e2.readByte();
  ba2 = a2.readByte();
  assertEquals(be2, ba2);
  
  // Finally, make sure the first set didn't move
  assertEquals(1911, e1.getFilePointer());
  assertEquals(1911, a1.getFilePointer());
  be1 = e1.readByte();
  ba1 = a1.readByte();
  assertEquals(be1, ba1);
  
  e1.close();
  e2.close();
  a1.close();
  a2.close();
  cr.close();
  dir.close();
}
 
Example 19
Source File: BaseDirectoryTestSuite.java    From incubator-retired-blur with Apache License 2.0
private void readFile(IndexInput input, long length) throws IOException {
  for (long l = 0; l < length; l++) {
    input.readByte();
  }
}
 
Example 20
Source File: CodecSearchTree.java    From mtas with Apache License 2.0
/**
 * Gets the mtas tree item stored at the given position.
 *
 * @param ref file pointer of the tree node
 * @param isSinglePoint set when the root flags indicate a single-position tree
 * @param isStoreAdditionalIdAndRef set when the root flags indicate additional ids and refs are stored
 * @param nodeRefApproxOffset offset added to child node references; initialized from the root node
 * @param in the index input to read from
 * @param objectRefApproxOffset offset added to object references
 * @return the mtas tree item
 * @throws IOException Signals that an I/O exception has occurred.
 */
private static MtasTreeItem getMtasTreeItem(Long ref,
    AtomicBoolean isSinglePoint, AtomicBoolean isStoreAdditionalIdAndRef,
    AtomicLong nodeRefApproxOffset, IndexInput in, long objectRefApproxOffset)
    throws IOException {
  try {
    Boolean isRoot = false;
    if (nodeRefApproxOffset.get() < 0) {
      isRoot = true;
    }
    in.seek(ref);
    if (isRoot) {
      nodeRefApproxOffset.set(in.readVLong());
      Byte flag = in.readByte();
      if ((flag
          & MtasTree.SINGLE_POSITION_TREE) == MtasTree.SINGLE_POSITION_TREE) {
        isSinglePoint.set(true);
      }
      if ((flag
          & MtasTree.STORE_ADDITIONAL_ID) == MtasTree.STORE_ADDITIONAL_ID) {
        isStoreAdditionalIdAndRef.set(true);
      }
    }
    int left = in.readVInt();
    int right = in.readVInt();
    int max = in.readVInt();
    Long leftChild = in.readVLong() + nodeRefApproxOffset.get();
    Long rightChild = in.readVLong() + nodeRefApproxOffset.get();
    int size = 1;
    if (!isSinglePoint.get()) {
      size = in.readVInt();
    }
    // initialize
    long[] objectRefs = new long[size];
    int[] objectAdditionalIds = null;
    long[] objectAdditionalRefs = null;
    // get first
    long objectRef = in.readVLong();
    long objectRefPrevious = objectRef + objectRefApproxOffset;
    objectRefs[0] = objectRefPrevious;
    if (isStoreAdditionalIdAndRef.get()) {
      objectAdditionalIds = new int[size];
      objectAdditionalRefs = new long[size];
      objectAdditionalIds[0] = in.readVInt();
      objectAdditionalRefs[0] = in.readVLong();
    }
    // get others
    for (int t = 1; t < size; t++) {
      objectRef = objectRefPrevious + in.readVLong();
      objectRefs[t] = objectRef;
      objectRefPrevious = objectRef;
      if (isStoreAdditionalIdAndRef.get()) {
        objectAdditionalIds[t] = in.readVInt();
        objectAdditionalRefs[t] = in.readVLong();
      }
    }
    return new MtasTreeItem(left, right, max, objectRefs, objectAdditionalIds,
        objectAdditionalRefs, ref, leftChild, rightChild);
  } catch (Exception e) {
    throw new IOException(e.getMessage());
  }
}