Java Code Examples for io.netty.buffer.ByteBuf#readRetainedSlice()

The following examples show how to use io.netty.buffer.ByteBuf#readRetainedSlice(). Each example is taken from an open-source project; the source file, project, and license are listed above the code.
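
As a quick orientation before the project examples, here is a minimal, self-contained sketch of the readRetainedSlice() contract (the class name and sample data are illustrative, not taken from any project below): the call reads the requested number of bytes as a slice that shares the source buffer's memory, advances the source's readerIndex past those bytes, and increments the reference count, so the returned slice must be released by the caller. That last point is what distinguishes it from readSlice(), whose result is only valid while the source buffer itself stays retained.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import java.nio.charset.StandardCharsets;

public class ReadRetainedSliceSketch {
    public static void main(String[] args) {
        ByteBuf source = Unpooled.copiedBuffer("hello world", StandardCharsets.UTF_8);

        // Read the first 5 bytes as a retained slice: the slice shares the
        // underlying memory, the source's readerIndex advances by 5, and the
        // reference count is incremented (unlike readSlice()).
        ByteBuf frame = source.readRetainedSlice(5);
        System.out.println(frame.toString(StandardCharsets.UTF_8)); // hello
        System.out.println(source.readerIndex());                   // 5

        // Because the slice was retained, it must be released separately;
        // forgetting this is the classic leak with readRetainedSlice().
        frame.release();
        source.release();
    }
}
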
Example 1
Source File: ZstdDecoderTest.java    From x-pipe with Apache License 2.0
@Test
public void testTcpPacketSplitFraming() {
    int N = randomInt(5, 10);

    ByteBuf decompressed = Unpooled.buffer(MIN_BLOCK_SIZE + 16);
    String sample = randomString(MIN_BLOCK_SIZE + 16);
    ByteBuf compressed = getCompressedByteBuf(sample);

    while(compressed.isReadable()) {
        ByteBuf piece = compressed.readRetainedSlice(Math.min(randomInt(1, N), compressed.readableBytes()));
        channel.writeInbound(piece);
    }

    mergeOutput(decompressed);

    Assert.assertEquals(sample, decompressed.toString(Charset.defaultCharset()));

}
 
Example 2
Source File: FileWriter.java    From curiostack with MIT License
public ListenableFuture<Void> write(ByteBuf nextBuf) {
  final ByteBuf buf;
  if (unfinishedChunk == null) {
    buf = nextBuf;
  } else {
    buf =
        alloc.compositeBuffer(2).addComponent(true, unfinishedChunk).addComponent(true, nextBuf);
    unfinishedChunk = null;
  }

  int alignedWritableBytes = alignedSize(buf.readableBytes());
  if (alignedWritableBytes == buf.readableBytes()) {
    return CompletableFuturesExtra.toListenableFuture(uploadChunk(buf, false));
  }

  if (alignedWritableBytes == 0) {
    // Not enough data for a chunk, so copy it for next time.
    copyUnfinishedBuffer(buf);
    return immediateFuture(null);
  }

  ByteBuf nextChunk = buf.readRetainedSlice(alignedWritableBytes);
  copyUnfinishedBuffer(buf);
  return CompletableFuturesExtra.toListenableFuture(uploadChunk(nextChunk, false));
}
 
Example 3
Source File: AbstractCompatibleMarshallingDecoderTest.java    From netty-4.1.22 with Apache License 2.0
@Test
public void testFragmentedUnmarshalling() throws IOException {
    MarshallerFactory marshallerFactory = createMarshallerFactory();
    MarshallingConfiguration configuration = createMarshallingConfig();

    EmbeddedChannel ch = new EmbeddedChannel(createDecoder(Integer.MAX_VALUE));

    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    Marshaller marshaller = marshallerFactory.createMarshaller(configuration);
    marshaller.start(Marshalling.createByteOutput(bout));
    marshaller.writeObject(testObject);
    marshaller.finish();
    marshaller.close();

    byte[] testBytes = bout.toByteArray();

    ByteBuf buffer = input(testBytes);
    ByteBuf slice = buffer.readRetainedSlice(2);

    ch.writeInbound(slice);
    ch.writeInbound(buffer);
    assertTrue(ch.finish());

    String unmarshalled = ch.readInbound();

    assertEquals(testObject, unmarshalled);

    assertNull(ch.readInbound());
}
 
Example 4
Source File: ZstdDecoderTest.java    From x-pipe with Apache License 2.0
@Test
public void testTcpPacketMergedFraming() {

    ByteBuf decompressed = Unpooled.buffer(MIN_BLOCK_SIZE * 2 + 16);
    String sample1 = randomString(MIN_BLOCK_SIZE + 16);
    ByteBuf compressed1 = getCompressedByteBuf(sample1);

    String sample2 = randomString(MIN_BLOCK_SIZE);
    ByteBuf compressed2 = getCompressedByteBuf(sample2);

    int expectLength = compressed1.readableBytes() + compressed2.readableBytes();
    int totalLength = 0;
    int randomLength = randomInt(1, compressed1.readableBytes() - 3);
    totalLength += randomLength;
    ByteBuf piece = compressed1.readRetainedSlice(randomLength);
    channel.writeInbound(piece);

    randomLength = randomInt(HEADER_LENGTH + 5, compressed2.readableBytes() - 3);
    piece = Unpooled.directBuffer(compressed1.readableBytes() + randomLength);
    piece.writeBytes(compressed1);
    piece.writeBytes(compressed2.readSlice(randomLength));

    totalLength += piece.readableBytes();
    channel.writeInbound(piece);

    totalLength += compressed2.readableBytes();
    channel.writeInbound(compressed2);

    Assert.assertEquals(expectLength, totalLength);

    mergeOutput(decompressed);

    Assert.assertEquals(sample1 + sample2, decompressed.toString(Charset.defaultCharset()));

}
 
Example 5
Source File: CloudStorageBuildCacheService.java    From curiostack with MIT License
@Override
public void store(BuildCacheKey buildCacheKey, BuildCacheEntryWriter buildCacheEntryWriter) {
  ByteBuf buf = PooledByteBufAllocator.DEFAULT.buffer((int) buildCacheEntryWriter.getSize());

  try {
    try (ByteBufOutputStream os = new ByteBufOutputStream(buf)) {
      buildCacheEntryWriter.writeTo(os);
    } catch (IOException e) {
      logger.warn("Couldn't write cache entry to buffer.", e);
      buf.release();
      return;
    }

    FileWriter writer =
        cloudStorage.createFile(buildCacheKey.getHashCode(), ImmutableMap.of()).join();
    while (buf.readableBytes() > 0) {
      ByteBuf chunk = buf.readRetainedSlice(Math.min(buf.readableBytes(), 10 * 4 * 256 * 1000));
      if (buf.readableBytes() > 0) {
        getUnchecked(writer.write(chunk));
      } else {
        writer.writeAndClose(chunk).join();
      }
    }
  } catch (Throwable t) {
    logger.warn("Exception writing to cloud storage, ignoring.", t);
  } finally {
    buf.release();
  }
}
 
Example 6
Source File: LargeFieldReader.java    From r2dbc-mysql with Apache License 2.0
@Override
public FieldValue readSizeFixedField(int length) {
    require(length > 0, "length must be a positive integer");

    ByteBuf buf = nonEmptyBuffer();

    if (buf.readableBytes() >= length) {
        return new NormalFieldValue(buf.readRetainedSlice(length));
    }

    return new NormalFieldValue(retainedMerge(buf.alloc(), readSlice(buf, length)));
}
 
Example 7
Source File: NormalFieldReader.java    From r2dbc-mysql with Apache License 2.0
private static ByteBuf readVarIntSizedRetained(ByteBuf buf) {
    int size = (int) VarIntUtils.readVarInt(buf);
    if (size == 0) {
        // Use EmptyByteBuf, new buffer no need to be retained.
        return buf.alloc().buffer(0, 0);
    }

    return buf.readRetainedSlice(size);
}
 
Example 8
Source File: SimpleDecodeHandler.java    From joyrpc with Apache License 2.0
@Override
protected void decode(final ChannelHandlerContext ctx, final ByteBuf in, final List<Object> out) throws Exception {
    if (!in.isReadable()) {
        logger.warn("Bytebuf is not readable when decode!");
        return;
    }
    ByteBuf byteBuf;
    boolean needRelease = false;
    if (fixedLength > 0) {
        byteBuf = in.readableBytes() < fixedLength ? null : in.readRetainedSlice(fixedLength);
        needRelease = true;
    } else {
        byteBuf = in;
    }
    if (byteBuf != null) {
        ChannelBuffer buf = new NettyChannelBuffer(byteBuf);
        try {
            Object message = codec.decode(() -> channel, buf);
            if (message != null) {
                out.add(message);
            }
        } finally {
            if (needRelease && !buf.isReleased()) {
                buf.release();
            }
        }

    }
}
 
Example 9
Source File: AltsTsiFrameProtector.java    From grpc-nebula-java with Apache License 2.0
private void addUnhandled(ByteBuf in) {
  if (in.isReadable()) {
    ByteBuf buf = in.readRetainedSlice(in.readableBytes());
    unhandledBufs.add(buf);
    unhandledBytes += buf.readableBytes();
  }
}
 
Example 10
Source File: FixedLengthFrameDecoder.java    From netty-4.1.22 with Apache License 2.0
/**
 * Create a frame out of the {@link ByteBuf} and return it.
 *
 * @param   ctx             the {@link ChannelHandlerContext} which this {@link ByteToMessageDecoder} belongs to
 * @param   in              the {@link ByteBuf} from which to read data
 * @return  frame           the {@link ByteBuf} which represent the frame or {@code null} if no frame could
 *                          be created.
 */
protected Object decode(
        @SuppressWarnings("UnusedParameters") ChannelHandlerContext ctx, ByteBuf in) throws Exception {
    if (in.readableBytes() < frameLength) {
        return null;
    } else {
        return in.readRetainedSlice(frameLength);
    }
}
 
Example 11
Source File: AltsTsiFrameProtector.java    From grpc-java with Apache License 2.0
private void addUnhandled(ByteBuf in) {
  if (in.isReadable()) {
    ByteBuf buf = in.readRetainedSlice(in.readableBytes());
    unhandledBufs.add(buf);
    unhandledBytes += buf.readableBytes();
  }
}
 
Example 12
Source File: DelimiterBasedFrameDecoder.java    From netty-4.1.22 with Apache License 2.0
    /**
     * Create a frame out of the {@link ByteBuf} and return it.
     *
     * @param   ctx             the {@link ChannelHandlerContext} which this {@link ByteToMessageDecoder} belongs to
     * @param   buffer          the {@link ByteBuf} from which to read data
     * @return  frame           the {@link ByteBuf} which represent the frame or {@code null} if no frame could
     *                          be created.
     */
    protected Object decode(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception {
        if (lineBasedDecoder != null) {
            return lineBasedDecoder.decode(ctx, buffer);
        }
        // Try all delimiters and choose the delimiter which yields the shortest frame.
        int minFrameLength = Integer.MAX_VALUE;
        ByteBuf minDelim = null;
        for (ByteBuf delim: delimiters) {
//            Find the index of the delimiter in the buffer
            int frameLength = indexOf(buffer, delim);
            if (frameLength >= 0 && frameLength < minFrameLength) {
//                minFrameLength stores the index of the matched delimiter
                minFrameLength = frameLength;
//                minDelim stores the matched delimiter
                minDelim = delim;
            }
        }

        if (minDelim != null) {
//            The length of the delimiter
            int minDelimLength = minDelim.capacity();
            ByteBuf frame;

            if (discardingTooLongFrame) {
                // We've just finished discarding a very large frame.
                // Go back to the initial state.
                discardingTooLongFrame = false;
//                Skip the discarded frame plus the delimiter
                buffer.skipBytes(minFrameLength + minDelimLength);

                int tooLongFrameLength = this.tooLongFrameLength;
                this.tooLongFrameLength = 0;
                if (!failFast) {
//                    Fail-fast was not enabled, so report the failure now
                    fail(tooLongFrameLength);
                }
                return null;
            }

            if (minFrameLength > maxFrameLength) {
                // Discard read frame.
                buffer.skipBytes(minFrameLength + minDelimLength);
                fail(minFrameLength);
                return null;
            }

            if (stripDelimiter) {
                frame = buffer.readRetainedSlice(minFrameLength);
                buffer.skipBytes(minDelimLength);
            } else {
                frame = buffer.readRetainedSlice(minFrameLength + minDelimLength);
            }

            return frame;
        } else {
            if (!discardingTooLongFrame) {
                if (buffer.readableBytes() > maxFrameLength) {
                    // Discard the content of the buffer until a delimiter is found.
                    tooLongFrameLength = buffer.readableBytes();
                    buffer.skipBytes(buffer.readableBytes());
                    discardingTooLongFrame = true;
                    if (failFast) {
                        fail(tooLongFrameLength);
                    }
                }
            } else {
                // Still discarding the buffer since a delimiter is not found.
                tooLongFrameLength += buffer.readableBytes();
                buffer.skipBytes(buffer.readableBytes());
            }
            return null;
        }
    }
 
Example 13
Source File: DefaultHttp2FrameWriter.java    From netty-4.1.22 with Apache License 2.0
private ChannelFuture writeHeadersInternal(ChannelHandlerContext ctx,
        int streamId, Http2Headers headers, int padding, boolean endStream,
        boolean hasPriority, int streamDependency, short weight, boolean exclusive, ChannelPromise promise) {
    ByteBuf headerBlock = null;
    SimpleChannelPromiseAggregator promiseAggregator =
            new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor());
    try {
        verifyStreamId(streamId, STREAM_ID);
        if (hasPriority) {
            verifyStreamOrConnectionId(streamDependency, STREAM_DEPENDENCY);
            verifyPadding(padding);
            verifyWeight(weight);
        }

        // Encode the entire header block.
        headerBlock = ctx.alloc().buffer();
        headersEncoder.encodeHeaders(streamId, headers, headerBlock);

        Http2Flags flags =
                new Http2Flags().endOfStream(endStream).priorityPresent(hasPriority).paddingPresent(padding > 0);

        // Read the first fragment (possibly everything).
        int nonFragmentBytes = padding + flags.getNumPriorityBytes();
        int maxFragmentLength = maxFrameSize - nonFragmentBytes;
        ByteBuf fragment = headerBlock.readRetainedSlice(min(headerBlock.readableBytes(), maxFragmentLength));

        // Set the end of headers flag for the first frame.
        flags.endOfHeaders(!headerBlock.isReadable());

        int payloadLength = fragment.readableBytes() + nonFragmentBytes;
        ByteBuf buf = ctx.alloc().buffer(HEADERS_FRAME_HEADER_LENGTH);
        writeFrameHeaderInternal(buf, payloadLength, HEADERS, flags, streamId);
        writePaddingLength(buf, padding);

        if (hasPriority) {
            buf.writeInt(exclusive ? (int) (0x80000000L | streamDependency) : streamDependency);

            // Adjust the weight so that it fits into a single byte on the wire.
            buf.writeByte(weight - 1);
        }
        ctx.write(buf, promiseAggregator.newPromise());

        // Write the first fragment.
        ctx.write(fragment, promiseAggregator.newPromise());

        // Write out the padding, if any.
        if (paddingBytes(padding) > 0) {
            ctx.write(ZERO_BUFFER.slice(0, paddingBytes(padding)), promiseAggregator.newPromise());
        }

        if (!flags.endOfHeaders()) {
            writeContinuationFrames(ctx, streamId, headerBlock, padding, promiseAggregator);
        }
    } catch (Http2Exception e) {
        promiseAggregator.setFailure(e);
    } catch (Throwable t) {
        promiseAggregator.setFailure(t);
        promiseAggregator.doneAllocatingPromises();
        PlatformDependent.throwException(t);
    } finally {
        if (headerBlock != null) {
            headerBlock.release();
        }
    }
    return promiseAggregator.doneAllocatingPromises();
}
 
Example 14
Source File: DefaultHttp2FrameWriter.java    From netty-4.1.22 with Apache License 2.0
@Override
public ChannelFuture writePushPromise(ChannelHandlerContext ctx, int streamId,
        int promisedStreamId, Http2Headers headers, int padding, ChannelPromise promise) {
    ByteBuf headerBlock = null;
    SimpleChannelPromiseAggregator promiseAggregator =
            new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor());
    try {
        verifyStreamId(streamId, STREAM_ID);
        verifyStreamId(promisedStreamId, "Promised Stream ID");
        verifyPadding(padding);

        // Encode the entire header block into an intermediate buffer.
        headerBlock = ctx.alloc().buffer();
        headersEncoder.encodeHeaders(streamId, headers, headerBlock);

        // Read the first fragment (possibly everything).
        Http2Flags flags = new Http2Flags().paddingPresent(padding > 0);
        // INT_FIELD_LENGTH is for the length of the promisedStreamId
        int nonFragmentLength = INT_FIELD_LENGTH + padding;
        int maxFragmentLength = maxFrameSize - nonFragmentLength;
        ByteBuf fragment = headerBlock.readRetainedSlice(min(headerBlock.readableBytes(), maxFragmentLength));

        flags.endOfHeaders(!headerBlock.isReadable());

        int payloadLength = fragment.readableBytes() + nonFragmentLength;
        ByteBuf buf = ctx.alloc().buffer(PUSH_PROMISE_FRAME_HEADER_LENGTH);
        writeFrameHeaderInternal(buf, payloadLength, PUSH_PROMISE, flags, streamId);
        writePaddingLength(buf, padding);

        // Write out the promised stream ID.
        buf.writeInt(promisedStreamId);
        ctx.write(buf, promiseAggregator.newPromise());

        // Write the first fragment.
        ctx.write(fragment, promiseAggregator.newPromise());

        // Write out the padding, if any.
        if (paddingBytes(padding) > 0) {
            ctx.write(ZERO_BUFFER.slice(0, paddingBytes(padding)), promiseAggregator.newPromise());
        }

        if (!flags.endOfHeaders()) {
            writeContinuationFrames(ctx, streamId, headerBlock, padding, promiseAggregator);
        }
    } catch (Http2Exception e) {
        promiseAggregator.setFailure(e);
    } catch (Throwable t) {
        promiseAggregator.setFailure(t);
        promiseAggregator.doneAllocatingPromises();
        PlatformDependent.throwException(t);
    } finally {
        if (headerBlock != null) {
            headerBlock.release();
        }
    }
    return promiseAggregator.doneAllocatingPromises();
}
 
Example 15
Source File: MqttDecoder.java    From mithqtt with Apache License 2.0
private static Result<ByteBuf> decodePublishPayload(ByteBuf buffer, int bytesRemainingInVariablePart) {
    ByteBuf b = buffer.readRetainedSlice(bytesRemainingInVariablePart);
    return new Result<>(b, bytesRemainingInVariablePart);
}
 
Example 16
Source File: MqttDecoder.java    From netty-4.1.22 with Apache License 2.0
private static Result<ByteBuf> decodePublishPayload(ByteBuf buffer, int bytesRemainingInVariablePart) {
    ByteBuf b = buffer.readRetainedSlice(bytesRemainingInVariablePart);
    return new Result<ByteBuf>(b, bytesRemainingInVariablePart);
}
 
Example 17
Source File: HAProxyMessage.java    From netty-4.1.22 with Apache License 2.0
private static HAProxyTLV readNextTLV(final ByteBuf header) {

        // We need at least 4 bytes for a TLV
        if (header.readableBytes() < 4) {
            return null;
        }

        final byte typeAsByte = header.readByte();
        final HAProxyTLV.Type type = HAProxyTLV.Type.typeForByteValue(typeAsByte);

        final int length = header.readUnsignedShort();
        switch (type) {
        case PP2_TYPE_SSL:
            final ByteBuf rawContent = header.retainedSlice(header.readerIndex(), length);
            final ByteBuf byteBuf = header.readSlice(length);
            final byte client = byteBuf.readByte();
            final int verify = byteBuf.readInt();

            if (byteBuf.readableBytes() >= 4) {

                final List<HAProxyTLV> encapsulatedTlvs = new ArrayList<HAProxyTLV>(4);
                do {
                    final HAProxyTLV haProxyTLV = readNextTLV(byteBuf);
                    if (haProxyTLV == null) {
                        break;
                    }
                    encapsulatedTlvs.add(haProxyTLV);
                } while (byteBuf.readableBytes() >= 4);

                return new HAProxySSLTLV(verify, client, encapsulatedTlvs, rawContent);
            }
            return new HAProxySSLTLV(verify, client, Collections.<HAProxyTLV>emptyList(), rawContent);
        // If we're not dealing with a SSL Type, we can use the same mechanism
        case PP2_TYPE_ALPN:
        case PP2_TYPE_AUTHORITY:
        case PP2_TYPE_SSL_VERSION:
        case PP2_TYPE_SSL_CN:
        case PP2_TYPE_NETNS:
        case OTHER:
            return new HAProxyTLV(type, typeAsByte, header.readRetainedSlice(length));
        default:
            return null;
        }
    }
 
Example 18
Source File: CompositeMetadataCodec.java    From rsocket-java with Apache License 2.0
/**
 * Decode the next metadata entry (a mime header + content pair of {@link ByteBuf}) from a {@link
 * ByteBuf} that contains at least enough bytes for one more such entry. These buffers are
 * actually slices of the full metadata buffer, and this method doesn't move the full metadata
 * buffer's {@link ByteBuf#readerIndex()}. As such, it requires the user to provide an {@code
 * index} to read from. The next index is computed by calling {@link #computeNextEntryIndex(int,
 * ByteBuf, ByteBuf)}. Size of the first buffer (the "header buffer") drives which decoding method
 * should be further applied to it.
 *
 * <p>The header buffer is either:
 *
 * <ul>
 *   <li>made up of a single byte: this represents an encoded mime id, which can be further
 *       decoded using {@link #decodeMimeIdFromMimeBuffer(ByteBuf)}
 *   <li>made up of 2 or more bytes: this represents an encoded mime String + its length, which
 *       can be further decoded using {@link #decodeMimeTypeFromMimeBuffer(ByteBuf)}. Note the
 *       encoded length, in the first byte, is skipped by this decoding method because the
 *       remaining length of the buffer is that of the mime string.
 * </ul>
 *
 * @param compositeMetadata the source {@link ByteBuf} that originally contains one or more
 *     metadata entries
 * @param entryIndex the {@link ByteBuf#readerIndex()} to start decoding from. original reader
 *     index is kept on the source buffer
 * @param retainSlices should produced metadata entry buffers {@link ByteBuf#slice() slices} be
 *     {@link ByteBuf#retainedSlice() retained}?
 * @return a {@link ByteBuf} array of length 2 containing the mime header buffer
 *     <strong>slice</strong> and the content buffer <strong>slice</strong>, or one of the
 *     zero-length error constant arrays
 */
public static ByteBuf[] decodeMimeAndContentBuffersSlices(
    ByteBuf compositeMetadata, int entryIndex, boolean retainSlices) {
  compositeMetadata.markReaderIndex();
  compositeMetadata.readerIndex(entryIndex);

  if (compositeMetadata.isReadable()) {
    ByteBuf mime;
    int ridx = compositeMetadata.readerIndex();
    byte mimeIdOrLength = compositeMetadata.readByte();
    if ((mimeIdOrLength & STREAM_METADATA_KNOWN_MASK) == STREAM_METADATA_KNOWN_MASK) {
      mime =
          retainSlices
              ? compositeMetadata.retainedSlice(ridx, 1)
              : compositeMetadata.slice(ridx, 1);
    } else {
      // M flag unset, remaining 7 bits are the length of the mime
      int mimeLength = Byte.toUnsignedInt(mimeIdOrLength) + 1;

      if (compositeMetadata.isReadable(
          mimeLength)) { // need to be able to read an extra mimeLength bytes
        // here we need a way for the returned ByteBuf to differentiate between a
        // 1-byte length mime type and a 1 byte encoded mime id, preferably without
        // re-applying the byte mask. The easiest way is to include the initial byte
        // and have further decoding ignore the first byte. 1 byte buffer == id, 2+ byte
        // buffer == full mime string.
        mime =
            retainSlices
                ?
                // we accommodate that we don't read from current readerIndex, but
                // readerIndex - 1 ("0"), for a total slice size of mimeLength + 1
                compositeMetadata.retainedSlice(ridx, mimeLength + 1)
                : compositeMetadata.slice(ridx, mimeLength + 1);
        // we thus need to skip the bytes we just sliced, but not the flag/length byte
        // which was already skipped in initial read
        compositeMetadata.skipBytes(mimeLength);
      } else {
        compositeMetadata.resetReaderIndex();
        throw new IllegalStateException("metadata is malformed");
      }
    }

    if (compositeMetadata.isReadable(3)) {
      // ensures the length medium can be read
      final int metadataLength = compositeMetadata.readUnsignedMedium();
      if (compositeMetadata.isReadable(metadataLength)) {
        ByteBuf metadata =
            retainSlices
                ? compositeMetadata.readRetainedSlice(metadataLength)
                : compositeMetadata.readSlice(metadataLength);
        compositeMetadata.resetReaderIndex();
        return new ByteBuf[] {mime, metadata};
      } else {
        compositeMetadata.resetReaderIndex();
        throw new IllegalStateException("metadata is malformed");
      }
    } else {
      compositeMetadata.resetReaderIndex();
      throw new IllegalStateException("metadata is malformed");
    }
  }
  compositeMetadata.resetReaderIndex();
  throw new IllegalArgumentException(
      String.format("entry index %d is larger than buffer size", entryIndex));
}