Java Code Examples for org.xerial.snappy.Snappy#maxCompressedLength()

The following examples show how to use org.xerial.snappy.Snappy#maxCompressedLength(). Each example comes from an open-source project; the source file and license are noted above each snippet.
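As a quick orientation, here is a minimal, self-contained sketch of the usual pattern: size the destination array with Snappy.maxCompressedLength() (the worst-case output size for a given input length) and let Snappy.compress() report how many bytes were actually written. The class name MaxCompressedLengthSketch and the sample payload are illustrative only, not taken from any of the projects below.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.xerial.snappy.Snappy;

public class MaxCompressedLengthSketch {
    public static void main(String[] args) throws IOException {
        byte[] raw = "example payload for snappy".getBytes(StandardCharsets.UTF_8);

        // Worst-case size of the compressed output for raw.length input bytes.
        int maxLength = Snappy.maxCompressedLength(raw.length);

        // Allocate the destination using the worst-case bound, then compress into it.
        byte[] compressed = new byte[maxLength];
        int compressedLength = Snappy.compress(raw, 0, raw.length, compressed, 0);

        // Only the first compressedLength bytes of 'compressed' are valid output.
        System.out.println(raw.length + " -> " + compressedLength + " bytes");
    }
}

The examples that follow apply the same idea to heap arrays, direct NIO buffers, and Netty ByteBuf targets.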
Example 1
Source File: CompressionCodecSnappyJNI.java    From pulsar with Apache License 2.0
@Override
public ByteBuf encode(ByteBuf source) {
    int uncompressedLength = source.readableBytes();
    int maxLength = Snappy.maxCompressedLength(uncompressedLength);

    ByteBuffer sourceNio = source.nioBuffer(source.readerIndex(), source.readableBytes());

    ByteBuf target = PooledByteBufAllocator.DEFAULT.buffer(maxLength, maxLength);
    ByteBuffer targetNio = target.nioBuffer(0, maxLength);

    int compressedLength = 0;
    try {
        compressedLength = Snappy.compress(sourceNio, targetNio);
    } catch (IOException e) {
        log.error("Failed to compress to Snappy: {}", e.getMessage());
    }
    target.writerIndex(compressedLength);
    return target;
}
 
Example 2
Source File: SnappyCompressor.java    From Krackle with Apache License 2.0
@Override
public int compress(byte[] src, int srcPos, int length, byte[] dest,
	 int destPos) throws IOException {
	System.arraycopy(header, 0, dest, destPos, headerLength);

	// Compressed size cannot be greater than what we have available
	maxCompressedSize = dest.length - destPos - headerLength - 4;
	if (Snappy.maxCompressedLength(length) > maxCompressedSize) {
		return -1;
	}

	compressedLength = Snappy.compress(src, srcPos, length, dest, destPos
		 + headerLength + 4);
	writeInt(compressedLength, dest, destPos + headerLength);

	return headerLength + 4 + compressedLength;
}
 
Example 3
Source File: SnappyCompress.java    From brpc-java with Apache License 2.0
@Override
public ByteBuf compressInput(Object proto, RpcMethodInfo rpcMethodInfo) throws IOException {
    byte[] bytes = rpcMethodInfo.inputEncode(proto);
    int maxCompressedSize = Snappy.maxCompressedLength(bytes.length);
    byte[] compressedBytes = new byte[maxCompressedSize];
    int compressedLen = Snappy.compress(bytes, 0, bytes.length, compressedBytes, 0);
    return Unpooled.wrappedBuffer(compressedBytes, 0, compressedLen);
}
 
Example 4
Source File: SnappyCompress.java    From brpc-java with Apache License 2.0
@Override
public ByteBuf compressOutput(Object proto, RpcMethodInfo rpcMethodInfo) throws IOException {
    byte[] bytes = rpcMethodInfo.outputEncode(proto);
    int maxCompressedSize = Snappy.maxCompressedLength(bytes.length);
    byte[] compressedBytes = new byte[maxCompressedSize];
    int compressedLen = Snappy.compress(bytes, 0, bytes.length, compressedBytes, 0);
    return Unpooled.wrappedBuffer(compressedBytes, 0, compressedLen);
}
 
Example 5
Source File: VectorAccessibleSerializable.java    From dremio-oss with Apache License 2.0
private void writeCompressedBuf(ArrowBuf buf, OutputStream output) throws IOException {
  long rawLength = buf.readableBytes();
  for (long posn = 0; posn < rawLength; posn += RAW_CHUNK_SIZE_TO_COMPRESS) {
    /* we compress 32KB chunks at a time; the last chunk might be smaller than 32KB */
    int lengthToCompress = (int) Math.min(RAW_CHUNK_SIZE_TO_COMPRESS, rawLength - posn);

    /* allocate direct buffers to hold raw and compressed data */
    ByteBuffer rawDirectBuffer = buf.nioBuffer(posn, lengthToCompress);
    /* Since we don't know the exact size of compressed data, we can
     * allocate the compressed buffer of same size as raw data. However,
     * there could be cases where Snappy does not compress the data and the
     * compressed stream is of size larger (raw data + compression metadata)
     * than raw data. To handle these cases, we allocate compressed buffer
     * slightly larger than raw buffer. If we don't do this, Snappy.compress
     * will segfault.
     */
    final int maxCompressedLength = Snappy.maxCompressedLength(lengthToCompress);
    try (ArrowBuf cBuf = allocator.buffer(maxCompressedLength)) {
      ByteBuffer compressedDirectBuffer = cBuf.nioBuffer(0, maxCompressedLength);
      rawDirectBuffer.order(ByteOrder.LITTLE_ENDIAN);
      compressedDirectBuffer.order(ByteOrder.LITTLE_ENDIAN);

      /* compress */
      int compressedLength = Snappy.compress(rawDirectBuffer, compressedDirectBuffer);

      /* get compressed data into byte array for serializing to output stream */
      /* Use current thread buffer (safe to do since I/O operation is blocking) */
      final byte[] tmpBuffer = REUSABLE_LARGE_BUFFER.get();
      compressedDirectBuffer.get(tmpBuffer, 0, compressedLength);

      /* serialize the length of compressed data */
      output.write(getByteArrayFromLEInt(REUSABLE_SMALL_BUFFER.get(), compressedLength));
      /* serialize the compressed data */
      output.write(tmpBuffer, 0, compressedLength);
    }
  }
}
 
Example 6
Source File: SnappyCompressor.java    From parquet-mr with Apache License 2.0
/**
 * Fills specified buffer with compressed data. Returns actual number
 * of bytes of compressed data. A return value of 0 indicates that
 * needsInput() should be called in order to determine if more input
 * data is required.
 *
 * @param buffer   Buffer for the compressed data
 * @param off Start offset of the data
 * @param len Size of the buffer
 * @return The actual number of bytes of compressed data.
 */
@Override
public synchronized int compress(byte[] buffer, int off, int len) throws IOException {
  SnappyUtil.validateBuffer(buffer, off, len);

  if (needsInput()) {
    // No buffered output bytes and no input to consume, need more input
    return 0;
  }

  if (!outputBuffer.hasRemaining()) {
    // There is uncompressed input, compress it now
    int maxOutputSize = Snappy.maxCompressedLength(inputBuffer.position());
    if (maxOutputSize > outputBuffer.capacity()) {
      ByteBuffer oldBuffer = outputBuffer;
      outputBuffer = ByteBuffer.allocateDirect(maxOutputSize);
      CleanUtil.cleanDirectBuffer(oldBuffer);
    }
    // Reset the previous outputBuffer
    outputBuffer.clear();
    inputBuffer.limit(inputBuffer.position());
    inputBuffer.position(0);

    int size = Snappy.compress(inputBuffer, outputBuffer);
    outputBuffer.limit(size);
    inputBuffer.limit(0);
    inputBuffer.rewind();
  }

  // Return compressed output up to 'len'
  int numBytes = Math.min(len, outputBuffer.remaining());
  outputBuffer.get(buffer, off, numBytes);
  bytesWritten += numBytes;
  return numBytes;
}
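The javadoc above describes the standard Hadoop Compressor contract, where a return value of 0 means needsInput() should be consulted. As a rough sketch of how such a compressor is typically driven (the helper class, chunk size, and construction below are assumptions for illustration, not taken from parquet-mr):

import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class CompressorDriverSketch {
    // Drains a Hadoop-style Compressor into a byte array (illustrative only).
    static byte[] compressAll(org.apache.hadoop.io.compress.Compressor compressor,
                              byte[] input) throws IOException {
        compressor.setInput(input, 0, input.length);
        compressor.finish(); // signal that no more input will follow

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        byte[] chunk = new byte[4096];
        while (!compressor.finished()) {
            // Per the contract above, 0 means check needsInput() for more data.
            int n = compressor.compress(chunk, 0, chunk.length);
            if (n == 0 && compressor.needsInput()) {
                break;
            }
            out.write(chunk, 0, n);
        }
        return out.toByteArray();
    }
}

In practice this loop is normally handled by Hadoop's compressor streams; it is shown here only to illustrate the return-0/needsInput() interplay documented in the javadoc.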
 
Example 7
Source File: SnappyRandomReader.java    From sparkey-java with Apache License 2.0
SnappyRandomReader(BlockRandomInput data, int maxBlockSize) {
  this.data = data;
  this.maxBlockSize = maxBlockSize;
  blockSize = 0;
  bufPos = 0;
  uncompressedBuf = new byte[maxBlockSize];
  compressedBuf = new byte[Snappy.maxCompressedLength(maxBlockSize)];
}
 
Example 8
Source File: SnappyReader.java    From sparkey-java with Apache License 2.0
public SnappyReader(InputStream data, int maxBlockSize, long start) {
  super(data);
  blockSize = 0;
  bufPos = 0;
  curBlockStart = start;
  nextBlockStart = start;
  uncompressedBuf = new byte[maxBlockSize];
  compressedBuf = new byte[Snappy.maxCompressedLength(maxBlockSize)];
}
 
Example 9
Source File: SnappyOutputStream.java    From sparkey-java with Apache License 2.0
SnappyOutputStream(int maxBlockSize, OutputStream output, FileDescriptor fileDescriptor) throws IOException {
  this.fileDescriptor = fileDescriptor;
  if (maxBlockSize < 10) {
    throw new IOException("Too small block size - won't be able to fit keylen + valuelen in a single block");
  }
  this.maxBlockSize = maxBlockSize;
  this.output = output;
  uncompressedBuffer = new byte[maxBlockSize];
  compressedBuffer = new byte[Snappy.maxCompressedLength(maxBlockSize)];
}
 
Example 10
Source File: ICompressor.java    From incubator-iotdb with Apache License 2.0
@Override
public int getMaxBytesForCompression(int uncompressedDataSize) {
  return Snappy.maxCompressedLength(uncompressedDataSize);
}
 
Example 11
Source File: SnappyCompressor.java    From stratio-cassandra with Apache License 2.0
public int initialCompressedBufferLength(int chunkLength)
{
    return Snappy.maxCompressedLength(chunkLength);
}
 
Example 12
Source File: HashCacheClient.java    From phoenix with BSD 3-Clause "New" or "Revised" License
private void serialize(ImmutableBytesWritable ptr, ResultIterator iterator, long estimatedSize, List<Expression> onExpressions) throws SQLException {
    long maxSize = serverCache.getConnection().getQueryServices().getProps().getLong(QueryServices.MAX_SERVER_CACHE_SIZE_ATTRIB, QueryServicesOptions.DEFAULT_MAX_SERVER_CACHE_SIZE);
    estimatedSize = Math.min(estimatedSize, maxSize);
    if (estimatedSize > Integer.MAX_VALUE) {
        throw new IllegalStateException("Estimated size(" + estimatedSize + ") must not be greater than Integer.MAX_VALUE(" + Integer.MAX_VALUE + ")");
    }
    try {
        TrustedByteArrayOutputStream baOut = new TrustedByteArrayOutputStream((int)estimatedSize);
        DataOutputStream out = new DataOutputStream(baOut);
        // Write onExpressions first, for hash key evaluation along with deserialization
        out.writeInt(onExpressions.size());
        for (Expression expression : onExpressions) {
            WritableUtils.writeVInt(out, ExpressionType.valueOf(expression).ordinal());
            expression.write(out);                
        }
        int exprSize = baOut.size() + Bytes.SIZEOF_INT;
        out.writeInt(exprSize);
        int nRows = 0;
        out.writeInt(nRows); // In the end will be replaced with total number of rows            
        for (Tuple result = iterator.next(); result != null; result = iterator.next()) {
            TupleUtil.write(result, out);
            if (baOut.size() > maxSize) {
                throw new MaxServerCacheSizeExceededException("Size of hash cache (" + baOut.size() + " bytes) exceeds the maximum allowed size (" + maxSize + " bytes)");
            }
            nRows++;
        }
        TrustedByteArrayOutputStream sizeOut = new TrustedByteArrayOutputStream(Bytes.SIZEOF_INT);
        DataOutputStream dataOut = new DataOutputStream(sizeOut);
        try {
            dataOut.writeInt(nRows);
            dataOut.flush();
            byte[] cache = baOut.getBuffer();
            // Replace number of rows written above with the correct value.
            System.arraycopy(sizeOut.getBuffer(), 0, cache, exprSize, sizeOut.size());
            // Reallocate to actual size plus compressed buffer size (which is allocated below)
            int maxCompressedSize = Snappy.maxCompressedLength(baOut.size());
            byte[] compressed = new byte[maxCompressedSize]; // size for worst case
            int compressedSize = Snappy.compress(baOut.getBuffer(), 0, baOut.size(), compressed, 0);
            // Last realloc to size of compressed buffer.
            ptr.set(compressed,0,compressedSize);
        } finally {
            dataOut.close();
        }
    } catch (IOException e) {
        throw ServerUtil.parseServerException(e);
    } finally {
        iterator.close();
    }
}
 
Example 13
Source File: SpliceSnappy.java    From spliceengine with GNU Affero General Public License v3.0
public static int maxCompressedLength(int byteSize) {
    return installed ? Snappy.maxCompressedLength(byteSize) : byteSize;
}