Java Code Examples for com.google.common.io.CountingOutputStream#getCount()

The following examples show how to use com.google.common.io.CountingOutputStream#getCount(). Each example is drawn from an open-source project; the project, source file, and license are noted above each snippet.
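Before the project examples, here is a minimal, self-contained sketch (illustrative only, not taken from any of the projects below) of the basic contract: CountingOutputStream forwards every write to the wrapped stream, and getCount() returns the number of bytes written so far as a long.

import com.google.common.io.CountingOutputStream;

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class CountingOutputStreamSketch {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream target = new ByteArrayOutputStream();
        CountingOutputStream cos = new CountingOutputStream(target);

        cos.write("hello".getBytes(StandardCharsets.UTF_8)); // counts 5 bytes
        cos.write(' ');                                      // a single-byte write counts 1 byte
        cos.write("world".getBytes(StandardCharsets.UTF_8)); // counts 5 more bytes

        System.out.println(cos.getCount()); // 11 -- total bytes forwarded so far (returned as a long)
        System.out.println(target.size());  // 11 -- the wrapped stream received the same bytes

        cos.close(); // closing also closes the wrapped stream
    }
}
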
Example 1
Source File: GuavaCountingOutputStreamUnitTest.java    From tutorials with MIT License
@Test(expected = RuntimeException.class)
public void givenData_whenCountReachesLimit_thenThrowException() throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    CountingOutputStream cos = new CountingOutputStream(out);

    byte[] data = new byte[1024];
    ByteArrayInputStream in = new ByteArrayInputStream(data);

    int b;
    while ((b = in.read()) != -1) {
        cos.write(b);
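        // MAX is a byte-count limit defined elsewhere in the test class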
        if (cos.getCount() >= MAX) {
            throw new RuntimeException("Write limit reached");
        }
    }
}
 
Example 2
Source File: EntryAccounting.java    From buck with Apache License 2.0
private long writeDataDescriptor(OutputStream rawOut) throws IOException {
  if (!requiresDataDescriptor()) {
    return 0;
  }

  CountingOutputStream out = new CountingOutputStream(rawOut);
  ByteIo.writeInt(out, ZipEntry.EXTSIG);
  ByteIo.writeInt(out, getCrc());
  if (getCompressedSize() >= ZipConstants.ZIP64_MAGICVAL
      || getSize() >= ZipConstants.ZIP64_MAGICVAL) {
    ByteIo.writeLong(out, getCompressedSize());
    ByteIo.writeLong(out, getSize());
  } else {
    ByteIo.writeInt(out, getCompressedSize());
    ByteIo.writeInt(out, getSize());
  }
  return out.getCount();
}
 
Example 3
Source File: LZ4CompressColumnTest.java    From kylin-on-parquet-v2 with Apache License 2.0
public int writeCompressedData(int rowCnt) throws IOException {
    int compressBlockSize = 64 * 1024;
    CountingOutputStream countingOutputStream = new CountingOutputStream(new FileOutputStream(tmpColFile));
    LZ4CompressedColumnWriter writer = new LZ4CompressedColumnWriter(4, rowCnt, compressBlockSize,
            countingOutputStream);
    int[] colValues = new int[rowCnt];
    for (int i = 0; i < rowCnt; i++) {
        colValues[i] = i;
    }
    for (int i = 0; i < rowCnt; i++) {
        writer.write(Bytes.toBytes(colValues[i]));
    }
    writer.flush();
    return (int) countingOutputStream.getCount();
}
 
Example 4
Source File: RunLengthCompressColumnTest.java    From kylin-on-parquet-v2 with Apache License 2.0
public int writeCompressData1(int rowCnt) throws IOException {
    int compressBlockSize = 64 * 1024;
    CountingOutputStream countingOutputStream = new CountingOutputStream(new FileOutputStream(tmpColFile));
    RunLengthCompressedColumnWriter writer = new RunLengthCompressedColumnWriter(4, rowCnt, compressBlockSize,
            countingOutputStream);
    int[] colValues = new int[rowCnt];
    for (int i = 0; i < rowCnt; i++) {
        colValues[i] = i;
    }
    for (int i = 0; i < rowCnt; i++) {
        writer.write(Bytes.toBytes(colValues[i]));
    }
    writer.flush();
    return (int) countingOutputStream.getCount();
}
 
Example 5
Source File: BaseMethodStatsInterceptor.java    From datawave with Apache License 2.0
protected ResponseMethodStats doWrite(WriterInterceptorContext context) throws IOException, WebApplicationException {
    ResponseMethodStats stats;
    long start = System.nanoTime();
    OutputStream originalOutputStream = context.getOutputStream();
    CountingOutputStream countingStream = new CountingOutputStream(originalOutputStream);
    context.setOutputStream(countingStream);
    try {
        context.proceed();
    } finally {
        long stop = System.nanoTime();
        long time = TimeUnit.NANOSECONDS.toMillis(stop - start);
        
        context.setOutputStream(originalOutputStream);
        
        stats = (ResponseMethodStats) context.getProperty(RESPONSE_STATS_NAME);
        if (stats == null) {
            log.warn("No response stats found for " + getClass() + ". Using default.");
            stats = new ResponseMethodStats();
        }
        
        RequestMethodStats requestStats = (RequestMethodStats) context.getProperty(REQUEST_STATS_NAME);
        if (requestStats == null) {
            log.warn("No request method stats found for " + getClass() + ". Using default.");
            requestStats = new RequestMethodStats();
            requestStats.callStartTime = stop + TimeUnit.MILLISECONDS.toNanos(1);
        }
        
        stats.serializationTime = time;
        stats.loginTime = requestStats.getLoginTime();
        stats.callTime = TimeUnit.NANOSECONDS.toMillis(stop - requestStats.getCallStartTime());
        stats.bytesWritten = countingStream.getCount();
        // Merge in the headers we saved in the postProcess call, if any.
        putNew(stats.responseHeaders, context.getHeaders());
    }
    
    return stats;
}
 
Example 6
Source File: GZipUtils.java    From bundletool with Apache License 2.0
/** Calculates the GZip compressed size in bytes of the target {@code stream}. */
public static long calculateGzipCompressedSize(@WillNotClose InputStream stream)
    throws IOException {
  CountingOutputStream countingOutputStream =
      new CountingOutputStream(ByteStreams.nullOutputStream());
  try (GZIPOutputStream compressedStream = new GZIPOutputStream(countingOutputStream)) {
    ByteStreams.copy(stream, compressedStream);
  }
  return countingOutputStream.getCount();
}
 
Example 7
Source File: TableWriter.java    From mph-table with Apache License 2.0
private static <K, V> void writeToIndexedOffsets(
        final File inputData,
        final File outputData,
        final File outputOffsets,
        final TableMeta<K, V> meta,
        final Iterable<Pair<K, V>> entries,
        final long dataSize) throws IOException {
    final long numEntries = meta.numEntries();
    final int offsetSize = meta.getConfig().bytesPerOffset(numEntries, dataSize);
    final long totalOffsetSize = numEntries * offsetSize;
    final BufferedFileDataOutputStream fileOut = new BufferedFileDataOutputStream(outputData);
    final CountingOutputStream countOut = new CountingOutputStream(fileOut);
    final long startMillis = System.currentTimeMillis();
    try (final MMapBuffer offsets = new MMapBuffer(outputOffsets, 0L, totalOffsetSize, FileChannel.MapMode.READ_WRITE, ByteOrder.nativeOrder());
         final LittleEndianDataOutputStream out = new LittleEndianDataOutputStream(countOut)) {
        for (final Pair<K, V> e : entries) {
            final long hash = meta.getHash(e.getFirst());
            if (hash < 0) {
                throw new IOException("inconsistent mph, known key hashed to -1: " + e.getFirst());
            }
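            // countOut tracks bytes written to outputData, so getCount() is this entry's byte offset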
            final long offset = countOut.getCount();
            if (offsetSize == 2) {
                offsets.memory().putShort(hash * 2L, (short) offset);
            } else if (offsetSize == 4) {
                offsets.memory().putInt(hash * 4L, (int) offset);
            } else {
                offsets.memory().putLong(hash * 8L, offset);
            }
            meta.getConfig().write(e.getFirst(), e.getSecond(), out);
        }
        offsets.sync(0L, totalOffsetSize);
        out.flush();
    }
    outputData.setReadOnly();
    outputOffsets.setReadOnly();
    LOGGER.info("wrote " + numEntries + " offsets for " + dataSize + " bytes of data in " +
                (System.currentTimeMillis() - startMillis) + " ms");
}
 
Example 8
Source File: RandomReadWritesTest.java    From BUbiNG with Apache License 2.0
@SuppressWarnings("resource")
public static int[] writeRecords(final String path, final int numRecords, final WarcRecord[] randomRecords, final int parallel) throws IOException, InterruptedException {
	final ProgressLogger pl = new ProgressLogger(LOGGER, "records");
	if (parallel <= 1) pl.expectedUpdates = numRecords;
	final ProgressLogger plb = new ProgressLogger(LOGGER, "KB");
	final CountingOutputStream cos = new CountingOutputStream(new FastBufferedOutputStream(new FileOutputStream (path)));
	final WarcWriter ww;
	if (parallel == 0) {
		ww = new UncompressedWarcWriter(cos);
		pl.start("Writing records…");
	} else if (parallel == 1) {
		ww = new CompressedWarcWriter(cos);
		pl.start("Writing records (compressed)…");
	} else {
		ww = null;
		pl.start("SHOULD NOT HAPPEN");
		throw new IllegalStateException();
	}
	plb.start();
	long written = 0;
	final int[] position = new int[numRecords];
	for (int i = 0; i < numRecords; i++) {
		final int pos = RandomTestMocks.RNG.nextInt(randomRecords.length);
		position[i] = pos;
		ww.write(randomRecords[pos]);
		if (parallel <= 0) {
			pl.lightUpdate();
			plb.update((cos.getCount() - written) / 1024);
		}
		written = cos.getCount();
	}
	ww.close();
	pl.done(numRecords);
	plb.done(cos.getCount());
	return position;
}
 
Example 9
Source File: ImmutableBTreeIndex.java    From lsmtree with Apache License 2.0
/**
 * @param path root lsm tree index directory
 * @param iterator the iterator
 * @param keySerializer the key serializer
 * @param valueSerializer the value serializer
 * @param blocksize block size
 * @param keepDeletions true to keep deletions
 * @param <K> the key type
 * @param <V> the value type
 * @throws IOException  if an I/O error occurs
 */
public static <K, V> void write(
        Path path,
        Iterator<Generation.Entry<K,V>> iterator,
        Serializer<K> keySerializer,
        Serializer<V> valueSerializer,
        final int blocksize,
        boolean keepDeletions
) throws IOException {
    if (blocksize > 65536) throw new IllegalArgumentException("block size must be at most 65536");
    Files.createDirectories(path);
    final BufferedFileDataOutputStream fileOut = new BufferedFileDataOutputStream(path.resolve("index.bin"));
    final CountingOutputStream out = new CountingOutputStream(fileOut);
    //tempFile is deleted in writeIndex
    final Path tempPath = Files.createTempFile("tmp", ".bin");
    final WriteLevelResult result = writeLevel(out, tempPath, iterator, keySerializer, valueSerializer, blocksize, keepDeletions);
    final int tmpCount = result.tmpCount;
    final long size = result.size;

    final long valueLevelLength = out.getCount();
    final Header header = writeIndex(out, tempPath, tmpCount, keySerializer, blocksize);
    header.valueLevelLength = valueLevelLength;
    header.size = size;
    header.hasDeletions = keepDeletions;
    new HeaderSerializer().write(header, new LittleEndianDataOutputStream(out));
    fileOut.sync();
    out.close();
}
 
Example 10
Source File: DumpingExtractorOutput.java    From webarchive-commons with Apache License 2.0
public void output(Resource resource) throws IOException {
	OutputStream nullo = ByteStreams.nullOutputStream();
	CountingOutputStream co = new CountingOutputStream(nullo);
	StreamCopy.copy(resource.getInputStream(), co);
	long bytes = co.getCount();
	if(bytes > 0) {
		LOG.info(bytes + " unconsumed bytes in Resource InputStream.");
	}
	try {
		out.println(resource.getMetaData().getTopMetaData().toString(1));
	} catch (JSONException e) {
		LOG.warning(e.getMessage());
	}
}
 
Example 11
Source File: SnowflakeFileTransferAgent.java    From snowflake-jdbc with Apache License 2.0
/**
 * Compress an input stream with GZIP and return the result size, digest and
 * compressed stream.
 *
 * @param inputStream data input
 * @return result size, digest and compressed stream
 * @throws SnowflakeSQLException if encountered exception when compressing
 */
private static InputStreamWithMetadata compressStreamWithGZIP(
    InputStream inputStream) throws SnowflakeSQLException
{
  FileBackedOutputStream tempStream =
      new FileBackedOutputStream(MAX_BUFFER_SIZE, true);

  try
  {

    DigestOutputStream digestStream = new DigestOutputStream(tempStream,
                                                             MessageDigest.getInstance("SHA-256"));

    CountingOutputStream countingStream =
        new CountingOutputStream(digestStream);

    // construct a gzip stream with sync_flush mode
    GZIPOutputStream gzipStream;

    gzipStream = new GZIPOutputStream(countingStream, true);

    IOUtils.copy(inputStream, gzipStream);

    inputStream.close();

    gzipStream.finish();
    gzipStream.flush();

    countingStream.flush();
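    // countingStream sits below the GZIP layer, so getCount() reports the compressed byte count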

    return new InputStreamWithMetadata(countingStream.getCount(),
                                       Base64.encodeAsString(digestStream.getMessageDigest().digest()),
                                       tempStream);

  }
  catch (IOException | NoSuchAlgorithmException ex)
  {
    logger.error("Exception compressing input stream", ex);

    throw new SnowflakeSQLException(ex, SqlState.INTERNAL_ERROR,
                                    ErrorCode.INTERNAL_ERROR.getMessageCode(),
                                    "error encountered for compression");
  }

}
 
Example 12
Source File: SnowflakeFileTransferAgent.java    From snowflake-jdbc with Apache License 2.0
/**
 * Compress an input stream with GZIP and return the result size and
 * compressed stream (no digest is computed).
 *
 * @param inputStream The input stream to compress
 * @return the compressed stream
 * @throws SnowflakeSQLException Will be thrown if there is a problem with
 *                               compression
 * @deprecated Can be removed when all accounts are encrypted
 */
@Deprecated
private static InputStreamWithMetadata compressStreamWithGZIPNoDigest(
    InputStream inputStream) throws SnowflakeSQLException
{
  try
  {
    FileBackedOutputStream tempStream =
        new FileBackedOutputStream(MAX_BUFFER_SIZE, true);

    CountingOutputStream countingStream =
        new CountingOutputStream(tempStream);

    // construct a gzip stream with sync_flush mode
    GZIPOutputStream gzipStream;

    gzipStream = new GZIPOutputStream(countingStream, true);

    IOUtils.copy(inputStream, gzipStream);

    inputStream.close();

    gzipStream.finish();
    gzipStream.flush();

    countingStream.flush();

    return new InputStreamWithMetadata(countingStream.getCount(),
                                       null, tempStream);

  }
  catch (IOException ex)
  {
    logger.error("Exception compressing input stream", ex);

    throw new SnowflakeSQLException(ex, SqlState.INTERNAL_ERROR,
                                    ErrorCode.INTERNAL_ERROR.getMessageCode(),
                                    "error encountered for compression");
  }

}
 
Example 13
Source File: ImmutableBTreeIndex.java    From lsmtree with Apache License 2.0
private static <K,V> WriteLevelResult writeLevel(
        final CountingOutputStream counter,
        final Path tempPath,
        final Iterator<Generation.Entry<K,V>> iterator,
        final Serializer<K> keySerializer,
        final Serializer<V> valueSerializer,
        final int blocksize,
        final boolean keepDeletions
) throws IOException {
    Generation.Entry<K,V> next;
    if (!iterator.hasNext()) {
        return new WriteLevelResult(0, 0);
    }
    next = iterator.next();
    final LittleEndianDataOutputStream tmpOut = new LittleEndianDataOutputStream(new BufferedOutputStream(Files.newOutputStream(tempPath), 131072));
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    final LittleEndianDataOutputStream bufferDataOutput = new LittleEndianDataOutputStream(buffer);
    final ByteArrayOutputStream currentBlock = new ByteArrayOutputStream(blocksize);
    final CharArrayList keyOffsets = new CharArrayList();
    int tmpCount = 0;
    boolean done = false;
    final LittleEndianDataOutputStream out = new LittleEndianDataOutputStream(counter);
    long count = 0;
    outer: while (!done) {

        currentBlock.reset();
        keyOffsets.clear();
        if (!keepDeletions) {
            while (next.isDeleted()) {
                if (!iterator.hasNext()) break outer;
                next = iterator.next();
            }
        }
        keySerializer.write(next.getKey(), tmpOut);
        tmpOut.writeLong(counter.getCount());
        tmpCount++;
        while (true) {
            buffer.reset();
            final boolean skipDeleted = updateBuffer(next, keySerializer, valueSerializer, keepDeletions, bufferDataOutput);
            if (4+2*keyOffsets.size()+2+currentBlock.size()+buffer.size() > blocksize) {
                if (currentBlock.size() == 0) {
                    throw new IllegalArgumentException("key value pair is greater than block size");
                }
                break;
            }
            if (!skipDeleted) {
                keyOffsets.add((char)currentBlock.size());
                buffer.writeTo(currentBlock);
                count++;
            }
            if (!iterator.hasNext()) {
                done = true;
                break;
            }
            next = iterator.next();
        }
        if (keyOffsets.size() > 0) {
            final long start = counter.getCount();
            out.writeInt(keyOffsets.size());
            for (int i = 0; i < keyOffsets.size(); i++) {
                out.writeChar(keyOffsets.getChar(i));
            }
            currentBlock.writeTo(out);
            if (counter.getCount()-start > blocksize) {
                log.error("too big");
            }
        }
    }
    tmpOut.close();
    return new WriteLevelResult(tmpCount, count);
}
 
Example 14
Source File: EntryAccounting.java    From buck with Apache License 2.0
public long writeLocalFileHeader(OutputStream out) throws IOException {
  if (method == Method.DEFLATE && entry instanceof CustomZipEntry) {
    // See http://www.pkware.com/documents/casestudies/APPNOTE.TXT (section 4.4.4)
    // Essentially, we're about to set bits 1 and 2 to indicate to tools such as zipinfo which
    // level of compression we're using. If we've not set a compression level, then we're using
    // the default one, which is right. It turns out. For your viewing pleasure:
    //
    // +----------+-------+-------+
    // | Level    | Bit 1 | Bit 2 |
    // +----------+-------+-------+
    // | Fastest  |   0   |   1   |
    // | Normal   |   0   |   0   |
    // | Best     |   1   |   0   |
    // +----------+-------+-------+
    int level = ((CustomZipEntry) entry).getCompressionLevel();
    switch (level) {
      case Deflater.BEST_COMPRESSION:
        flags |= (1 << 1);
        break;

      case Deflater.BEST_SPEED:
        flags |= (1 << 2);
        break;
    }
  }

  if (requiresDataDescriptor()) {
    flags |= DATA_DESCRIPTOR_FLAG;
  }

  CountingOutputStream stream = new CountingOutputStream(out);
  ByteIo.writeInt(stream, ZipEntry.LOCSIG);

  boolean useZip64;
  if (!requiresDataDescriptor() && entry.getSize() >= ZipConstants.ZIP64_MAGICVAL) {
    useZip64 = true;
  } else {
    useZip64 = false;
  }

  ByteIo.writeShort(stream, getRequiredExtractVersion(useZip64));
  ByteIo.writeShort(stream, flags);
  ByteIo.writeShort(stream, getCompressionMethod());
  ByteIo.writeInt(stream, getTime());

  // If we don't know the size or CRC of the data in advance (such as when in deflate mode),
  // we write zeros now, and append the actual values (the data descriptor) after the entry
  // bytes has been fully written.
  if (requiresDataDescriptor()) {
    ByteIo.writeInt(stream, 0);
    ByteIo.writeInt(stream, 0);
    ByteIo.writeInt(stream, 0);
  } else {
    ByteIo.writeInt(stream, entry.getCrc());
    if (entry.getSize() >= ZipConstants.ZIP64_MAGICVAL) {
      ByteIo.writeInt(stream, ZipConstants.ZIP64_MAGICVAL);
      ByteIo.writeInt(stream, ZipConstants.ZIP64_MAGICVAL);
    } else {
      ByteIo.writeInt(stream, entry.getSize());
      ByteIo.writeInt(stream, entry.getSize());
    }
  }

  byte[] nameBytes = entry.getName().getBytes(Charsets.UTF_8);
  ByteIo.writeShort(stream, nameBytes.length);
  ByteIo.writeShort(stream, useZip64 ? ZipConstants.ZIP64_LOCHDR : 0);
  stream.write(nameBytes);
  if (useZip64) {
    ByteIo.writeShort(stream, ZipConstants.ZIP64_EXTID);
    ByteIo.writeShort(stream, 16);
    ByteIo.writeLong(stream, entry.getSize());
    ByteIo.writeLong(stream, entry.getSize());
  }

  return stream.getCount();
}