com.google.common.io.CountingOutputStream Java Examples

The following examples show how to use com.google.common.io.CountingOutputStream. Each example is taken from an open-source project; the source file, originating project, and license are noted above it.
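As a quick orientation before the project examples: CountingOutputStream wraps another OutputStream, forwards every write to it, and exposes the number of bytes written so far through getCount(). The minimal sketch below is ours, not taken from any project on this page; it writes into Guava's null sink since only the count matters here.

import com.google.common.io.ByteStreams;
import com.google.common.io.CountingOutputStream;

import java.io.IOException;
import java.io.OutputStream;

public class CountingOutputStreamDemo {
    public static void main(String[] args) throws IOException {
        // Discard the bytes; we only care about how many pass through.
        OutputStream sink = ByteStreams.nullOutputStream();
        try (CountingOutputStream counting = new CountingOutputStream(sink)) {
            counting.write(new byte[] { 1, 2, 3 });  // three bytes
            counting.write(0x2A);                    // one more byte
            System.out.println(counting.getCount()); // prints 4
        }
    }
}
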
Example #1
Source File: EntryAccounting.java    From buck with Apache License 2.0
private long writeDataDescriptor(OutputStream rawOut) throws IOException {
  if (!requiresDataDescriptor()) {
    return 0;
  }

  CountingOutputStream out = new CountingOutputStream(rawOut);
  ByteIo.writeInt(out, ZipEntry.EXTSIG);
  ByteIo.writeInt(out, getCrc());
  if (getCompressedSize() >= ZipConstants.ZIP64_MAGICVAL
      || getSize() >= ZipConstants.ZIP64_MAGICVAL) {
    ByteIo.writeLong(out, getCompressedSize());
    ByteIo.writeLong(out, getSize());
  } else {
    ByteIo.writeInt(out, getCompressedSize());
    ByteIo.writeInt(out, getSize());
  }
  return out.getCount();
}
 
Example #2
Source File: JdbcAvroIO.java    From dbeam with Apache License 2.0
@SuppressWarnings("deprecation") // uses internal test functionality.
@Override
protected void prepareWrite(WritableByteChannel channel) throws Exception {
  logger.info("jdbcavroio : Preparing write...");
  connection = jdbcAvroArgs.jdbcConnectionConfiguration().createConnection();
  Void destination = getDestination();
  Schema schema = dynamicDestinations.getSchema(destination);
  dataFileWriter =
      new DataFileWriter<>(new GenericDatumWriter<GenericRecord>(schema))
          .setCodec(jdbcAvroArgs.getCodecFactory())
          .setSyncInterval(syncInterval);
  dataFileWriter.setMeta("created_by", this.getClass().getCanonicalName());
  this.countingOutputStream = new CountingOutputStream(Channels.newOutputStream(channel));
  dataFileWriter.create(schema, this.countingOutputStream);
  logger.info("jdbcavroio : Write prepared");
}
 
Example #3
Source File: ServletResponseResultWriter.java    From endpoints-java with Apache License 2.0
protected void write(int status, Map<String, String> headers, Object content) throws IOException {
  // write response status code
  servletResponse.setStatus(status);

  // write response headers
  if (headers != null) {
    for (Map.Entry<String, String> entry : headers.entrySet()) {
      servletResponse.addHeader(entry.getKey(), entry.getValue());
    }
  }

  // write response body
  if (content != null) {
    servletResponse.setContentType(SystemService.MIME_JSON);
    if (addContentLength) {
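      // Serialize once into a null sink purely to measure the body size for Content-Length.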
      CountingOutputStream counter = new CountingOutputStream(ByteStreams.nullOutputStream());
      objectWriter.writeValue(counter, content);
      servletResponse.setContentLength((int) counter.getCount());
    }
    objectWriter.writeValue(servletResponse.getOutputStream(), content);
  }
}
 
Example #4
Source File: ArithmeticCoderTest.java    From indexr with Apache License 2.0
@Test
public void encodeDecodeTest() throws IOException {
    ArthmeticCoder.SimpleFrequency freq = new ArthmeticCoder.SimpleFrequency(counts);

    ByteArrayOutputStream encodedPool = new ByteArrayOutputStream();
    CountingOutputStream outputCounting = new CountingOutputStream(encodedPool);
    ArthmeticCoder.Encoder encoder = new ArthmeticCoder.Encoder(freq, new BitWrappedOutputStream(outputCounting));
    for (int s : symbols) {
        encoder.write(s);
    }
    encoder.seal();

    ByteArrayInputStream decodedPool = new ByteArrayInputStream(encodedPool.toByteArray());
    CountingInputStream inputCounting = new CountingInputStream(decodedPool);
    ArthmeticCoder.Decoder decoder = new ArthmeticCoder.Decoder(freq, new BitWrappedInputStream(inputCounting));
    int[] symbols2 = new int[symbols.length];
    for (int i = 0; i < symbols.length; i++) {
        symbols2[i] = decoder.read();
    }

    Assert.assertEquals(outputCounting.getCount(), inputCounting.getCount());
    Assert.assertArrayEquals(symbols, symbols2);
}
 
Example #5
Source File: FilesTargetRepository.java    From ache with Apache License 2.0
private synchronized void openNewFile() throws IOException {
    if(currentFile != null) {
        // flush, then let try-with-resources close the file
        try(OutputStream out = this.currentFile) {
            out.flush();
        }
    }
    long timestamp = System.currentTimeMillis();
    long count = 0;
    Path filePath;
    do {
        String file = String.format("crawl_data-%d-%d.deflate", timestamp, count++);
        filePath = directory.resolve(file);
    } while (Files.exists(filePath));
    OutputStream fileStream = new PrintStream(filePath.toFile());
    this.bytesCounter = new CountingOutputStream(fileStream);
    this.currentFile = new DeflaterOutputStream(this.bytesCounter, true);
}
 
Example #6
Source File: ColumnarMemoryStorePersister.java    From kylin-on-parquet-v2 with Apache License 2.0
private Map<TblColRef, Dictionary<String>> buildAndPersistDictionaries(FragmentMetaInfo fragmentMetaInfo,
        List<List<Object>> allColumnarValues, CountingOutputStream fragmentOut) throws IOException {
    Map<TblColRef, Dictionary<String>> dictMaps = Maps.newHashMap();
    List<DimDictionaryMetaInfo> dimDictionaryMetaInfos = Lists.newArrayList();
    for (int i = 0; i < dimensions.length; i++) {
        TblColRef dimension = dimensions[i];
        List<Object> dimValueList = allColumnarValues.get(i);
        Dictionary<String> dict;
        DimDictionaryMetaInfo dimDictionaryMetaInfo = new DimDictionaryMetaInfo();
        if (dimensionsUseDictEncoding.contains(dimension)) {
            dict = buildDictionary(dimension, dimValueList);
            dictMaps.put(dimension, dict);

            dimDictionaryMetaInfo.setDimName(dimension.getName());
            dimDictionaryMetaInfo.setDictType(dict.getClass().getName());
            dimDictionaryMetaInfo.setStartOffset((int) fragmentOut.getCount());

            DictionarySerializer.serialize(dict, fragmentOut);
            dimDictionaryMetaInfo.setDictLength((int) fragmentOut.getCount()
                    - dimDictionaryMetaInfo.getStartOffset());
            dimDictionaryMetaInfos.add(dimDictionaryMetaInfo);
        }
    }
    fragmentMetaInfo.setDimDictionaryMetaInfos(dimDictionaryMetaInfos);
    return dictMaps;
}
 
Example #7
Source File: GuavaCountingOutputStreamUnitTest.java    From tutorials with MIT License
@Test(expected = RuntimeException.class)
public void givenData_whenCountReachesLimit_thenThrowException() throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    CountingOutputStream cos = new CountingOutputStream(out);

    byte[] data = new byte[1024];
    ByteArrayInputStream in = new ByteArrayInputStream(data);

    int b;
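    // MAX is a byte-count limit defined as a constant elsewhere in this test class.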
    while ((b = in.read()) != -1) {
        cos.write(b);
        if (cos.getCount() >= MAX) {
            throw new RuntimeException("Write limit reached");
        }
    }
}
 
Example #8
Source File: FragmentFilesMerger.java    From kylin-on-parquet-v2 with Apache License 2.0
private Map<TblColRef, Dictionary<String>> mergeAndPersistDictionaries(FragmentMetaInfo fragmentMetaInfo,
        Map<TblColRef, List<Dictionary<String>>> dimDictListMap, CountingOutputStream fragmentOut)
        throws IOException {
    logger.info("merge dimension dictionaries");
    Map<TblColRef, Dictionary<String>> mergedDictMap = Maps.newHashMap();
    List<DimDictionaryMetaInfo> dimDictionaryMetaInfos = Lists.newArrayList();
    for (TblColRef dimension : parsedCubeInfo.dimensionsUseDictEncoding) {
        List<Dictionary<String>> dicts = dimDictListMap.get(dimension);
        MultipleDictionaryValueEnumerator multipleDictionaryValueEnumerator = new MultipleDictionaryValueEnumerator(
                dimension.getType(), dicts);
        Dictionary<String> mergedDict = DictionaryGenerator.buildDictionary(dimension.getType(),
                multipleDictionaryValueEnumerator);
        mergedDictMap.put(dimension, mergedDict);

        DimDictionaryMetaInfo dimDictionaryMetaInfo = new DimDictionaryMetaInfo();
        dimDictionaryMetaInfo.setDimName(dimension.getName());
        dimDictionaryMetaInfo.setDictType(mergedDict.getClass().getName());
        dimDictionaryMetaInfo.setStartOffset((int) fragmentOut.getCount());

        DictionarySerializer.serialize(mergedDict, fragmentOut);
        dimDictionaryMetaInfo.setDictLength((int) fragmentOut.getCount() - dimDictionaryMetaInfo.getStartOffset());
        dimDictionaryMetaInfos.add(dimDictionaryMetaInfo);
    }
    fragmentMetaInfo.setDimDictionaryMetaInfos(dimDictionaryMetaInfos);
    return mergedDictMap;
}
 
Example #9
Source File: ProtocolHandshake.java    From selenium with Apache License 2.0
public Result createSession(HttpClient client, Command command)
    throws IOException {
  Capabilities desired = (Capabilities) command.getParameters().get("desiredCapabilities");
  desired = desired == null ? new ImmutableCapabilities() : desired;

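  // Keep the serialized payload in memory up to roughly a tenth of free heap, then spill to disk.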
  int threshold = (int) Math.min(Runtime.getRuntime().freeMemory() / 10, Integer.MAX_VALUE);
  FileBackedOutputStream os = new FileBackedOutputStream(threshold);
  try (
      CountingOutputStream counter = new CountingOutputStream(os);
      Writer writer = new OutputStreamWriter(counter, UTF_8);
      NewSessionPayload payload = NewSessionPayload.create(desired)) {

    payload.writeTo(writer);

    try (InputStream rawIn = os.asByteSource().openBufferedStream();
         BufferedInputStream contentStream = new BufferedInputStream(rawIn)) {
      Optional<Result> result = createSession(client, contentStream, counter.getCount());

      if (result.isPresent()) {
        Result toReturn = result.get();
        LOG.info(String.format("Detected dialect: %s", toReturn.dialect));
        return toReturn;
      }
    }
  } finally {
    os.reset();
  }

  throw new SessionNotCreatedException(
      String.format(
          "Unable to create new remote session. " +
          "desired capabilities = %s",
          desired));
}
 
Example #10
Source File: CsvOutputBuilder.java    From airpal with Apache License 2.0
public CsvOutputBuilder(boolean includeHeader, UUID jobUUID, long maxFileSizeBytes, boolean compressedOutput) throws IOException {
    this.includeHeader = includeHeader;
    this.jobUUID = jobUUID;
    this.outputFile = File.createTempFile(jobUUID.toString(), FILE_SUFFIX);
    this.maxFileSizeBytes = maxFileSizeBytes;
    this.countingOutputStream = new CountingOutputStream(new FileOutputStream(this.outputFile));
    OutputStreamWriter writer;
    if (compressedOutput) {
        writer = new OutputStreamWriter(new GZIPOutputStream(this.countingOutputStream));
    }
    else {
        writer = new OutputStreamWriter(this.countingOutputStream);
    }
    this.csvWriter = new CSVWriter(writer);
}
 
Example #11
Source File: SimpleLogHandler.java    From bazel with Apache License 2.0
/**
 * Opens the specified file in append mode, first closing the current file if needed.
 *
 * @throws IOException if the file could not be opened
 */
public void open(String path) throws IOException {
  try {
    close();
    file = new File(path);
    stream = new CountingOutputStream(new FileOutputStream(file, true));
    writer = new OutputStreamWriter(stream, UTF_8);
  } catch (IOException e) {
    close();
    throw e;
  }
}
 
Example #12
Source File: DelimitedTextFileReaderWriterFactory.java    From secor with Apache License 2.0
public DelimitedTextFileWriter(LogFilePath path, CompressionCodec codec) throws IOException {
    Path fsPath = new Path(path.getLogFilePath());
    FileSystem fs = FileUtil.getFileSystem(path.getLogFilePath());
    this.mCountingStream = new CountingOutputStream(fs.create(fsPath));
    this.mWriter = (codec == null) ? new BufferedOutputStream(
            this.mCountingStream) : new BufferedOutputStream(
            codec.createOutputStream(this.mCountingStream,
                                     mCompressor = CodecPool.getCompressor(codec)));
}
 
Example #13
Source File: FlexibleDelimitedFileReaderWriterFactory.java    From secor with Apache License 2.0
public FlexibleDelimitedFileWriter(LogFilePath path, CompressionCodec codec) throws IOException {
  Path fsPath = new Path(path.getLogFilePath());
  FileSystem fs = FileUtil.getFileSystem(path.getLogFilePath());
  this.mCountingStream = new CountingOutputStream(fs.create(fsPath));
  this.mWriter = (codec == null) ? new BufferedOutputStream(
          this.mCountingStream) : new BufferedOutputStream(
          codec.createOutputStream(this.mCountingStream,
                  mCompressor = CodecPool.getCompressor(codec)));
}
 
Example #14
Source File: ImmutableBTreeIndex.java    From lsmtree with Apache License 2.0
/**
 * @param path root lsm tree index directory
 * @param iterator the iterator
 * @param keySerializer the key serializer
 * @param valueSerializer the value serializer
 * @param blocksize block size
 * @param keepDeletions true to keep deletion
 * @param <K> the key type
 * @param <V> the value type
 * @throws IOException  if an I/O error occurs
 */
public static <K, V> void write(
        Path path,
        Iterator<Generation.Entry<K,V>> iterator,
        Serializer<K> keySerializer,
        Serializer<V> valueSerializer,
        final int blocksize,
        boolean keepDeletions
) throws IOException {
    if (blocksize > 65536) throw new IllegalArgumentException("block size must not exceed 65536");
    Files.createDirectories(path);
    final BufferedFileDataOutputStream fileOut = new BufferedFileDataOutputStream(path.resolve("index.bin"));
    final CountingOutputStream out = new CountingOutputStream(fileOut);
    //tempFile is deleted in writeIndex
    final Path tempPath = Files.createTempFile("tmp", ".bin");
    final WriteLevelResult result = writeLevel(out, tempPath, iterator, keySerializer, valueSerializer, blocksize, keepDeletions);
    final int tmpCount = result.tmpCount;
    final long size = result.size;

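    // The running count marks where the value level ends and the index will begin.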
    final long valueLevelLength = out.getCount();
    final Header header = writeIndex(out, tempPath, tmpCount, keySerializer, blocksize);
    header.valueLevelLength = valueLevelLength;
    header.size = size;
    header.hasDeletions = keepDeletions;
    new HeaderSerializer().write(header, new LittleEndianDataOutputStream(out));
    fileOut.sync();
    out.close();
}
 
Example #15
Source File: CappedDatabase.java    From glowroot with Apache License 2.0
private long write(String type, Copier copier) throws IOException {
    long blockStartIndex;
    synchronized (lock) {
        if (closed) {
            return -1;
        }
        long startTick = ticker.read();
        out.startBlock();
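        // Two counters: one downstream of compression (bytes actually stored), one upstream (raw bytes copied in).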
        NonClosingCountingOutputStream countingStreamAfterCompression =
                new NonClosingCountingOutputStream(out);
        CountingOutputStream countingStreamBeforeCompression =
                new CountingOutputStream(newLZFOutputStream(countingStreamAfterCompression));
        copier.copyTo(countingStreamBeforeCompression);
        countingStreamBeforeCompression.close();
        long endTick = ticker.read();
        CappedDatabaseStats stats = statsByType.get(type);
        if (stats == null) {
            stats = new CappedDatabaseStats();
            statsByType.put(type, stats);
        }
        stats.record(countingStreamBeforeCompression.getCount(),
                countingStreamAfterCompression.getCount(), endTick - startTick);
        blockStartIndex = out.endBlock();
    }
    // fsync (if really needed here) does not need to be done under lock
    out.fsyncIfReallyNeeded();
    return blockStartIndex;
}
 
Example #16
Source File: DumpingExtractorOutput.java    From webarchive-commons with Apache License 2.0
public void output(Resource resource) throws IOException {
	OutputStream nullo = ByteStreams.nullOutputStream();
	CountingOutputStream co = new CountingOutputStream(nullo);
	StreamCopy.copy(resource.getInputStream(), co);
	long bytes = co.getCount();
	if(bytes > 0) {
		LOG.info(bytes + " unconsumed bytes in Resource InputStream.");
	}
	try {
		out.println(resource.getMetaData().getTopMetaData().toString(1));
	} catch (JSONException e) {
		LOG.warning(e.getMessage());
	}		
}
 
Example #17
Source File: PayloadRecorder.java    From packagedrone with Eclipse Public License 1.0
public PayloadRecorder ( final boolean autoFinish, final PayloadCoding payloadCoding, final String payloadFlags, final DigestAlgorithm fileDigestAlgorithm ) throws IOException
{
    this.autoFinish = autoFinish;

    this.fileDigestAlgorithm = fileDigestAlgorithm;

    this.tempFile = Files.createTempFile ( "rpm-", null );

    try
    {
        this.fileStream = new BufferedOutputStream ( Files.newOutputStream ( this.tempFile, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING ) );

        this.payloadCounter = new CountingOutputStream ( this.fileStream );

        this.payloadCoding = payloadCoding;

        this.payloadFlags = Optional.ofNullable ( payloadFlags );

        final OutputStream payloadStream = this.payloadCoding.createProvider ().createOutputStream ( this.payloadCounter, this.payloadFlags );

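        // payloadCounter (above) sees compressed bytes going to the file; archiveCounter sees the raw cpio archive bytes.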
        this.archiveCounter = new CountingOutputStream ( payloadStream );

        // setup archive stream

        this.archiveStream = new CpioArchiveOutputStream ( this.archiveCounter, CpioConstants.FORMAT_NEW, 4, CharsetNames.UTF_8 );
    }
    catch ( final IOException e )
    {
        Files.deleteIfExists ( this.tempFile );
        throw e;
    }
}
 
Example #18
Source File: BlobStore.java    From packagedrone with Eclipse Public License 1.0
public long handleCreate ( final String id, final IOConsumer<OutputStream> consumer ) throws IOException
{
    final Path path = makeDataPath ( id );

    Files.createDirectories ( path.getParent () );

    try ( CountingOutputStream stream = new CountingOutputStream ( new BufferedOutputStream ( Files.newOutputStream ( path, StandardOpenOption.CREATE_NEW ) ) ) )
    {
        consumer.accept ( stream );
        return stream.getCount ();
    }
}
 
Example #19
Source File: NodeLogResource.java    From blueocean-plugin with MIT License
private long appendError(String msg, OutputStream w) throws IOException {
    try (CountingOutputStream os = new CountingOutputStream(w)) {
        os.write(msg.getBytes("UTF-8"));
        os.flush();
        return os.getCount();
    }
}
 
Example #20
Source File: RcFileFileWriter.java    From presto with Apache License 2.0
public RcFileFileWriter(
        OutputStream outputStream,
        Callable<Void> rollbackAction,
        RcFileEncoding rcFileEncoding,
        List<Type> fileColumnTypes,
        Optional<String> codecName,
        int[] fileInputColumnIndexes,
        Map<String, String> metadata,
        Optional<Supplier<RcFileDataSource>> validationInputFactory)
        throws IOException
{
    this.outputStream = new CountingOutputStream(outputStream);
    rcFileWriter = new RcFileWriter(
            new OutputStreamSliceOutput(this.outputStream),
            fileColumnTypes,
            rcFileEncoding,
            codecName,
            new AircompressorCodecFactory(new HadoopCodecFactory(getClass().getClassLoader())),
            metadata,
            validationInputFactory.isPresent());
    this.rollbackAction = requireNonNull(rollbackAction, "rollbackAction is null");

    this.fileInputColumnIndexes = requireNonNull(fileInputColumnIndexes, "outputColumnInputIndexes is null");

    ImmutableList.Builder<Block> nullBlocks = ImmutableList.builder();
    for (Type fileColumnType : fileColumnTypes) {
        BlockBuilder blockBuilder = fileColumnType.createBlockBuilder(null, 1, 0);
        blockBuilder.appendNull();
        nullBlocks.add(blockBuilder.build());
    }
    this.nullBlocks = nullBlocks.build();
    this.validationInputFactory = validationInputFactory;
}
 
Example #21
Source File: RunLengthCompressColumnTest.java    From kylin-on-parquet-v2 with Apache License 2.0
public int writeCompressData1(int rowCnt) throws IOException {
    int compressBlockSize = 64 * 1024;
    CountingOutputStream countingOutputStream = new CountingOutputStream(new FileOutputStream(tmpColFile));
    RunLengthCompressedColumnWriter writer = new RunLengthCompressedColumnWriter(4, rowCnt, compressBlockSize,
            countingOutputStream);
    int[] colValues = new int[rowCnt];
    for (int i = 0; i < rowCnt; i++) {
        colValues[i] = i;
    }
    for (int i = 0; i < rowCnt; i++) {
        writer.write(Bytes.toBytes(colValues[i]));
    }
    writer.flush();
    return (int) countingOutputStream.getCount();
}
 
Example #22
Source File: OrcMetadataWriter.java    From presto with Apache License 2.0
private static int writeProtobufObject(OutputStream output, MessageLite object)
        throws IOException
{
    CountingOutputStream countingOutput = new CountingOutputStream(output);
    object.writeTo(countingOutput);
    return toIntExact(countingOutput.getCount());
}
 
Example #23
Source File: FragmentFilesMerger.java    From kylin-on-parquet-v2 with Apache License 2.0
public CuboidColumnDataWriter(long cuboidId, String colName) throws IOException {
    this.cuboidId = cuboidId;
    this.colName = colName;

    this.tmpColDataFile = new File(mergeWorkingDirectory, cuboidId + "-" + colName + ".data");
    this.output = new CountingOutputStream(
            new BufferedOutputStream(FileUtils.openOutputStream(tmpColDataFile)));
}
 
Example #24
Source File: FragmentFilesMerger.java    From kylin-on-parquet-v2 with Apache License 2.0
public CuboidMetricDataWriter(long cuboidId, String metricName, int maxValLen) throws IOException {
    this.cuboidId = cuboidId;
    this.metricName = metricName;
    this.maxValLen = maxValLen;
    this.tmpMetricDataFile = new File(mergeWorkingDirectory, cuboidId + "-" + metricName + ".data");
    this.countingOutput = new CountingOutputStream(
            new BufferedOutputStream(FileUtils.openOutputStream(tmpMetricDataFile)));
    this.output = new DataOutputStream(countingOutput);
}
 
Example #25
Source File: ColumnarMemoryStorePersister.java    From kylin-on-parquet-v2 with Apache License 2.0
private CuboidMetaInfo persistCuboidData(long cuboidID, TblColRef[] dimensions,
        Map<TblColRef, Dictionary<String>> dictMaps, List<List<Object>> columnarCuboidValues,
        CountingOutputStream fragmentOutput) throws Exception {
    CuboidMetaInfo cuboidMeta = new CuboidMetaInfo();
    int dimCnt = dimensions.length;
    List<DimensionMetaInfo> dimensionMetaList = Lists.newArrayListWithExpectedSize(dimCnt);
    cuboidMeta.setDimensionsInfo(dimensionMetaList);
    cuboidMeta.setNumberOfDim(dimCnt);
    List<MetricMetaInfo> metricMetaInfoList = Lists.newArrayListWithCapacity(measures.length);
    cuboidMeta.setMetricsInfo(metricMetaInfoList);
    cuboidMeta.setNumberOfMetrics(measures.length);

    long rowNum = -1;
    for (int i = 0; i < dimCnt; i++) {
        if (rowNum == -1) {
            rowNum = columnarCuboidValues.get(i).size();
        }
        persistDimension(cuboidID, columnarCuboidValues.get(i), dimensionMetaList, fragmentOutput,
                dimensions[i], dictMaps);
    }

    for (int i = 0; i < measures.length; i++) {
        persistMetric(cuboidID, columnarCuboidValues.get(dimCnt + i), metricMetaInfoList, i, fragmentOutput);
    }
    cuboidMeta.setNumberOfRows(rowNum);
    return cuboidMeta;
}
 
Example #26
Source File: ColumnarMemoryStorePersister.java    From kylin-on-parquet-v2 with Apache License 2.0
/**
 * This method is used to persist the metrics data of one cuboid to a disk file.
 *
 * @param cuboidId the id of the cuboid being persisted
 * @param metricValueList the metric column values to write
 * @param metricMetaInfoList collects the per-metric metadata produced here
 * @param metricIdx index of the metric within the measures array
 * @param indexOut the counting output stream of the fragment file
 * @throws IOException if writing to the stream fails
 */
@SuppressWarnings({ "rawtypes", "unchecked" })
private void persistMetric(long cuboidId, List<Object> metricValueList, List<MetricMetaInfo> metricMetaInfoList,
        int metricIdx, CountingOutputStream indexOut) throws IOException {
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.start();

    MetricMetaInfo metricMeta = new MetricMetaInfo();
    metricMetaInfoList.add(metricMeta);
    String measureName = measures[metricIdx].getName();
    metricMeta.setName(measureName);
    metricMeta.setCol(metricIdx);
    metricMeta.setStartOffset((int) indexOut.getCount());

    DataType type = measures[metricIdx].getFunction().getReturnDataType();

    ColumnarMetricsEncoding metricsEncoding = ColumnarMetricsEncodingFactory.create(type);
    DataTypeSerializer serializer = metricsEncoding.asDataTypeSerializer();
    DataOutputStream metricsOut = new DataOutputStream(indexOut);

    int maxLength = serializer.maxLength();
    metricMeta.setMaxSerializeLength(maxLength);
    ByteBuffer metricsBuf = ByteBuffer.allocate(maxLength);
    ColumnarStoreMetricsDesc cStoreMetricsDesc = getColumnarStoreMetricsDesc(metricsEncoding);
    ColumnDataWriter metricsWriter = cStoreMetricsDesc.getMetricsWriter(metricsOut, metricValueList.size());
    // metricMeta.setStoreInFixedLength(false);
    for (Object metricValue : metricValueList) {
        metricsBuf.clear();
        serializer.serialize(metricValue, metricsBuf);
        byte[] metricBytes = Arrays.copyOf(metricsBuf.array(), metricsBuf.position());
        metricsWriter.write(metricBytes);
    }
    metricsWriter.flush();
    metricMeta.setMetricLength(metricsOut.size());
    metricMeta.setCompression(cStoreMetricsDesc.getCompression().name());
    stopwatch.stop();
    if (logger.isDebugEnabled()) {
        logger.debug("cuboid-{} saved measure:{}, took: {}ms", cuboidId, measureName, stopwatch.elapsedMillis());
    }
}
 
Example #27
Source File: LZ4CompressColumnTest.java    From kylin-on-parquet-v2 with Apache License 2.0 5 votes vote down vote up
public int writeCompressedData(int rowCnt) throws IOException {
    int compressBlockSize = 64 * 1024;
    CountingOutputStream countingOutputStream = new CountingOutputStream(new FileOutputStream(tmpColFile));
    LZ4CompressedColumnWriter writer = new LZ4CompressedColumnWriter(4, rowCnt, compressBlockSize,
            countingOutputStream);
    int[] colValues = new int[rowCnt];
    for (int i = 0; i < rowCnt; i++) {
        colValues[i] = i;
    }
    for (int i = 0; i < rowCnt; i++) {
        writer.write(Bytes.toBytes(colValues[i]));
    }
    writer.flush();
    return (int) countingOutputStream.getCount();
}
 
Example #28
Source File: RandomReadWritesTest.java    From BUbiNG with Apache License 2.0
@SuppressWarnings("resource")
public static int[] writeRecords(final String path, final int numRecords, final WarcRecord[] randomRecords, final int parallel) throws IOException, InterruptedException {
	final ProgressLogger pl = new ProgressLogger(LOGGER, "records");
	if (parallel <= 1) pl.expectedUpdates = numRecords;
	final ProgressLogger plb = new ProgressLogger(LOGGER, "KB");
	final CountingOutputStream cos = new CountingOutputStream(new FastBufferedOutputStream(new FileOutputStream (path)));
	final WarcWriter ww;
	if (parallel == 0) {
		ww = new UncompressedWarcWriter(cos);
		pl.start("Writing records…");
	} else if (parallel == 1) {
		ww = new CompressedWarcWriter(cos);
		pl.start("Writing records (compressed)…");
	} else {
		ww = null;
		pl.start("SHOULD NOT HAPPEN");
		throw new IllegalStateException();
	}
	plb.start();
	long written = 0;
	final int[] position = new int[numRecords];
	for (int i = 0; i < numRecords; i++) {
		final int pos = RandomTestMocks.RNG.nextInt(randomRecords.length);
		position[i] = pos;
		ww.write(randomRecords[pos]);
		if (parallel <= 0) {
			pl.lightUpdate();
			plb.update((cos.getCount() - written) / 1024);
		}
		written = cos.getCount();
	}
	ww.close();
	pl.done(numRecords);
	plb.done(cos.getCount());
	return position;
}
 
Example #29
Source File: TableWriter.java    From mph-table with Apache License 2.0
private static <K, V> void writeToIndexedOffsets(
        final File inputData,
        final File outputData,
        final File outputOffsets,
        final TableMeta<K, V> meta,
        final Iterable<Pair<K, V>> entries,
        final long dataSize) throws IOException {
    final long numEntries = meta.numEntries();
    final int offsetSize = meta.getConfig().bytesPerOffset(numEntries, dataSize);
    final long totalOffsetSize = numEntries * offsetSize;
    final BufferedFileDataOutputStream fileOut = new BufferedFileDataOutputStream(outputData);
    final CountingOutputStream countOut = new CountingOutputStream(fileOut);
    final long startMillis = System.currentTimeMillis();
    try (final MMapBuffer offsets = new MMapBuffer(outputOffsets, 0L, totalOffsetSize, FileChannel.MapMode.READ_WRITE, ByteOrder.nativeOrder());
         final LittleEndianDataOutputStream out = new LittleEndianDataOutputStream(countOut)) {
        for (final Pair<K, V> e : entries) {
            final long hash = meta.getHash(e.getFirst());
            if (hash < 0) {
                throw new IOException("inconsistent mph, known key hashed to -1: " + e.getFirst());
            }
            final long offset = countOut.getCount();
            if (offsetSize == 2) {
                offsets.memory().putShort(hash * 2L, (short) offset);
            } else if (offsetSize == 4) {
                offsets.memory().putInt(hash * 4L, (int) offset);
            } else {
                offsets.memory().putLong(hash * 8L, offset);
            }
            meta.getConfig().write(e.getFirst(), e.getSecond(), out);
        }
        offsets.sync(0L, totalOffsetSize);
        out.flush();
    }
    outputData.setReadOnly();
    outputOffsets.setReadOnly();
    LOGGER.info("wrote " + numEntries + " offsets for " + dataSize + " bytes of data in " +
                (System.currentTimeMillis() - startMillis) + " ms");
}
 
Example #30
Source File: BaseMethodStatsInterceptor.java    From datawave with Apache License 2.0
protected ResponseMethodStats doWrite(WriterInterceptorContext context) throws IOException, WebApplicationException {
    ResponseMethodStats stats;
    long start = System.nanoTime();
    OutputStream originalOutputStream = context.getOutputStream();
    CountingOutputStream countingStream = new CountingOutputStream(originalOutputStream);
    context.setOutputStream(countingStream);
    try {
        context.proceed();
    } finally {
        long stop = System.nanoTime();
        long time = TimeUnit.NANOSECONDS.toMillis(stop - start);
        
        context.setOutputStream(originalOutputStream);
        
        stats = (ResponseMethodStats) context.getProperty(RESPONSE_STATS_NAME);
        if (stats == null) {
            log.warn("No response stats found for " + getClass() + ". Using default.");
            stats = new ResponseMethodStats();
        }
        
        RequestMethodStats requestStats = (RequestMethodStats) context.getProperty(REQUEST_STATS_NAME);
        if (requestStats == null) {
            log.warn("No request method stats found for " + getClass() + ". Using default.");
            requestStats = new RequestMethodStats();
            requestStats.callStartTime = stop + TimeUnit.MILLISECONDS.toNanos(1);
        }
        
        stats.serializationTime = time;
        stats.loginTime = requestStats.getLoginTime();
        stats.callTime = TimeUnit.NANOSECONDS.toMillis(stop - requestStats.getCallStartTime());
        stats.bytesWritten = countingStream.getCount();
        // Merge in the headers we saved in the postProcess call, if any.
        putNew(stats.responseHeaders, context.getHeaders());
    }
    
    return stats;
}