Java Code Examples for org.apache.hadoop.io.compress.CodecPool#returnCompressor()

The following examples show how to use org.apache.hadoop.io.compress.CodecPool#returnCompressor(). You can vote up the examples you like or vote down the ones you don't like, and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source Project: hadoop   File: SequenceFile.java    License: Apache License 2.0 6 votes vote down vote up
/** Close the file.
 *
 * Serializers are shut down first; the pooled compressor is returned and
 * the underlying stream released in a finally block, so a failure while
 * closing a serializer cannot leak the compressor or the stream.
 */
@Override
public synchronized void close() throws IOException {
  try {
    keySerializer.close();
    uncompressedValSerializer.close();
    if (compressedValSerializer != null) {
      compressedValSerializer.close();
    }
  } finally {
    // CodecPool.returnCompressor is a no-op for null, so this is safe even
    // when no compressor was ever leased for this writer.
    CodecPool.returnCompressor(compressor);
    compressor = null;

    if (out != null) {
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      } else {
        out.flush();
      }
      out = null;
    }
  }
}
 
Example 2
Source Project: big-c   File: SequenceFile.java    License: Apache License 2.0 6 votes vote down vote up
/** Close the file.
 *
 * Serializers are shut down first; the pooled compressor is returned and
 * the underlying stream released in a finally block, so a failure while
 * closing a serializer cannot leak the compressor or the stream.
 */
@Override
public synchronized void close() throws IOException {
  try {
    keySerializer.close();
    uncompressedValSerializer.close();
    if (compressedValSerializer != null) {
      compressedValSerializer.close();
    }
  } finally {
    // CodecPool.returnCompressor is a no-op for null, so this is safe even
    // when no compressor was ever leased for this writer.
    CodecPool.returnCompressor(compressor);
    compressor = null;

    if (out != null) {
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      } else {
        out.flush();
      }
      out = null;
    }
  }
}
 
Example 3
Source Project: gemfirexd-oss   File: SequenceFile.java    License: Apache License 2.0 6 votes vote down vote up
/** Close the file.
 *
 * Serializers are shut down first; the pooled compressor is returned and
 * the underlying stream released in a finally block, so a failure while
 * closing a serializer cannot leak the compressor or the stream.
 */
@Override
public synchronized void close() throws IOException {
  try {
    keySerializer.close();
    uncompressedValSerializer.close();
    if (compressedValSerializer != null) {
      compressedValSerializer.close();
    }
  } finally {
    // CodecPool.returnCompressor is a no-op for null, so this is safe even
    // when no compressor was ever leased for this writer.
    CodecPool.returnCompressor(compressor);
    compressor = null;

    if (out != null) {
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      } else {
        out.flush();
      }
      out = null;
    }
  }
}
 
Example 4
Source Project: hadoop-gpu   File: SequenceFile.java    License: Apache License 2.0 6 votes vote down vote up
/** Close the file.
 *
 * Serializers are shut down first; the pooled compressor is returned and
 * the underlying stream released in a finally block, so a failure while
 * closing a serializer cannot leak the compressor or the stream.
 */
public synchronized void close() throws IOException {
  try {
    keySerializer.close();
    uncompressedValSerializer.close();
    if (compressedValSerializer != null) {
      compressedValSerializer.close();
    }
  } finally {
    // CodecPool.returnCompressor is a no-op for null, so this is safe even
    // when no compressor was ever leased for this writer.
    CodecPool.returnCompressor(compressor);
    compressor = null;

    if (out != null) {
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      } else {
        out.flush();
      }
      out = null;
    }
  }
}
 
Example 5
Source Project: RDFS   File: SequenceFile.java    License: Apache License 2.0 6 votes vote down vote up
/** Close the file.
 *
 * Serializers are shut down first; the pooled compressor is returned and
 * the underlying stream released in a finally block, so a failure while
 * closing a serializer cannot leak the compressor or the stream.
 */
public synchronized void close() throws IOException {
  try {
    keySerializer.close();
    uncompressedValSerializer.close();
    if (compressedValSerializer != null) {
      compressedValSerializer.close();
    }
  } finally {
    // CodecPool.returnCompressor is a no-op for null, so this is safe even
    // when no compressor was ever leased for this writer.
    CodecPool.returnCompressor(compressor);
    compressor = null;

    if (out != null) {
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      } else {
        out.flush();
      }
      out = null;
    }
  }
}
 
Example 6
Source Project: presto   File: HadoopCompressor.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Produces a fresh CompressedSliceOutput backed by the pooled compressor.
 * The returned output hands the compressor back to the pool when destroyed.
 */
@Override
public CompressedSliceOutput get()
{
    try {
        // Recycle the buffer and the compressor state for a new round.
        bufferedOutput.reset();
        compressor.reset();
        CompressionOutputStream stream = codec.createOutputStream(bufferedOutput, compressor);
        return new CompressedSliceOutput(stream, bufferedOutput, this, () -> CodecPool.returnCompressor(compressor));
    }
    catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
 
Example 7
Source Project: hadoop   File: DefaultOutputter.java    License: Apache License 2.0 5 votes vote down vote up
/** Closes the wrapped writer, then releases the pooled compressor (if any). */
@Override
public void close() throws IOException {
  try {
    writer.close();
  } finally {
    // Return the compressor even if writer.close() threw, so it is never leaked.
    if (compressor != null) {
      CodecPool.returnCompressor(compressor);
    }
  }
}
 
Example 8
Source Project: hadoop   File: Compression.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Hands a leased compressor back to the shared pool.
 * A null argument is silently ignored.
 */
public void returnCompressor(Compressor compressor) {
  if (compressor == null) {
    return;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Return a compressor: " + compressor.hashCode());
  }
  CodecPool.returnCompressor(compressor);
}
 
Example 9
Source Project: big-c   File: DefaultOutputter.java    License: Apache License 2.0 5 votes vote down vote up
/** Closes the wrapped writer, then releases the pooled compressor (if any). */
@Override
public void close() throws IOException {
  try {
    writer.close();
  } finally {
    // Return the compressor even if writer.close() threw, so it is never leaked.
    if (compressor != null) {
      CodecPool.returnCompressor(compressor);
    }
  }
}
 
Example 10
Source Project: big-c   File: Compression.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Hands a leased compressor back to the shared pool.
 * A null argument is silently ignored.
 */
public void returnCompressor(Compressor compressor) {
  if (compressor == null) {
    return;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Return a compressor: " + compressor.hashCode());
  }
  CodecPool.returnCompressor(compressor);
}
 
Example 11
Source Project: hadoop   File: IFile.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Finalizes the IFile output: writes the end-of-file markers, flushes
 * compression/checksum state, records the compressed byte count, and
 * returns the pooled compressor.  Statement order matters here — the
 * markers must precede the flush, and the stream position is sampled
 * only after all output has reached rawOut.
 */
public void close() throws IOException {

      // When IFile writer is created by BackupStore, we do not have
      // Key and Value classes set. So, check before closing the
      // serializers
      if (keyClass != null) {
        keySerializer.close();
        valueSerializer.close();
      }

      // Write EOF_MARKER for key/value length
      WritableUtils.writeVInt(out, EOF_MARKER);
      WritableUtils.writeVInt(out, EOF_MARKER);
      decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
      
      //Flush the stream
      out.flush();
  
      if (compressOutput) {
        // Flush compressed data and reset for potential codec reuse
        compressedOut.finish();
        compressedOut.resetState();
      }
      
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      }
      else {
        // Write the checksum
        checksumOut.finish();
      }

      // Compressed size = final stream position minus the starting offset.
      compressedBytesWritten = rawOut.getPos() - start;

      if (compressOutput) {
        // Return back the compressor
        CodecPool.returnCompressor(compressor);
        compressor = null;
      }

      out = null;
      if(writtenRecordsCounter != null) {
        writtenRecordsCounter.increment(numRecordsWritten);
      }
    }
 
Example 12
Source Project: tez   File: IFile.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Finalizes the IFile output: writes the value-end and end-of-file
 * markers, flushes compression/checksum state, records the compressed
 * byte count, and returns the pooled compressor.  Rejects a second
 * invocation via the {@code closed} flag.  Statement order is
 * significant — markers before finish, position sampled last.
 */
public void close() throws IOException {
  if (closed.getAndSet(true)) {
    throw new IOException("Writer was already closed earlier");
  }

  // When IFile writer is created by BackupStore, we do not have
  // Key and Value classes set. So, check before closing the
  // serializers
  if (closeSerializers) {
    keySerializer.close();
    valueSerializer.close();
  }

  // write V_END_MARKER as needed
  writeValueMarker(out);

  // Write EOF_MARKER for key/value length
  WritableUtils.writeVInt(out, EOF_MARKER);
  WritableUtils.writeVInt(out, EOF_MARKER);
  decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
  //account for header bytes
  decompressedBytesWritten += HEADER.length;

  // Close the underlying stream iff we own it...
  if (ownOutputStream) {
    out.close();
  } else {
    if (compressOutput) {
      // Flush compressed data and reset for potential codec reuse
      compressedOut.finish();
      compressedOut.resetState();
    }
    // Write the checksum and flush the buffer
    checksumOut.finish();
  }
  //header bytes are already included in rawOut
  compressedBytesWritten = rawOut.getPos() - start;

  if (compressOutput) {
    // Return back the compressor
    CodecPool.returnCompressor(compressor);
    compressor = null;
  }

  out = null;
  if (writtenRecordsCounter != null) {
    writtenRecordsCounter.increment(numRecordsWritten);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Total keys written=" + numRecordsWritten + "; rleEnabled=" + rle + "; Savings" +
        "(due to multi-kv/rle)=" + totalKeySaving + "; number of RLEs written=" +
        rleWritten + "; compressedLen=" + compressedBytesWritten + "; rawLen="
        + decompressedBytesWritten);
  }
}
 
Example 13
Source Project: big-c   File: IFile.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Finalizes the IFile output: writes the end-of-file markers, flushes
 * compression/checksum state, records the compressed byte count, and
 * returns the pooled compressor.  Statement order matters here — the
 * markers must precede the flush, and the stream position is sampled
 * only after all output has reached rawOut.
 */
public void close() throws IOException {

      // When IFile writer is created by BackupStore, we do not have
      // Key and Value classes set. So, check before closing the
      // serializers
      if (keyClass != null) {
        keySerializer.close();
        valueSerializer.close();
      }

      // Write EOF_MARKER for key/value length
      WritableUtils.writeVInt(out, EOF_MARKER);
      WritableUtils.writeVInt(out, EOF_MARKER);
      decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
      
      //Flush the stream
      out.flush();
  
      if (compressOutput) {
        // Flush compressed data and reset for potential codec reuse
        compressedOut.finish();
        compressedOut.resetState();
      }
      
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      }
      else {
        // Write the checksum
        checksumOut.finish();
      }

      // Compressed size = final stream position minus the starting offset.
      compressedBytesWritten = rawOut.getPos() - start;

      if (compressOutput) {
        // Return back the compressor
        CodecPool.returnCompressor(compressor);
        compressor = null;
      }

      out = null;
      if(writtenRecordsCounter != null) {
        writtenRecordsCounter.increment(numRecordsWritten);
      }
    }
 
Example 14
Source Project: hadoop-gpu   File: IFile.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Finalizes the IFile output: closes the serializers, writes the
 * end-of-file markers, flushes compression/checksum state, records the
 * compressed byte count, and returns the pooled compressor.  Statement
 * order matters — markers before flush, position sampled last.
 */
public void close() throws IOException {

      // Close the serializers
      keySerializer.close();
      valueSerializer.close();

      // Write EOF_MARKER for key/value length
      WritableUtils.writeVInt(out, EOF_MARKER);
      WritableUtils.writeVInt(out, EOF_MARKER);
      decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
      
      //Flush the stream
      out.flush();
  
      if (compressOutput) {
        // Flush compressed data and reset for potential codec reuse
        compressedOut.finish();
        compressedOut.resetState();
      }
      
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      }
      else {
        // Write the checksum
        checksumOut.finish();
      }

      // Compressed size = final stream position minus the starting offset.
      compressedBytesWritten = rawOut.getPos() - start;

      if (compressOutput) {
        // Return back the compressor
        CodecPool.returnCompressor(compressor);
        compressor = null;
      }

      out = null;
      if(writtenRecordsCounter != null) {
        writtenRecordsCounter.increment(numRecordsWritten);
      }
    }
 
Example 15
Source Project: hbase   File: Compression.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Gives a borrowed compressor back to the pool; null is a no-op.
 */
public void returnCompressor(Compressor compressor) {
  if (compressor == null) {
    return;
  }
  if (LOG.isTraceEnabled()) {
    LOG.trace("Returning compressor " + compressor + " to pool.");
  }
  CodecPool.returnCompressor(compressor);
}
 
Example 16
Source Project: parquet-mr   File: CodecFactory.java    License: Apache License 2.0 4 votes vote down vote up
/** Returns the pooled compressor, if one was acquired for this codec. */
@Override
public void release() {
  if (compressor == null) {
    return;
  }
  CodecPool.returnCompressor(compressor);
}
 
Example 17
/**
 * Closes the wrapped writer and returns the pooled compressor.
 *
 * The compressor is released in a finally block so it is not leaked when
 * {@code mWriter.close()} throws; CodecPool.returnCompressor is null-safe,
 * so a repeated close is harmless.
 */
@Override
public void close() throws IOException {
    try {
        this.mWriter.close();
    } finally {
        CodecPool.returnCompressor(mCompressor);
        mCompressor = null;
    }
}
 
Example 18
Source Project: RDFS   File: IFile.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Finalizes the IFile output: closes the serializers, writes the
 * end-of-file markers, flushes compression/checksum state, records the
 * compressed byte count, and returns the pooled compressor.  Statement
 * order matters — markers before flush, position sampled last.
 */
public void close() throws IOException {
  
  // Close the serializers
  keySerializer.close();
  valueSerializer.close();

  // Write EOF_MARKER for key/value length
  WritableUtils.writeVInt(out, EOF_MARKER);
  WritableUtils.writeVInt(out, EOF_MARKER);
  decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
  
  //Flush the stream
  out.flush();
  
  if (compressOutput) {
    // Flush compressed data and reset for potential codec reuse
    compressedOut.finish();
    compressedOut.resetState();
  }
  
  // Close the underlying stream iff we own it...
  if (ownOutputStream) {
    out.close();
  }
  else {
    // Write the checksum
    checksumOut.finish();
  }

  // Compressed size = final stream position minus the starting offset.
  compressedBytesWritten = rawOut.getPos() - start;

  if (compressOutput) {
    // Return back the compressor
    CodecPool.returnCompressor(compressor);
    compressor = null;
  }

  out = null;
  if(writtenRecordsCounter != null) {
    writtenRecordsCounter.increment(numRecordsWritten);
  }
}
 
Example 19
Source Project: RDFS   File: Compression.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Returns a leased compressor to the global pool.
 *
 * @param compressor the compressor to return; ignored when null
 */
public void returnCompressor(Compressor compressor) {
  if (compressor != null) {
    // Guard the debug statement so the message string (and the hashCode
    // call) is only built when debug logging is enabled — consistent with
    // the other Compression.returnCompressor implementations.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Return a compressor: " + compressor.hashCode());
    }
    CodecPool.returnCompressor(compressor);
  }
}
 
Example 20
Source Project: incubator-tez   File: IFile.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Finalizes the IFile output: writes the value-end and end-of-file
 * markers, flushes compression/checksum state, records the compressed
 * byte count, and returns the pooled compressor.  Rejects a second
 * invocation via the {@code closed} flag.  Statement order is
 * significant — markers before flush, position sampled last.
 */
public void close() throws IOException {
  checkState(!closed.getAndSet(true), "Writer was already closed earlier");

  // When IFile writer is created by BackupStore, we do not have
  // Key and Value classes set. So, check before closing the
  // serializers
  if (keyClass != null) {
    keySerializer.close();
    valueSerializer.close();
  }

  // write V_END_MARKER as needed
  writeValueMarker(out);

  // Write EOF_MARKER for key/value length
  WritableUtils.writeVInt(out, EOF_MARKER);
  WritableUtils.writeVInt(out, EOF_MARKER);
  decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
  //account for header bytes
  decompressedBytesWritten += HEADER.length;

  //Flush the stream
  out.flush();

  if (compressOutput) {
    // Flush compressed data and reset for potential codec reuse
    compressedOut.finish();
    compressedOut.resetState();
  }

  // Close the underlying stream iff we own it...
  if (ownOutputStream) {
    out.close();
  }
  else {
    // Write the checksum
    checksumOut.finish();
  }
  //header bytes are already included in rawOut
  compressedBytesWritten = rawOut.getPos() - start;

  if (compressOutput) {
    // Return back the compressor
    CodecPool.returnCompressor(compressor);
    compressor = null;
  }

  out = null;
  if (writtenRecordsCounter != null) {
    writtenRecordsCounter.increment(numRecordsWritten);
  }
  LOG.info("Total keys written=" + numRecordsWritten + "; Savings(optimized due to " +
      "multi-kv/rle)=" + totalKeySaving + "; number of RLEs written=" + rleWritten);
}