Java Code Examples for org.apache.hadoop.io.compress.CodecPool#returnCompressor()

The following examples show how to use org.apache.hadoop.io.compress.CodecPool#returnCompressor(). You can vote up the examples you like or vote down the ones you don't, and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: SequenceFile.java    From hadoop with Apache License 2.0 6 votes vote down vote up
/** Close the file. */
@Override
public synchronized void close() throws IOException {
  // Close the serializers; the compressed-value serializer only exists
  // when value compression is configured, so it may be null.
  try {
    keySerializer.close();
    uncompressedValSerializer.close();
    if (compressedValSerializer != null) {
      compressedValSerializer.close();
    }
  } finally {
    // Always hand the pooled compressor back, even if a serializer
    // failed to close above; otherwise the pool entry would leak.
    // Clearing the field keeps a repeated close() from returning the
    // same instance to the pool twice.
    CodecPool.returnCompressor(compressor);
    compressor = null;
  }

  if (out != null) {

    // Close the underlying stream iff we own it...
    if (ownOutputStream) {
      out.close();
    } else {
      out.flush();
    }
    out = null;
  }
}
 
Example 2
Source File: SequenceFile.java    From RDFS with Apache License 2.0 6 votes vote down vote up
/** Close the file. */
public synchronized void close() throws IOException {
  // Shut down the serializers; the compressed-value serializer is only
  // present when value compression is configured, hence the null check.
  keySerializer.close();
  uncompressedValSerializer.close();
  if (compressedValSerializer != null) {
    compressedValSerializer.close();
  }

  // Hand the pooled compressor back and drop our reference so a second
  // close() cannot return the same instance to the pool again.
  CodecPool.returnCompressor(compressor);
  compressor = null;

  if (out != null) {
    // Only close the underlying stream when this writer owns it;
    // otherwise just flush and leave it open for the caller.
    if (!ownOutputStream) {
      out.flush();
    } else {
      out.close();
    }
    out = null;
  }
}
 
Example 3
Source File: SequenceFile.java    From big-c with Apache License 2.0 6 votes vote down vote up
/** Close the file. */
@Override
public synchronized void close() throws IOException {
  // Flush/close the per-record serializers; the compressed-value
  // serializer only exists when value compression is enabled.
  keySerializer.close();
  uncompressedValSerializer.close();
  if (compressedValSerializer != null) {
    compressedValSerializer.close();
  }

  // Return the pooled compressor and clear the field so a repeated
  // close() does not hand the same instance back to the pool twice.
  CodecPool.returnCompressor(compressor);
  compressor = null;
  
  if (out != null) {
    
    // Close the underlying stream iff we own it...
    if (ownOutputStream) {
      out.close();
    } else {
      out.flush();
    }
    out = null;
  }
}
 
Example 4
Source File: SequenceFile.java    From gemfirexd-oss with Apache License 2.0 6 votes vote down vote up
/** Close the file. */
@Override
public synchronized void close() throws IOException {
  // Tear down the serializers first; the compressed-value serializer
  // may be null when value compression is not in use.
  keySerializer.close();
  uncompressedValSerializer.close();
  if (compressedValSerializer != null) {
    compressedValSerializer.close();
  }

  // Give the pooled compressor back and forget it so it cannot be
  // returned twice by a subsequent close().
  CodecPool.returnCompressor(compressor);
  compressor = null;

  // Nothing more to do once the output stream is already gone.
  if (out == null) {
    return;
  }
  // Close the underlying stream only if this writer owns it; otherwise
  // flush and leave ownership with the caller.
  if (ownOutputStream) {
    out.close();
  } else {
    out.flush();
  }
  out = null;
}
 
Example 5
Source File: SequenceFile.java    From hadoop-gpu with Apache License 2.0 6 votes vote down vote up
/** Close the file. */
public synchronized void close() throws IOException {
  // Flush/close the per-record serializers; the compressed-value
  // serializer only exists when value compression is enabled.
  keySerializer.close();
  uncompressedValSerializer.close();
  if (compressedValSerializer != null) {
    compressedValSerializer.close();
  }

  // Return the pooled compressor and clear the field so a repeated
  // close() does not hand the same instance back to the pool twice.
  CodecPool.returnCompressor(compressor);
  compressor = null;
  
  if (out != null) {
    
    // Close the underlying stream iff we own it...
    if (ownOutputStream) {
      out.close();
    } else {
      out.flush();
    }
    out = null;
  }
}
 
Example 6
Source File: HadoopCompressor.java    From presto with Apache License 2.0 5 votes vote down vote up
@Override
public CompressedSliceOutput get()
{
    try {
        // Recycle the pooled compressor and the scratch output buffer
        // before layering a fresh compression stream over them.
        compressor.reset();
        bufferedOutput.reset();
        CompressionOutputStream stream = codec.createOutputStream(bufferedOutput, compressor);
        // The destroy callback hands the compressor back to Hadoop's pool.
        return new CompressedSliceOutput(stream, bufferedOutput, this, () -> CodecPool.returnCompressor(compressor));
    }
    catch (IOException e) {
        throw new UncheckedIOException(e);
    }
}
 
Example 7
Source File: DefaultOutputter.java    From hadoop with Apache License 2.0 5 votes vote down vote up
@Override
public void close() throws IOException {
  // Close the wrapped writer first; the finally block guarantees the
  // pooled compressor is returned even if writer.close() throws.
  try {
    writer.close();
  } finally {
    if (compressor != null) {
      CodecPool.returnCompressor(compressor);
    }
  }
}
 
Example 8
Source File: Compression.java    From hadoop with Apache License 2.0 5 votes vote down vote up
// Hand a leased compressor back to the shared pool; a null argument
// means no pooled compressor was ever borrowed, so it is a no-op.
public void returnCompressor(Compressor compressor) {
  if (compressor == null) {
    return;
  }
  // Guarded so the concatenation cost is only paid when debug is on.
  if (LOG.isDebugEnabled()) {
    LOG.debug("Return a compressor: " + compressor.hashCode());
  }
  CodecPool.returnCompressor(compressor);
}
 
Example 9
Source File: DefaultOutputter.java    From big-c with Apache License 2.0 5 votes vote down vote up
@Override
public void close() throws IOException {
  // Close the wrapped writer first; the finally block guarantees the
  // pooled compressor is returned even if writer.close() throws.
  try {
    writer.close();
  } finally {
    if (compressor != null) {
      CodecPool.returnCompressor(compressor);
    }
  }
}
 
Example 10
Source File: Compression.java    From big-c with Apache License 2.0 5 votes vote down vote up
// Hand a leased compressor back to the shared pool; null means no
// pooled compressor was borrowed, so the call is a no-op.
public void returnCompressor(Compressor compressor) {
  if (compressor != null) {
    // Guarded so the string concatenation is only paid when debug is on.
    if(LOG.isDebugEnabled()) {
      LOG.debug("Return a compressor: " + compressor.hashCode());
    }
    CodecPool.returnCompressor(compressor);
  }
}
 
Example 11
Source File: IFile.java    From incubator-tez with Apache License 2.0 4 votes vote down vote up
// Finalizes the IFile: writes end/EOF markers, flushes and finishes the
// compression/checksum layers, returns the pooled compressor, and updates
// byte/record accounting. The statement order matters — markers must hit
// the stream before it is finished/closed.
public void close() throws IOException {
  // Guard against double-close: flips the flag and fails if already set.
  checkState(!closed.getAndSet(true), "Writer was already closed earlier");

  // When IFile writer is created by BackupStore, we do not have
  // Key and Value classes set. So, check before closing the
  // serializers
  if (keyClass != null) {
    keySerializer.close();
    valueSerializer.close();
  }

  // write V_END_MARKER as needed
  writeValueMarker(out);

  // Write EOF_MARKER for key/value length
  WritableUtils.writeVInt(out, EOF_MARKER);
  WritableUtils.writeVInt(out, EOF_MARKER);
  decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
  //account for header bytes
  decompressedBytesWritten += HEADER.length;

  //Flush the stream
  out.flush();

  if (compressOutput) {
    // Flush
    compressedOut.finish();
    compressedOut.resetState();
  }

  // Close the underlying stream iff we own it...
  if (ownOutputStream) {
    out.close();
  }
  else {
    // Write the checksum
    checksumOut.finish();
  }
  //header bytes are already included in rawOut
  compressedBytesWritten = rawOut.getPos() - start;

  if (compressOutput) {
    // Return back the compressor
    CodecPool.returnCompressor(compressor);
    compressor = null;
  }

  out = null;
  if (writtenRecordsCounter != null) {
    writtenRecordsCounter.increment(numRecordsWritten);
  }
  LOG.info("Total keys written=" + numRecordsWritten + "; Savings(optimized due to " +
      "multi-kv/rle)=" + totalKeySaving + "; number of RLEs written=" + rleWritten);
}
 
Example 12
Source File: Compression.java    From RDFS with Apache License 2.0 4 votes vote down vote up
// Hand a leased compressor back to the shared pool; a null argument
// means no pooled compressor was borrowed, so the call is a no-op.
public void returnCompressor(Compressor compressor) {
  if (compressor != null) {
    // Guard the debug line (as the other Compression variants do) so
    // the string concatenation is only paid when debug logging is on.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Return a compressor: " + compressor.hashCode());
    }
    CodecPool.returnCompressor(compressor);
  }
}
 
Example 13
Source File: IFile.java    From RDFS with Apache License 2.0 4 votes vote down vote up
// Finalizes the IFile: writes EOF markers, flushes/finishes the
// compression and checksum layers, returns the pooled compressor, and
// updates byte/record accounting. Statement order matters — the markers
// must reach the stream before it is finished or closed.
public void close() throws IOException {
  
  // Close the serializers
  keySerializer.close();
  valueSerializer.close();

  // Write EOF_MARKER for key/value length
  WritableUtils.writeVInt(out, EOF_MARKER);
  WritableUtils.writeVInt(out, EOF_MARKER);
  decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
  
  //Flush the stream
  out.flush();
  
  if (compressOutput) {
    // Flush
    compressedOut.finish();
    compressedOut.resetState();
  }
  
  // Close the underlying stream iff we own it...
  if (ownOutputStream) {
    out.close();
  }
  else {
    // Write the checksum
    checksumOut.finish();
  }

  compressedBytesWritten = rawOut.getPos() - start;

  if (compressOutput) {
    // Return back the compressor
    CodecPool.returnCompressor(compressor);
    compressor = null;
  }

  out = null;
  if(writtenRecordsCounter != null) {
    writtenRecordsCounter.increment(numRecordsWritten);
  }
}
 
Example 14
Source File: DelimitedTextFileReaderWriterFactory.java    From secor with Apache License 2.0 4 votes vote down vote up
@Override
public void close() throws IOException {
    // Close the wrapped writer, and always return the pooled compressor
    // even if close() throws, so the pool entry is not leaked. Clearing
    // the field keeps a repeated close() from returning it twice.
    try {
        this.mWriter.close();
    } finally {
        if (mCompressor != null) {
            CodecPool.returnCompressor(mCompressor);
            mCompressor = null;
        }
    }
}
 
Example 15
Source File: CodecFactory.java    From parquet-mr with Apache License 2.0 4 votes vote down vote up
@Override
public void release() {
  // Return the leased compressor to Hadoop's shared pool.
  // NOTE(review): the field is not cleared here, so calling release()
  // twice would hand the same instance back twice — confirm callers
  // invoke release() at most once.
  if (compressor != null) {
    CodecPool.returnCompressor(compressor);
  }
}
 
Example 16
Source File: Compression.java    From hbase with Apache License 2.0 4 votes vote down vote up
// Hand a leased compressor back to the shared pool; a null argument
// means no pooled compressor was borrowed, so there is nothing to do.
public void returnCompressor(Compressor compressor) {
  if (compressor == null) {
    return;
  }
  // Guarded so the concatenation is only paid when trace is enabled.
  if (LOG.isTraceEnabled()) {
    LOG.trace("Returning compressor " + compressor + " to pool.");
  }
  CodecPool.returnCompressor(compressor);
}
 
Example 17
Source File: IFile.java    From hadoop-gpu with Apache License 2.0 4 votes vote down vote up
// Finalizes the IFile: writes EOF markers, flushes/finishes the
// compression and checksum layers, returns the pooled compressor, and
// updates byte/record accounting. Statement order matters — the markers
// must reach the stream before it is finished or closed.
public void close() throws IOException {

      // Close the serializers
      keySerializer.close();
      valueSerializer.close();

      // Write EOF_MARKER for key/value length
      WritableUtils.writeVInt(out, EOF_MARKER);
      WritableUtils.writeVInt(out, EOF_MARKER);
      decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
      
      //Flush the stream
      out.flush();
  
      if (compressOutput) {
        // Flush
        compressedOut.finish();
        compressedOut.resetState();
      }
      
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      }
      else {
        // Write the checksum
        checksumOut.finish();
      }

      compressedBytesWritten = rawOut.getPos() - start;

      if (compressOutput) {
        // Return back the compressor
        CodecPool.returnCompressor(compressor);
        compressor = null;
      }

      out = null;
      if(writtenRecordsCounter != null) {
        writtenRecordsCounter.increment(numRecordsWritten);
      }
    }
 
Example 18
Source File: IFile.java    From big-c with Apache License 2.0 4 votes vote down vote up
// Finalizes the IFile: writes EOF markers, flushes/finishes the
// compression and checksum layers, returns the pooled compressor, and
// updates byte/record accounting. Statement order matters — the markers
// must reach the stream before it is finished or closed.
public void close() throws IOException {

      // When IFile writer is created by BackupStore, we do not have
      // Key and Value classes set. So, check before closing the
      // serializers
      if (keyClass != null) {
        keySerializer.close();
        valueSerializer.close();
      }

      // Write EOF_MARKER for key/value length
      WritableUtils.writeVInt(out, EOF_MARKER);
      WritableUtils.writeVInt(out, EOF_MARKER);
      decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
      
      //Flush the stream
      out.flush();
  
      if (compressOutput) {
        // Flush
        compressedOut.finish();
        compressedOut.resetState();
      }
      
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      }
      else {
        // Write the checksum
        checksumOut.finish();
      }

      compressedBytesWritten = rawOut.getPos() - start;

      if (compressOutput) {
        // Return back the compressor
        CodecPool.returnCompressor(compressor);
        compressor = null;
      }

      out = null;
      if(writtenRecordsCounter != null) {
        writtenRecordsCounter.increment(numRecordsWritten);
      }
    }
 
Example 19
Source File: IFile.java    From tez with Apache License 2.0 4 votes vote down vote up
// Finalizes the IFile: writes end/EOF markers, finishes the compression
// and checksum layers, returns the pooled compressor, and updates
// byte/record accounting. Statement order matters — markers must reach
// the stream before it is finished or closed.
public void close() throws IOException {
  // Guard against double-close.
  if (closed.getAndSet(true)) {
    throw new IOException("Writer was already closed earlier");
  }

  // When IFile writer is created by BackupStore, we do not have
  // Key and Value classes set. So, check before closing the
  // serializers
  if (closeSerializers) {
    keySerializer.close();
    valueSerializer.close();
  }

  // write V_END_MARKER as needed
  writeValueMarker(out);

  // Write EOF_MARKER for key/value length
  WritableUtils.writeVInt(out, EOF_MARKER);
  WritableUtils.writeVInt(out, EOF_MARKER);
  decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
  //account for header bytes
  decompressedBytesWritten += HEADER.length;

  // Close the underlying stream iff we own it...
  if (ownOutputStream) {
    out.close();
  } else {
    // NOTE(review): unlike older variants, the compression stream is
    // only finished explicitly on the non-owned path here; presumably
    // out.close() finishes it on the owned path — confirm upstream.
    if (compressOutput) {
      // Flush
      compressedOut.finish();
      compressedOut.resetState();
    }
    // Write the checksum and flush the buffer
    checksumOut.finish();
  }
  //header bytes are already included in rawOut
  compressedBytesWritten = rawOut.getPos() - start;

  if (compressOutput) {
    // Return back the compressor
    CodecPool.returnCompressor(compressor);
    compressor = null;
  }

  out = null;
  if (writtenRecordsCounter != null) {
    writtenRecordsCounter.increment(numRecordsWritten);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Total keys written=" + numRecordsWritten + "; rleEnabled=" + rle + "; Savings" +
        "(due to multi-kv/rle)=" + totalKeySaving + "; number of RLEs written=" +
        rleWritten + "; compressedLen=" + compressedBytesWritten + "; rawLen="
        + decompressedBytesWritten);
  }
}
 
Example 20
Source File: IFile.java    From hadoop with Apache License 2.0 4 votes vote down vote up
// Finalizes the IFile: writes EOF markers, flushes/finishes the
// compression and checksum layers, returns the pooled compressor, and
// updates byte/record accounting. Statement order matters — the markers
// must reach the stream before it is finished or closed.
public void close() throws IOException {

      // When IFile writer is created by BackupStore, we do not have
      // Key and Value classes set. So, check before closing the
      // serializers
      if (keyClass != null) {
        keySerializer.close();
        valueSerializer.close();
      }

      // Write EOF_MARKER for key/value length
      WritableUtils.writeVInt(out, EOF_MARKER);
      WritableUtils.writeVInt(out, EOF_MARKER);
      decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
      
      //Flush the stream
      out.flush();
  
      if (compressOutput) {
        // Flush
        compressedOut.finish();
        compressedOut.resetState();
      }
      
      // Close the underlying stream iff we own it...
      if (ownOutputStream) {
        out.close();
      }
      else {
        // Write the checksum
        checksumOut.finish();
      }

      compressedBytesWritten = rawOut.getPos() - start;

      if (compressOutput) {
        // Return back the compressor
        CodecPool.returnCompressor(compressor);
        compressor = null;
      }

      out = null;
      if(writtenRecordsCounter != null) {
        writtenRecordsCounter.increment(numRecordsWritten);
      }
    }