org.apache.hadoop.io.compress.Decompressor Java Examples

The following examples show how to use org.apache.hadoop.io.compress.Decompressor. They are drawn from a range of open-source projects; each example notes its source file, originating project, and license.
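Before the examples, here is a minimal, self-contained sketch of the pattern most of them follow: borrow a Decompressor from the CodecPool, wrap the raw input stream with it, and hand it back when done. The class name, method name, and configuration are placeholders for illustration, not taken from any of the projects below.

import java.io.IOException;
import java.io.InputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.io.compress.Decompressor;

public class DecompressorUsageSketch {
  public static void readCompressedFile(Configuration conf, Path path) throws IOException {
    // Pick a codec based on the file suffix (e.g. .gz, .bz2); null means uncompressed.
    CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(path);
    if (codec == null) {
      return; // plain file, no Decompressor involved
    }
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    try (InputStream in = codec.createInputStream(FileSystem.get(conf).open(path), decompressor)) {
      // ... consume the decompressed stream here ...
    } finally {
      CodecPool.returnDecompressor(decompressor); // always hand it back for reuse
    }
  }
}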
Example #1
Source File: TestZlibCompressorDecompressor.java    From big-c with Apache License 2.0
@Test
public void testZlibCompressorDecompressorSetDictionary() {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);

    checkSetDictionaryNullPointerException(zlibCompressor);
    checkSetDictionaryNullPointerException(zlibDecompressor);

    checkSetDictionaryArrayIndexOutOfBoundsException(zlibDecompressor);
    checkSetDictionaryArrayIndexOutOfBoundsException(zlibCompressor);
  } else {
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
 
Example #2
Source File: HadoopFileReader.java    From hadoopoffice with Apache License 2.0
public InputStream openFile(Path path) throws IOException {
  CompressionCodec codec = compressionCodecs.getCodec(path);
  FSDataInputStream fileIn = fs.open(path);
  // check if compressed
  if (codec == null) { // uncompressed
    LOG.debug("Reading from an uncompressed file \"" + path + "\"");
    return fileIn;
  } else { // compressed
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    this.openDecompressors.add(decompressor); // to be returned to the pool later via close()
    if (codec instanceof SplittableCompressionCodec) {
      LOG.debug("Reading from a compressed file \"" + path + "\" with splittable compression codec");
      long end = fs.getFileStatus(path).getLen();
      return ((SplittableCompressionCodec) codec).createInputStream(fileIn, decompressor, 0, end,
          SplittableCompressionCodec.READ_MODE.CONTINUOUS);
    } else {
      LOG.debug("Reading from a compressed file \"" + path + "\" with non-splittable compression codec");
      return codec.createInputStream(fileIn, decompressor);
    }
  }
}
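Several of the examples here, including the one above, stash borrowed decompressors in an openDecompressors list "to be returned to the pool later via close()". As a hypothetical sketch of that counterpart, modeled on the tearDown methods in the integration tests further down (the exact close() signature and the clear() call are assumptions, not taken from HadoopFileReader):

public void close() throws IOException {
  // Return all decompressors borrowed by openFile() to the CodecPool (hypothetical sketch).
  for (Decompressor currentDecompressor : this.openDecompressors) {
    if (currentDecompressor != null) {
      CodecPool.returnDecompressor(currentDecompressor);
    }
  }
  this.openDecompressors.clear();
}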
 
Example #3
Source File: MapReduceExcelInputIntegrationTest.java    From hadoopoffice with Apache License 2.0
private InputStream openFile(Path path) throws IOException {
  CompressionCodec codec = new CompressionCodecFactory(miniCluster.getConfig()).getCodec(path);
  FSDataInputStream fileIn = dfsCluster.getFileSystem().open(path);
  // check if compressed
  if (codec == null) { // uncompressed
    return fileIn;
  } else { // compressed
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    this.openDecompressors.add(decompressor); // to be returned to the pool later via close()
    if (codec instanceof SplittableCompressionCodec) {
      long end = dfsCluster.getFileSystem().getFileStatus(path).getLen();
      final SplitCompressionInputStream cIn = ((SplittableCompressionCodec) codec)
          .createInputStream(fileIn, decompressor, 0, end, SplittableCompressionCodec.READ_MODE.CONTINUOUS);
      return cIn;
    } else {
      return codec.createInputStream(fileIn, decompressor);
    }
  }
}
 
Example #4
Source File: TestZlibCompressorDecompressor.java    From hadoop with Apache License 2.0
@Test
public void testZlibCompressorDecompressorSetDictionary() {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);

    checkSetDictionaryNullPointerException(zlibCompressor);
    checkSetDictionaryNullPointerException(zlibDecompressor);

    checkSetDictionaryArrayIndexOutOfBoundsException(zlibDecompressor);
    checkSetDictionaryArrayIndexOutOfBoundsException(zlibCompressor);
  } else {
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
 
Example #5
Source File: Compression.java    From RDFS with Apache License 2.0
public Decompressor getDecompressor() throws IOException {
  CompressionCodec codec = getCodec();
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (decompressor != null) {
      if (decompressor.finished()) {
        // Somebody returned the decompressor to the CodecPool but is still
        // using it.
        LOG.warn("Decompressor obtained from CodecPool already finished()");
      } else {
        LOG.debug("Got a decompressor: " + decompressor.hashCode());
      }
      /**
       * The following statement is necessary to get around bugs in 0.18 where
       * a decompressor is referenced after being returned to the codec pool.
       */
      decompressor.reset();
    }
    return decompressor;
  }

  return null;
}
 
Example #6
Source File: Compression.java    From hadoop with Apache License 2.0
public Decompressor getDecompressor() throws IOException {
  CompressionCodec codec = getCodec();
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (decompressor != null) {
      if (decompressor.finished()) {
        // Somebody returned the decompressor to the CodecPool but is still
        // using it.
        LOG.warn("Decompressor obtained from CodecPool already finished()");
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Got a decompressor: " + decompressor.hashCode());
        }
      }
      /**
       * The following statement is necessary to get around bugs in 0.18 where
       * a decompressor is referenced after being returned to the codec pool.
       */
      decompressor.reset();
    }
    return decompressor;
  }

  return null;
}
 
Example #7
Source File: MapReduceBitcoinTransactionIntegrationTest.java    From hadoopcryptoledger with Apache License 2.0
private InputStream openFile(Path path) throws IOException {
  CompressionCodec codec = new CompressionCodecFactory(miniCluster.getConfig()).getCodec(path);
  FSDataInputStream fileIn = dfsCluster.getFileSystem().open(path);
  // check if compressed
  if (codec == null) { // uncompressed
    return fileIn;
  } else { // compressed
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    this.openDecompressors.add(decompressor); // to be returned to the pool later via close()
    if (codec instanceof SplittableCompressionCodec) {
      long end = dfsCluster.getFileSystem().getFileStatus(path).getLen();
      final SplitCompressionInputStream cIn = ((SplittableCompressionCodec) codec)
          .createInputStream(fileIn, decompressor, 0, end, SplittableCompressionCodec.READ_MODE.CONTINUOUS);
      return cIn;
    } else {
      return codec.createInputStream(fileIn, decompressor);
    }
  }
}
 
Example #8
Source File: MapReduceEthereumBlockIntegrationTest.java    From hadoopcryptoledger with Apache License 2.0
private InputStream openFile(Path path) throws IOException {
  CompressionCodec codec = new CompressionCodecFactory(miniCluster.getConfig()).getCodec(path);
  FSDataInputStream fileIn = dfsCluster.getFileSystem().open(path);
  // check if compressed
  if (codec == null) { // uncompressed
    return fileIn;
  } else { // compressed
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    this.openDecompressors.add(decompressor); // to be returned to the pool later via close()
    if (codec instanceof SplittableCompressionCodec) {
      long end = dfsCluster.getFileSystem().getFileStatus(path).getLen();
      final SplitCompressionInputStream cIn = ((SplittableCompressionCodec) codec)
          .createInputStream(fileIn, decompressor, 0, end, SplittableCompressionCodec.READ_MODE.CONTINUOUS);
      return cIn;
    } else {
      return codec.createInputStream(fileIn, decompressor);
    }
  }
}
 
Example #9
Source File: Spark2BitcoinBlockCounterSparkMasterIntegrationTest.java    From hadoopcryptoledger with Apache License 2.0
private InputStream openFile(Path path) throws IOException {
  CompressionCodec codec = new CompressionCodecFactory(conf).getCodec(path);
  FSDataInputStream fileIn = dfsCluster.getFileSystem().open(path);
  // check if compressed
  if (codec == null) { // uncompressed
    return fileIn;
  } else { // compressed
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    this.openDecompressors.add(decompressor); // to be returned to the pool later via close()
    if (codec instanceof SplittableCompressionCodec) {
      long end = dfsCluster.getFileSystem().getFileStatus(path).getLen();
      final SplitCompressionInputStream cIn = ((SplittableCompressionCodec) codec)
          .createInputStream(fileIn, decompressor, 0, end, SplittableCompressionCodec.READ_MODE.CONTINUOUS);
      return cIn;
    } else {
      return codec.createInputStream(fileIn, decompressor);
    }
  }
}
 
Example #10
Source File: MapReduceBitcoinBlockIntegrationTest.java    From hadoopcryptoledger with Apache License 2.0
private InputStream openFile(Path path) throws IOException {
  CompressionCodec codec = new CompressionCodecFactory(miniCluster.getConfig()).getCodec(path);
  FSDataInputStream fileIn = dfsCluster.getFileSystem().open(path);
  // check if compressed
  if (codec == null) { // uncompressed
    return fileIn;
  } else { // compressed
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    this.openDecompressors.add(decompressor); // to be returned to the pool later via close()
    if (codec instanceof SplittableCompressionCodec) {
      long end = dfsCluster.getFileSystem().getFileStatus(path).getLen();
      final SplitCompressionInputStream cIn = ((SplittableCompressionCodec) codec)
          .createInputStream(fileIn, decompressor, 0, end, SplittableCompressionCodec.READ_MODE.CONTINUOUS);
      return cIn;
    } else {
      return codec.createInputStream(fileIn, decompressor);
    }
  }
}
 
Example #11
Source File: TestZlibCompressorDecompressor.java    From big-c with Apache License 2.0
@Test
public void testZlibCompressorDecompressorWithConfiguration() {
  Configuration conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY, true);
  if (ZlibFactory.isNativeZlibLoaded(conf)) {
    byte[] rawData;
    int tryNumber = 5;
    int BYTE_SIZE = 10 * 1024;
    Compressor zlibCompressor = ZlibFactory.getZlibCompressor(conf);
    Decompressor zlibDecompressor = ZlibFactory.getZlibDecompressor(conf);
    rawData = generate(BYTE_SIZE);
    try {
      for (int i = 0; i < tryNumber; i++)
        compressDecompressZlib(rawData, (ZlibCompressor) zlibCompressor,
            (ZlibDecompressor) zlibDecompressor);
      zlibCompressor.reinit(conf);
    } catch (Exception ex) {
      fail("testZlibCompressorDecompressorWithConfiguration ex error " + ex);
    }
  } else {
    assertTrue("ZlibFactory is using native libs against request",
        ZlibFactory.isNativeZlibLoaded(conf));
  }
}
 
Example #12
Source File: HadoopUtils.java    From incubator-hivemall with Apache License 2.0
public static BufferedReader getBufferedReader(File file, MapredContext context)
        throws IOException {
    URI fileuri = file.toURI();
    Path path = new Path(fileuri);

    Configuration conf = context.getJobConf();
    CompressionCodecFactory ccf = new CompressionCodecFactory(conf);
    CompressionCodec codec = ccf.getCodec(path);

    if (codec == null) {
        return new BufferedReader(new FileReader(file));
    } else {
        Decompressor decompressor = CodecPool.getDecompressor(codec);
        FileInputStream fis = new FileInputStream(file);
        CompressionInputStream cis = codec.createInputStream(fis, decompressor);
        BufferedReader br = new BufferedReaderExt(new InputStreamReader(cis), decompressor);
        return br;
    }
}
 
Example #13
Source File: DTBCFile.java    From attic-apex-malhar with Apache License 2.0
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin, BlockRegion region, Configuration conf, Reader r) throws IOException
{
  this.compressAlgo = compressionAlgo;
  Decompressor decompressor = compressionAlgo.getDecompressor();
  this.region = region;
  try {
    InputStream in = compressAlgo.createDecompressionStream(
        new BoundedRangeFileInputStream(fsin, region.getOffset(), region.getCompressedSize()),
        decompressor, DTFile.getFSInputBufferSize(conf));
    int l = 1;
    r.baos.reset();
    byte[] buf = new byte[DTFile.getFSInputBufferSize(conf)];
    while (l >= 0) {
      l = in.read(buf);
      if (l > 0) {
        r.baos.write(buf, 0, l);
      }
    }
    // keep the decompressed data in the cache
    byte[] blockData = r.baos.toByteArray();
    rbain = new ReusableByteArrayInputStream(blockData);
  } catch (IOException e) {
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
 
Example #14
Source File: TestShuffleUtils.java    From tez with Apache License 2.0
@Test
public void testInternalErrorTranslation() throws Exception {
  String codecErrorMsg = "codec failure";
  CompressionInputStream mockCodecStream = mock(CompressionInputStream.class);
  when(mockCodecStream.read(any(byte[].class), anyInt(), anyInt()))
      .thenThrow(new InternalError(codecErrorMsg));
  Decompressor mockDecoder = mock(Decompressor.class);
  CompressionCodec mockCodec = mock(ConfigurableCodecForTest.class);
  when(mockCodec.createDecompressor()).thenReturn(mockDecoder);
  when(mockCodec.createInputStream(any(InputStream.class), any(Decompressor.class)))
      .thenReturn(mockCodecStream);
  byte[] header = new byte[] { (byte) 'T', (byte) 'I', (byte) 'F', (byte) 1};
  try {
    ShuffleUtils.shuffleToMemory(new byte[1024], new ByteArrayInputStream(header),
        1024, 128, mockCodec, false, 0, mock(Logger.class), null);
    Assert.fail("shuffle was supposed to throw!");
  } catch (IOException e) {
    Assert.assertTrue(e.getCause() instanceof InternalError);
    Assert.assertTrue(e.getMessage().contains(codecErrorMsg));
  }
}
 
Example #15
Source File: Compression.java    From hadoop-gpu with Apache License 2.0
public Decompressor getDecompressor() throws IOException {
  CompressionCodec codec = getCodec();
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (decompressor != null) {
      if (decompressor.finished()) {
        // Somebody returned the decompressor to the CodecPool but is still
        // using it.
        LOG.warn("Decompressor obtained from CodecPool already finished()");
      } else {
        LOG.debug("Got a decompressor: " + decompressor.hashCode());
      }
      /**
       * The following statement is necessary to get around bugs in 0.18 where
       * a decompressor is referenced after being returned to the codec pool.
       */
      decompressor.reset();
    }
    return decompressor;
  }

  return null;
}
 
Example #16
Source File: CellBlockBuilder.java    From hbase with Apache License 2.0
private ByteBuffer decompress(CompressionCodec compressor, InputStream cellBlockStream,
    int osInitialSize) throws IOException {
  // GZIPCodec fails w/ NPE if no configuration.
  if (compressor instanceof Configurable) {
    ((Configurable) compressor).setConf(this.conf);
  }
  Decompressor poolDecompressor = CodecPool.getDecompressor(compressor);
  CompressionInputStream cis = compressor.createInputStream(cellBlockStream, poolDecompressor);
  ByteBufferOutputStream bbos;
  try {
    // TODO: This is ugly. The buffer will be resized on us if we guess wrong.
    // TODO: Reuse buffers.
    bbos = new ByteBufferOutputStream(osInitialSize);
    IOUtils.copy(cis, bbos);
    bbos.close();
    return bbos.getByteBuffer();
  } finally {
    CodecPool.returnDecompressor(poolDecompressor);
  }
}
 
Example #17
Source File: Compression.java    From hbase with Apache License 2.0
public Decompressor getDecompressor() {
  CompressionCodec codec = getCodec(conf);
  if (codec != null) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    if (LOG.isTraceEnabled()) LOG.trace("Retrieved decompressor " + decompressor + " from pool.");
    if (decompressor != null) {
      if (decompressor.finished()) {
        // Somebody returned the decompressor to the CodecPool but is still using it.
        LOG.warn("Decompressor obtained from CodecPool is already finished()");
      }
      decompressor.reset();
    }
    return decompressor;
  }

  return null;
}
 
Example #18
Source File: MapReduceEthereumBlockIntegrationTest.java    From hadoopcryptoledger with Apache License 2.0
@AfterEach
public void tearDown() throws IOException {
  // remove the input and output directories
  dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true);
  dfsCluster.getFileSystem().delete(DFS_OUTPUT_DIR, true);
  // return any open decompressors to the CodecPool
  for (Decompressor currentDecompressor : this.openDecompressors) {
    if (currentDecompressor != null) {
      CodecPool.returnDecompressor(currentDecompressor);
    }
  }
}
 
Example #19
Source File: Compression.java    From hbase with Apache License 2.0
public InputStream createDecompressionStream(
    InputStream downStream, Decompressor decompressor,
    int downStreamBufferSize) throws IOException {
  CompressionCodec codec = getCodec(conf);
  // Set the internal buffer size to read from down stream.
  if (downStreamBufferSize > 0) {
    ((Configurable)codec).getConf().setInt("io.file.buffer.size",
        downStreamBufferSize);
  }
  CompressionInputStream cis =
      codec.createInputStream(downStream, decompressor);
  BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
  return bis2;
}
 
Example #20
Source File: Compression.java    From hbase with Apache License 2.0
public void returnDecompressor(Decompressor decompressor) {
  if (decompressor != null) {
    if (LOG.isTraceEnabled()) LOG.trace("Returning decompressor " + decompressor + " to pool.");
    CodecPool.returnDecompressor(decompressor);
    if (decompressor.getClass().isAnnotationPresent(DoNotPool.class)) {
      if (LOG.isTraceEnabled()) LOG.trace("Ending decompressor " + decompressor);
      decompressor.end();
    }
  }
}
 
Example #21
Source File: BGZFCodec.java    From Hadoop-BAM with MIT License
@Override
public SplitCompressionInputStream createInputStream(InputStream seekableIn,
    Decompressor decompressor, long start, long end, READ_MODE readMode) throws IOException {
  BGZFSplitGuesser splitGuesser = new BGZFSplitGuesser(seekableIn);
  long adjustedStart = splitGuesser.guessNextBGZFBlockStart(start, end);
  ((Seekable)seekableIn).seek(adjustedStart);
  return new BGZFSplitCompressionInputStream(seekableIn, adjustedStart, end);
}
 
Example #22
Source File: MapReduceBitcoinBlockIntegrationTest.java    From hadoopcryptoledger with Apache License 2.0
@AfterEach
public void tearDown() throws IOException {
  // remove the input and output directories
  dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true);
  dfsCluster.getFileSystem().delete(DFS_OUTPUT_DIR, true);
  // return any open decompressors to the CodecPool
  for (Decompressor currentDecompressor : this.openDecompressors) {
    if (currentDecompressor != null) {
      CodecPool.returnDecompressor(currentDecompressor);
    }
  }
}
 
Example #23
Source File: ImportRecordReader.java    From emr-dynamodb-connector with Apache License 2.0
public ImportRecordReader(JobConf job, Path path) throws IOException {
  FileSystem fs = path.getFileSystem(job);
  FSDataInputStream fileIn = fs.open(path);
  CompressionCodecFactory compressionCodecs = new CompressionCodecFactory(job);
  CompressionCodec codec = compressionCodecs.getCodec(path);
  if (null != codec) {
    Decompressor decompressor = CodecPool.getDecompressor(codec);
    this.lineReader = new LineReader(codec.createInputStream(fileIn, decompressor), job);
  } else {
    this.lineReader = new LineReader(fileIn, job);
  }
}
 
Example #24
Source File: Compression.java    From big-c with Apache License 2.0
@Override
public synchronized InputStream createDecompressionStream(
    InputStream downStream, Decompressor decompressor,
    int downStreamBufferSize) throws IOException {
  if (downStreamBufferSize > 0) {
    return new BufferedInputStream(downStream, downStreamBufferSize);
  }
  return downStream;
}
 
Example #25
Source File: MapReduceExcelInputIntegrationTest.java    From hadoopoffice with Apache License 2.0
@AfterEach
public void tearDown() throws IOException {
  // remove the input and output directories
  dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true);
  dfsCluster.getFileSystem().delete(DFS_OUTPUT_DIR, true);
  // return any open decompressors to the CodecPool
  for (Decompressor currentDecompressor : this.openDecompressors) {
    if (currentDecompressor != null) {
      CodecPool.returnDecompressor(currentDecompressor);
    }
  }
}
 
Example #26
Source File: MapReduceExcelOutputIntegrationTest.java    From hadoopoffice with Apache License 2.0
@AfterEach
public void tearDown() throws IOException {
  // remove the input and output directories
  dfsCluster.getFileSystem().delete(DFS_INPUT_DIR, true);
  dfsCluster.getFileSystem().delete(DFS_OUTPUT_DIR, true);
  // return any open decompressors to the CodecPool
  for (Decompressor currentDecompressor : this.openDecompressors) {
    if (currentDecompressor != null) {
      CodecPool.returnDecompressor(currentDecompressor);
    }
  }
}
 
Example #27
Source File: CodecPool.java    From incubator-tajo with Apache License 2.0
/**
 * Return the {@link Decompressor} to the pool.
 *
 * @param decompressor
 *          the <code>Decompressor</code> to be returned to the pool
 */
public static void returnDecompressor(Decompressor decompressor) {
  if (decompressor == null) {
    return;
  }
  // if the decompressor can't be reused, don't pool it.
  if (decompressor.getClass().isAnnotationPresent(DoNotPool.class)) {
    return;
  }
  decompressor.reset();
  payback(DECOMPRESSOR_POOL, decompressor);
}
 
Example #28
Source File: Compression.java    From hadoop-gpu with Apache License 2.0
@Override
public synchronized InputStream createDecompressionStream(
    InputStream downStream, Decompressor decompressor,
    int downStreamBufferSize) throws IOException {
  // Set the internal buffer size to read from down stream.
  if (downStreamBufferSize > 0) {
    codec.getConf().setInt("io.file.buffer.size", downStreamBufferSize);
  }
  CompressionInputStream cis =
      codec.createInputStream(downStream, decompressor);
  BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
  return bis2;
}
 
Example #29
Source File: CodecPool.java    From incubator-tajo with Apache License 2.0
/**
 * Get a {@link Decompressor} for the given {@link CompressionCodec} from the
 * pool or a new one.
 *
 * @param codec
 *          the <code>CompressionCodec</code> for which to get the
 *          <code>Decompressor</code>
 * @return <code>Decompressor</code> for the given
 *         <code>CompressionCodec</code>, from the pool or a new one
 */
public static Decompressor getDecompressor(CompressionCodec codec) {
  Decompressor decompressor = borrow(DECOMPRESSOR_POOL, codec
      .getDecompressorType());
  if (decompressor == null) {
    decompressor = codec.createDecompressor();
    LOG.info("Got brand-new decompressor ["+codec.getDefaultExtension()+"]");
  } else {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Got recycled decompressor");
    }
  }
  return decompressor;
}
 
Example #30
Source File: Compression.java    From RDFS with Apache License 2.0
@Override
public synchronized InputStream createDecompressionStream(
    InputStream downStream, Decompressor decompressor,
    int downStreamBufferSize) throws IOException {
  // Set the internal buffer size to read from down stream.
  if (downStreamBufferSize > 0) {
    codec.getConf().setInt("io.file.buffer.size", downStreamBufferSize);
  }
  CompressionInputStream cis =
      codec.createInputStream(downStream, decompressor);
  BufferedInputStream bis2 = new BufferedInputStream(cis, DATA_IBUF_SIZE);
  return bis2;
}