org.apache.hadoop.io.file.tfile.Compression.Algorithm Java Examples

The following examples show how to use org.apache.hadoop.io.file.tfile.Compression.Algorithm. Each example is taken from an open-source project; the source file, project, and license are noted above it.
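Before diving into the examples, it helps to see the enum's lifecycle in one place: each Algorithm constant (LZO, GZ, NONE) hands out pooled Compressor/Decompressor instances and wraps raw streams with compression-aware ones. The sketch below is a minimal, hypothetical round-trip under two stated assumptions: it must live in the org.apache.hadoop.io.file.tfile package (the Compression class is package-private in stock Hadoop), and the class name AlgorithmRoundTrip is invented for illustration.

// Hedged sketch: a GZ round-trip using only methods that appear in the
// examples below. Assumes same-package access to Compression.Algorithm.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.Decompressor;

public class AlgorithmRoundTrip {
  public static void main(String[] args) throws IOException {
    Compression.Algorithm algo = Compression.Algorithm.GZ;
    byte[] original = "hello tfile compression".getBytes(StandardCharsets.UTF_8);

    // Borrow a pooled compressor, wrap the sink, and always return the
    // compressor -- the same acquire/use/return pattern the examples follow.
    Compressor compressor = algo.getCompressor();
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try {
      OutputStream out = algo.createCompressionStream(compressed, compressor, 0);
      out.write(original);
      out.close(); // closing finalizes and flushes the compressed output
    } finally {
      algo.returnCompressor(compressor);
    }

    // Same discipline on the read side with a pooled decompressor.
    Decompressor decompressor = algo.getDecompressor();
    ByteArrayOutputStream restored = new ByteArrayOutputStream();
    try {
      InputStream in = algo.createDecompressionStream(
          new ByteArrayInputStream(compressed.toByteArray()), decompressor, 0);
      byte[] buf = new byte[4096];
      for (int n = in.read(buf); n >= 0; n = in.read(buf)) {
        restored.write(buf, 0, n);
      }
      in.close();
    } finally {
      algo.returnDecompressor(decompressor);
    }
  }
}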
Example #1
Source File: BCFile.java    From hadoop with Apache License 2.0
/**
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @throws IOException
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
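Worth noting in this constructor: if createCompressionStream throws, the borrowed Compressor is returned to the codec pool before the exception is rethrown, so a failed block writer never leaks a pooled codec instance. Every reader/writer pair in these examples follows the same acquire/use/return discipline.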
 
Example #2
Source File: BCFile.java    From RDFS with Apache License 2.0
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
 
Example #3
Source File: BCFile.java    From RDFS with Apache License 2.0
/**
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @throws IOException
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.get());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Example #4
Source File: BCFile.java    From RDFS with Apache License 2.0
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    this.in = compressAlgo.createDecompressionStream(
        new BoundedRangeFileInputStream(fsin, this.region.getOffset(),
            this.region.getCompressedSize()),
        decompressor, TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
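The decompression stream here is layered over a BoundedRangeFileInputStream, which clamps reads to the block's region (offset through offset + compressedSize) of the shared FSDataInputStream; the codec therefore cannot read past the end of the block into neighboring data.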
 
Example #5
Source File: DTBCFile.java    From attic-apex-malhar with Apache License 2.0
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin, BlockRegion region, Configuration conf, Reader r) throws IOException
{
  this.compressAlgo = compressionAlgo;
  Decompressor decompressor = compressionAlgo.getDecompressor();
  this.region = region;
  try {
    InputStream in = compressAlgo.createDecompressionStream(
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize()),
        decompressor, DTFile.getFSInputBufferSize(conf));
    r.baos.reset();
    byte[] buf = new byte[DTFile.getFSInputBufferSize(conf)];
    int l;
    while ((l = in.read(buf)) >= 0) {
      if (l > 0) {
        r.baos.write(buf, 0, l);
      }
    }
    // cache the decompressed block data so later reads are in-memory
    byte[] blockData = r.baos.toByteArray();
    rbain = new ReusableByteArrayInputStream(blockData);
  } catch (IOException e) {
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
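This DTFile variant takes a different approach from the streaming reader in Example #4: it eagerly decompresses the whole block into an in-memory buffer (r.baos) and serves subsequent reads from a ReusableByteArrayInputStream, trading memory for cheap repeated access to cached blocks.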
 
Example #6
Source File: DTBCFile.java    From attic-apex-malhar with Apache License 2.0
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
 
Example #7
Source File: DTBCFile.java    From attic-apex-malhar with Apache License 2.0
/**
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @throws IOException
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(DTFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Example #8
Source File: BCFile.java    From hadoop-gpu with Apache License 2.0
/**
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @throws IOException
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.get());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Example #9
Source File: BCFile.java    From big-c with Apache License 2.0
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    this.in = compressAlgo.createDecompressionStream(
        new BoundedRangeFileInputStream(fsin, this.region.getOffset(),
            this.region.getCompressedSize()),
        decompressor, TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
 
Example #10
Source File: BCFile.java    From big-c with Apache License 2.0
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
 
Example #11
Source File: BCFile.java    From hadoop-gpu with Apache License 2.0
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
 
Example #12
Source File: BCFile.java    From hadoop with Apache License 2.0
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    this.in = compressAlgo.createDecompressionStream(
        new BoundedRangeFileInputStream(fsin, this.region.getOffset(),
            this.region.getCompressedSize()),
        decompressor, TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
 
Example #13
Source File: BCFile.java    From big-c with Apache License 2.0
/**
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @throws IOException
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Example #14
Source File: BCFile.java    From hadoop with Apache License 2.0
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
 
Example #15
Source File: BCFile.java    From hadoop-gpu with Apache License 2.0
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    this.in = compressAlgo.createDecompressionStream(
        new BoundedRangeFileInputStream(fsin, this.region.getOffset(),
            this.region.getCompressedSize()),
        decompressor, TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
 
Example #16
Source File: TestTFileLzoCodecsByteArrays.java    From hadoop with Apache License 2.0
/**
 * Test LZO compression codec, using the same test cases as in the ByteArrays.
 */
@Override
public void setUp() throws IOException {
  skip = !(Algorithm.LZO.isSupported());
  if (skip) {
    System.out.println("Skipped");
  }

  // TODO: sample the generated key/value records, and put the numbers below
  init(Compression.Algorithm.LZO.getName(), "memcmp", 2605, 2558);
  if (!skip)
    super.setUp();
}
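LZO support depends on a native-backed codec being available at runtime, which is why these tests probe Algorithm.LZO.isSupported() before running. Below is a hedged sketch of the same guard in application code, assuming same-package access as in the sketch near the top of this page; GZ is backed by Hadoop's built-in DefaultCodec, so it serves as a safe fallback.

// Prefer LZO when its codec is available; otherwise fall back to GZ.
Compression.Algorithm algo = Compression.Algorithm.LZO.isSupported()
    ? Compression.Algorithm.LZO
    : Compression.Algorithm.GZ;
System.out.println("Using compression algorithm: " + algo.getName());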
 
Example #17
Source File: DTBCFile.java    From attic-apex-malhar with Apache License 2.0
private BlockReader createReader(Algorithm compressAlgo, BlockRegion region)
    throws IOException {
  BlockReader br = (BlockReader) CacheManager.get(region.getOffset() + this.toString());
  if (br == null) {
    RBlockState rbs = new RBlockState(compressAlgo, in, region, conf, this);
    br = new BlockReader(rbs);
    String cacheKey = region.getOffset() + this.toString();
    CacheManager.put(cacheKey, br);
    cacheKeys.add(cacheKey);
  } else {
    br.renew();
  }
  return br;
}
 
Example #18
Source File: TestTFileLzoCodecsStreams.java    From attic-apex-malhar with Apache License 2.0
/**
 * Test LZO compression codec, using the same test cases as in the ByteArrays.
 */
@Override
public void setUp() throws IOException {
  skip = !(Algorithm.LZO.isSupported());
  if (skip) {
    System.out.println("Skipped");
  }
  init(Compression.Algorithm.LZO.getName(), "memcmp");
  if (!skip)
    super.setUp();
}
 
Example #19
Source File: TestTFileLzoCodecsByteArrays.java    From attic-apex-malhar with Apache License 2.0
/**
 * Test LZO compression codec, using the same test cases as in the ByteArrays.
 */
@Override
public void setUp() throws IOException {
  skip = !(Algorithm.LZO.isSupported());
  if (skip) {
    System.out.println("Skipped");
  }

  // TODO: sample the generated key/value records, and put the numbers below
  init(Compression.Algorithm.LZO.getName(), "memcmp", 2605, 2558);
  if (!skip)
    super.setUp();
}
 
Example #20
Source File: TestTFileLzoCodecsStreams.java    From RDFS with Apache License 2.0
/**
 * Test LZO compression codec, using the same test cases as in the ByteArrays.
 */
@Override
public void setUp() throws IOException {
  skip = !(Algorithm.LZO.isSupported());
  if (skip) {
    System.out.println("Skipped");
  }
  init(Compression.Algorithm.LZO.getName(), "memcmp", "TFileTestCodecsLzo");
  if (!skip) 
    super.setUp();
}
 
Example #21
Source File: TestTFileLzoCodecsByteArrays.java    From big-c with Apache License 2.0
/**
 * Test LZO compression codec, using the same test cases as in the ByteArrays.
 */
@Override
public void setUp() throws IOException {
  skip = !(Algorithm.LZO.isSupported());
  if (skip) {
    System.out.println("Skipped");
  }

  // TODO: sample the generated key/value records, and put the numbers below
  init(Compression.Algorithm.LZO.getName(), "memcmp", 2605, 2558);
  if (!skip)
    super.setUp();
}
 
Example #22
Source File: TestTFileLzoCodecsStreams.java    From big-c with Apache License 2.0
/**
 * Test LZO compression codec, using the same test cases as in the ByteArrays.
 */
@Override
public void setUp() throws IOException {
  skip = !(Algorithm.LZO.isSupported());
  if (skip) {
    System.out.println("Skipped");
  }
  init(Compression.Algorithm.LZO.getName(), "memcmp");
  if (!skip) 
    super.setUp();
}
 
Example #23
Source File: TestTFileLzoCodecsByteArrays.java    From RDFS with Apache License 2.0
/**
 * Test LZO compression codec, using the same test cases as in the ByteArrays.
 */
@Override
public void setUp() throws IOException {
  skip = !(Algorithm.LZO.isSupported());
  if (skip) {
    System.out.println("Skipped");
  }

  // TODO: sample the generated key/value records, and put the numbers below
  init(Compression.Algorithm.LZO.getName(), "memcmp", "TFileTestCodecsLzo",
      2605, 2558);
  if (!skip)
    super.setUp();
}
 
Example #24
Source File: TestTFileLzoCodecsStreams.java    From hadoop-gpu with Apache License 2.0
/**
 * Test LZO compression codec, using the same test cases as in the ByteArrays.
 */
@Override
public void setUp() throws IOException {
  skip = !(Algorithm.LZO.isSupported());
  if (skip) {
    System.out.println("Skipped");
  }
  init(Compression.Algorithm.LZO.getName(), "memcmp", "TFileTestCodecsLzo");
  if (!skip) 
    super.setUp();
}
 
Example #25
Source File: TestTFileLzoCodecsByteArrays.java    From hadoop-gpu with Apache License 2.0
/**
 * Test LZO compression codec, using the same test cases as in the ByteArrays.
 */
@Override
public void setUp() throws IOException {
  skip = !(Algorithm.LZO.isSupported());
  if (skip) {
    System.out.println("Skipped");
  }

  // TODO: sample the generated key/value records, and put the numbers below
  init(Compression.Algorithm.LZO.getName(), "memcmp", "TFileTestCodecsLzo",
      2605, 2558);
  if (!skip)
    super.setUp();
}
 
Example #26
Source File: TestTFileLzoCodecsStreams.java    From hadoop with Apache License 2.0
/**
 * Test LZO compression codec, using the same test cases as in the ByteArrays.
 */
@Override
public void setUp() throws IOException {
  skip = !(Algorithm.LZO.isSupported());
  if (skip) {
    System.out.println("Skipped");
  }
  init(Compression.Algorithm.LZO.getName(), "memcmp");
  if (!skip) 
    super.setUp();
}
 
Example #27
Source File: BCFile.java    From RDFS with Apache License 2.0
MetaBlockRegister(String name, Algorithm compressAlgo) {
  this.name = name;
  this.compressAlgo = compressAlgo;
}
 
Example #28
Source File: BCFile.java    From hadoop-gpu with Apache License 2.0
public Algorithm getDefaultCompressionAlgorithm() {
  return defaultCompressionAlgorithm;
}
 
Example #29
Source File: BCFile.java    From hadoop-gpu with Apache License 2.0
public Algorithm getCompressionAlgorithm() {
  return compressionAlgorithm;
}
 
Example #30
Source File: BCFile.java    From RDFS with Apache License 2.0
private BlockReader createReader(Algorithm compressAlgo, BlockRegion region)
    throws IOException {
  RBlockState rbs = new RBlockState(compressAlgo, in, region, conf);
  return new BlockReader(rbs);
}