Java Code Examples for org.apache.hadoop.io.file.tfile.Compression.Algorithm

The following examples show how to use org.apache.hadoop.io.file.tfile.Compression.Algorithm. These examples are extracted from open source projects. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example 1
Source Project: hadoop   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Write-side state for a single block: chains the raw file stream through a
 * reusable byte buffer and a compression stream for the given algorithm.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          the underlying file-system output stream; its current position
 *          marks the start offset of this block
 * @param fsOutputBuffer
 *          reusable buffer backing the buffered output stream
 * @param conf
 *          configuration consulted for the output buffer size
 * @throws IOException if the compression stream cannot be created
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  // record the block's starting offset in the file
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // hand the pooled compressor back before propagating the failure
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Example 2
Source Project: hadoop   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a writer for a named meta block compressed with the given
 * algorithm.
 *
 * @param name unique name under which the meta block is registered
 * @param compressAlgo compression algorithm to apply to the block's data
 * @return a BlockAppender ready to receive the block's bytes
 * @throws MetaBlockAlreadyExists if a meta block with this name exists
 * @throws IOException if the underlying block stream cannot be created
 * @throws IllegalStateException if another block is still open
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
 
Example 3
Source Project: hadoop   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Read-side state for a compressed block: opens a decompression stream over
 * the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was written with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of the block within the file
 * @param conf configuration consulted for the input buffer size
 * @throws IOException if the decompression stream cannot be created
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's region, then decompress on the fly.
    BoundedRangeFileInputStream boundedIn =
        new BoundedRangeFileInputStream(fsin, this.region.getOffset(),
            this.region.getCompressedSize());
    this.in = compressAlgo.createDecompressionStream(boundedIn, decompressor,
        TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Return the pooled decompressor before propagating the failure.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
 
Example 4
Source Project: big-c   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Write-side state for a single block: chains the raw file stream through a
 * reusable byte buffer and a compression stream for the given algorithm.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          the underlying file-system output stream; its current position
 *          marks the start offset of this block
 * @param fsOutputBuffer
 *          reusable buffer backing the buffered output stream
 * @param conf
 *          configuration consulted for the output buffer size
 * @throws IOException if the compression stream cannot be created
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  // record the block's starting offset in the file
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // hand the pooled compressor back before propagating the failure
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Example 5
Source Project: big-c   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a writer for a named meta block compressed with the given
 * algorithm.
 *
 * @param name unique name under which the meta block is registered
 * @param compressAlgo compression algorithm to apply to the block's data
 * @return a BlockAppender ready to receive the block's bytes
 * @throws MetaBlockAlreadyExists if a meta block with this name exists
 * @throws IOException if the underlying block stream cannot be created
 * @throws IllegalStateException if another block is still open
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
 
Example 6
Source Project: big-c   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Read-side state for a compressed block: opens a decompression stream over
 * the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was written with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of the block within the file
 * @param conf configuration consulted for the input buffer size
 * @throws IOException if the decompression stream cannot be created
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Restrict reads to this block's region, then decompress on the fly.
    BoundedRangeFileInputStream boundedIn =
        new BoundedRangeFileInputStream(fsin, this.region.getOffset(),
            this.region.getCompressedSize());
    this.in = compressAlgo.createDecompressionStream(boundedIn, decompressor,
        TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Return the pooled decompressor before propagating the failure.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
 
Example 7
Source Project: attic-apex-malhar   Source File: DTBCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Write-side state for a single block: chains the raw file stream through a
 * reusable byte buffer and a compression stream for the given algorithm.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          the underlying file-system output stream; its current position
 *          marks the start offset of this block
 * @param fsOutputBuffer
 *          reusable buffer backing the buffered output stream
 * @param conf
 *          configuration consulted for the output buffer size
 * @throws IOException if the compression stream cannot be created
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  // record the block's starting offset in the file
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(DTFile.getFSOutputBufferSize(conf));

  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.getBytes());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // hand the pooled compressor back before propagating the failure
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Example 8
Source Project: attic-apex-malhar   Source File: DTBCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a writer for a named meta block compressed with the given
 * algorithm.
 *
 * @param name unique name under which the meta block is registered
 * @param compressAlgo compression algorithm to apply to the block's data
 * @return a BlockAppender ready to receive the block's bytes
 * @throws MetaBlockAlreadyExists if a meta block with this name exists
 * @throws IOException if the underlying block stream cannot be created
 * @throws IllegalStateException if another block is still open
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
 
Example 9
Source Project: attic-apex-malhar   Source File: DTBCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Read-side state for a compressed block: eagerly decompresses the whole
 * block into the reader's byte buffer so subsequent reads are served from
 * memory.
 *
 * @param compressionAlgo algorithm the block was written with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of the block within the file
 * @param conf configuration consulted for the input buffer size
 * @param r reader whose {@code baos} receives the decompressed bytes
 * @throws IOException if decompression of the block fails
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin, BlockRegion region, Configuration conf, Reader r) throws IOException
{
  this.compressAlgo = compressionAlgo;
  Decompressor decompressor = compressionAlgo.getDecompressor();
  this.region = region;
  try {
    InputStream in = compressAlgo.createDecompressionStream(
        new BoundedRangeFileInputStream(fsin, region.getOffset(),
            region.getCompressedSize()),
        decompressor, DTFile.getFSInputBufferSize(conf));
    try {
      r.baos.reset();
      byte[] buf = new byte[DTFile.getFSInputBufferSize(conf)];
      int l;
      // drain the whole block; read() == 0 simply retries, < 0 is EOF
      while ((l = in.read(buf)) >= 0) {
        if (l > 0) {
          r.baos.write(buf, 0, l);
        }
      }
    } finally {
      // BUG FIX: the decompression stream was previously never closed
      in.close();
    }
    // keep decompressed data into cache
    byte[] blockData = r.baos.toByteArray();
    rbain = new ReusableByteArrayInputStream(blockData);
  } finally {
    // BUG FIX: the pooled decompressor was only returned on failure; the
    // block is fully decompressed and cached here, so return it always.
    compressAlgo.returnDecompressor(decompressor);
  }
}
 
Example 10
Source Project: RDFS   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Write-side state for a single block: chains the raw file stream through a
 * reusable byte buffer and a compression stream for the given algorithm.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          the underlying file-system output stream; its current position
 *          marks the start offset of this block
 * @param fsOutputBuffer
 *          reusable buffer backing the buffered output stream
 * @param conf
 *          configuration consulted for the output buffer size
 * @throws IOException if the compression stream cannot be created
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  // record the block's starting offset in the file
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  // NOTE(review): BytesWritable.get() is deprecated in newer Hadoop in
  // favor of getBytes() — fine here if this tree targets the old API.
  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.get());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // hand the pooled compressor back before propagating the failure
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Example 11
Source Project: RDFS   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a writer for a named meta block compressed with the given
 * algorithm.
 *
 * @param name unique name under which the meta block is registered
 * @param compressAlgo compression algorithm to apply to the block's data
 * @return a BlockAppender ready to receive the block's bytes
 * @throws MetaBlockAlreadyExists if a meta block with this name exists
 * @throws IOException if the underlying block stream cannot be created
 * @throws IllegalStateException if another block is still open
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
 
Example 12
Source Project: RDFS   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Read-side state for a compressed block: opens a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was written with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of the block within the file
 * @param conf configuration consulted for the input buffer size
 * @throws IOException if the decompression stream cannot be created
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Bound reads to this block's region, then layer decompression on top.
    this.in =
        compressAlgo
            .createDecompressionStream(new BoundedRangeFileInputStream(
                fsin, this.region.getOffset(), this.region
                    .getCompressedSize()), decompressor, TFile
                .getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Return the pooled decompressor before propagating the failure.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
 
Example 13
Source Project: hadoop-gpu   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Write-side state for a single block: chains the raw file stream through a
 * reusable byte buffer and a compression stream for the given algorithm.
 *
 * @param compressionAlgo
 *          The compression algorithm to be used for compression.
 * @param fsOut
 *          the underlying file-system output stream; its current position
 *          marks the start offset of this block
 * @param fsOutputBuffer
 *          reusable buffer backing the buffered output stream
 * @param conf
 *          configuration consulted for the output buffer size
 * @throws IOException if the compression stream cannot be created
 */
public WBlockState(Algorithm compressionAlgo, FSDataOutputStream fsOut,
    BytesWritable fsOutputBuffer, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.fsOut = fsOut;
  // record the block's starting offset in the file
  this.posStart = fsOut.getPos();

  fsOutputBuffer.setCapacity(TFile.getFSOutputBufferSize(conf));

  // NOTE(review): BytesWritable.get() is deprecated in newer Hadoop in
  // favor of getBytes() — fine here if this tree targets the old API.
  this.fsBufferedOutput =
      new SimpleBufferedOutputStream(this.fsOut, fsOutputBuffer.get());
  this.compressor = compressAlgo.getCompressor();

  try {
    this.out =
        compressionAlgo.createCompressionStream(fsBufferedOutput,
            compressor, 0);
  } catch (IOException e) {
    // hand the pooled compressor back before propagating the failure
    compressAlgo.returnCompressor(compressor);
    throw e;
  }
}
 
Example 14
Source Project: hadoop-gpu   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Creates a writer for a named meta block compressed with the given
 * algorithm.
 *
 * @param name unique name under which the meta block is registered
 * @param compressAlgo compression algorithm to apply to the block's data
 * @return a BlockAppender ready to receive the block's bytes
 * @throws MetaBlockAlreadyExists if a meta block with this name exists
 * @throws IOException if the underlying block stream cannot be created
 * @throws IllegalStateException if another block is still open
 */
private BlockAppender prepareMetaBlock(String name, Algorithm compressAlgo)
    throws IOException, MetaBlockAlreadyExists {
  // Only one block may be in progress at a time.
  if (blkInProgress) {
    throw new IllegalStateException(
        "Cannot create Meta Block until previous block is closed.");
  }

  if (metaIndex.getMetaByName(name) != null) {
    throw new MetaBlockAlreadyExists("name=" + name);
  }

  MetaBlockRegister mbr = new MetaBlockRegister(name, compressAlgo);
  WBlockState wbs =
      new WBlockState(compressAlgo, out, fsOutputBuffer, conf);
  BlockAppender ba = new BlockAppender(mbr, wbs);
  blkInProgress = true;
  metaBlkSeen = true;
  return ba;
}
 
Example 15
Source Project: hadoop-gpu   Source File: BCFile.java    License: Apache License 2.0 6 votes vote down vote up
/**
 * Read-side state for a compressed block: opens a decompression stream
 * over the block's byte range within the underlying file.
 *
 * @param compressionAlgo algorithm the block was written with
 * @param fsin input stream over the whole container file
 * @param region offset and compressed size of the block within the file
 * @param conf configuration consulted for the input buffer size
 * @throws IOException if the decompression stream cannot be created
 */
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    // Bound reads to this block's region, then layer decompression on top.
    this.in =
        compressAlgo
            .createDecompressionStream(new BoundedRangeFileInputStream(
                fsin, this.region.getOffset(), this.region
                    .getCompressedSize()), decompressor, TFile
                .getFSInputBufferSize(conf));
  } catch (IOException e) {
    // Return the pooled decompressor before propagating the failure.
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
 
Example 16
Source Project: hadoop   Source File: TestTFileLzoCodecsStreams.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Prepares the LZO-codec test fixture, marking the test as skipped when
 * native LZO support is unavailable.
 */
@Override
public void setUp() throws IOException {
  final boolean lzoAvailable = Algorithm.LZO.isSupported();
  skip = !lzoAvailable;
  if (!lzoAvailable) {
    System.out.println("Skipped");
  }
  init(Compression.Algorithm.LZO.getName(), "memcmp");
  if (lzoAvailable) {
    super.setUp();
  }
}
 
Example 17
Source Project: hadoop   Source File: TestTFileLzoCodecsByteArrays.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Prepares the LZO-codec byte-array test fixture, marking the test as
 * skipped when native LZO support is unavailable.
 */
@Override
public void setUp() throws IOException {
  final boolean lzoAvailable = Algorithm.LZO.isSupported();
  skip = !lzoAvailable;
  if (!lzoAvailable) {
    System.out.println("Skipped");
  }

  // TODO: sample the generated key/value records, and put the numbers below
  init(Compression.Algorithm.LZO.getName(), "memcmp", 2605, 2558);
  if (lzoAvailable) {
    super.setUp();
  }
}
 
Example 18
Source Project: big-c   Source File: TestTFileLzoCodecsStreams.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Prepares the LZO-codec test fixture, marking the test as skipped when
 * native LZO support is unavailable.
 */
@Override
public void setUp() throws IOException {
  final boolean lzoAvailable = Algorithm.LZO.isSupported();
  skip = !lzoAvailable;
  if (!lzoAvailable) {
    System.out.println("Skipped");
  }
  init(Compression.Algorithm.LZO.getName(), "memcmp");
  if (lzoAvailable) {
    super.setUp();
  }
}
 
Example 19
Source Project: big-c   Source File: TestTFileLzoCodecsByteArrays.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Prepares the LZO-codec byte-array test fixture, marking the test as
 * skipped when native LZO support is unavailable.
 */
@Override
public void setUp() throws IOException {
  final boolean lzoAvailable = Algorithm.LZO.isSupported();
  skip = !lzoAvailable;
  if (!lzoAvailable) {
    System.out.println("Skipped");
  }

  // TODO: sample the generated key/value records, and put the numbers below
  init(Compression.Algorithm.LZO.getName(), "memcmp", 2605, 2558);
  if (lzoAvailable) {
    super.setUp();
  }
}
 
Example 20
Source Project: attic-apex-malhar   Source File: DTBCFile.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Returns a BlockReader for the given region, serving it from the cache
 * when possible; on a miss the block is decompressed and the reader cached.
 *
 * @param compressAlgo compression algorithm the block was written with
 * @param region offset and compressed size of the block within the file
 * @return a renewed cached reader, or a freshly built one
 * @throws IOException if the block cannot be read and decompressed
 */
private BlockReader createReader(Algorithm compressAlgo, BlockRegion region)
    throws IOException {
    // The cache key combines the block offset with this reader's identity.
    String cacheKey = region.getOffset() + this.toString();
    BlockReader reader = (BlockReader) CacheManager.get(cacheKey);
    if (reader == null) {
      // Cache miss: decompress the block and remember the reader.
      RBlockState state = new RBlockState(compressAlgo, in, region, conf, this);
      reader = new BlockReader(state);
      CacheManager.put(cacheKey, reader);
      cacheKeys.add(cacheKey);
    } else {
      // Cache hit: reset the cached reader for another pass.
      reader.renew();
    }
    return reader;
}
 
Example 21
/**
 * Prepares the LZO-codec test fixture, marking the test as skipped when
 * native LZO support is unavailable.
 */
@Override
public void setUp() throws IOException {
  final boolean lzoAvailable = Algorithm.LZO.isSupported();
  skip = !lzoAvailable;
  if (!lzoAvailable) {
    System.out.println("Skipped");
  }
  init(Compression.Algorithm.LZO.getName(), "memcmp");
  if (lzoAvailable) {
    super.setUp();
  }
}
 
Example 22
/**
 * Prepares the LZO-codec byte-array test fixture, marking the test as
 * skipped when native LZO support is unavailable.
 */
@Override
public void setUp() throws IOException {
  final boolean lzoAvailable = Algorithm.LZO.isSupported();
  skip = !lzoAvailable;
  if (!lzoAvailable) {
    System.out.println("Skipped");
  }

  // TODO: sample the generated key/value records, and put the numbers below
  init(Compression.Algorithm.LZO.getName(), "memcmp", 2605, 2558);
  if (lzoAvailable) {
    super.setUp();
  }
}
 
Example 23
Source Project: RDFS   Source File: TestTFileLzoCodecsStreams.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Prepares the LZO-codec test fixture, marking the test as skipped when
 * native LZO support is unavailable.
 */
@Override
public void setUp() throws IOException {
  final boolean lzoAvailable = Algorithm.LZO.isSupported();
  skip = !lzoAvailable;
  if (!lzoAvailable) {
    System.out.println("Skipped");
  }
  init(Compression.Algorithm.LZO.getName(), "memcmp", "TFileTestCodecsLzo");
  if (lzoAvailable) {
    super.setUp();
  }
}
 
Example 24
Source Project: RDFS   Source File: TestTFileLzoCodecsByteArrays.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Prepares the LZO-codec byte-array test fixture, marking the test as
 * skipped when native LZO support is unavailable.
 */
@Override
public void setUp() throws IOException {
  final boolean lzoAvailable = Algorithm.LZO.isSupported();
  skip = !lzoAvailable;
  if (!lzoAvailable) {
    System.out.println("Skipped");
  }

  // TODO: sample the generated key/value records, and put the numbers below
  init(Compression.Algorithm.LZO.getName(), "memcmp", "TFileTestCodecsLzo",
      2605, 2558);
  if (lzoAvailable) {
    super.setUp();
  }
}
 
Example 25
Source Project: hadoop-gpu   Source File: TestTFileLzoCodecsStreams.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Prepares the LZO-codec test fixture, marking the test as skipped when
 * native LZO support is unavailable.
 */
@Override
public void setUp() throws IOException {
  final boolean lzoAvailable = Algorithm.LZO.isSupported();
  skip = !lzoAvailable;
  if (!lzoAvailable) {
    System.out.println("Skipped");
  }
  init(Compression.Algorithm.LZO.getName(), "memcmp", "TFileTestCodecsLzo");
  if (lzoAvailable) {
    super.setUp();
  }
}
 
Example 26
Source Project: hadoop-gpu   Source File: TestTFileLzoCodecsByteArrays.java    License: Apache License 2.0 5 votes vote down vote up
/**
 * Prepares the LZO-codec byte-array test fixture, marking the test as
 * skipped when native LZO support is unavailable.
 */
@Override
public void setUp() throws IOException {
  final boolean lzoAvailable = Algorithm.LZO.isSupported();
  skip = !lzoAvailable;
  if (!lzoAvailable) {
    System.out.println("Skipped");
  }

  // TODO: sample the generated key/value records, and put the numbers below
  init(Compression.Algorithm.LZO.getName(), "memcmp", "TFileTestCodecsLzo",
      2605, 2558);
  if (lzoAvailable) {
    super.setUp();
  }
}
 
Example 27
Source Project: hadoop   Source File: BCFile.java    License: Apache License 2.0 4 votes vote down vote up
// Returns the default compression algorithm stored in the data index.
private Algorithm getDefaultCompressionAlgorithm() {
  return dataIndex.getDefaultCompressionAlgorithm();
}
 
Example 28
Source Project: hadoop   Source File: BCFile.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * @param name name under which the meta block is registered
 * @param compressAlgo compression algorithm used for the block's data
 */
MetaBlockRegister(String name, Algorithm compressAlgo) {
  this.name = name;
  this.compressAlgo = compressAlgo;
}
 
Example 29
Source Project: hadoop   Source File: BCFile.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * Builds a BlockReader over the given region using the supplied algorithm.
 *
 * @param compressAlgo compression algorithm the block was written with
 * @param region offset and compressed size of the block within the file
 * @return a reader positioned at the start of the decompressed block
 * @throws IOException if the block's decompression stream cannot be opened
 */
private BlockReader createReader(Algorithm compressAlgo, BlockRegion region)
    throws IOException {
  return new BlockReader(new RBlockState(compressAlgo, in, region, conf));
}
 
Example 30
Source Project: hadoop   Source File: BCFile.java    License: Apache License 2.0 4 votes vote down vote up
/**
 * @param metaName name of the meta block this entry describes
 * @param compressionAlgorithm algorithm the block was compressed with
 * @param region offset and size of the block within the file
 */
public MetaIndexEntry(String metaName, Algorithm compressionAlgorithm,
    BlockRegion region) {
  this.metaName = metaName;
  this.compressionAlgorithm = compressionAlgorithm;
  this.region = region;
}