Java Code Examples for org.apache.hadoop.hbase.io.compress.Compression.Algorithm

The following examples show how to use org.apache.hadoop.hbase.io.compress.Compression.Algorithm. These examples are extracted from open source projects; the source project, source file, and license are listed above each example.
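As a quick orientation before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the most common use of Compression.Algorithm: choosing a codec for a column family at table-creation time. It assumes an HBase 2.x client and an already-open Connection; the class, table, and family names are placeholders.

import java.io.IOException;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.util.Bytes;

public class CompressionAlgorithmUsage {
  // Creates a table whose single column family "d" stores its blocks GZ-compressed.
  public static void createGzTable(Connection connection) throws IOException {
    try (Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("d"))
          .setCompressionType(Compression.Algorithm.GZ)
          .build());
      admin.createTable(table.build());
    }
  }
}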
Example 1
Source Project: antsdb   Source File: Helper.java    License: GNU Lesser General Public License v3.0
public static void createTable(Connection conn, String namespace, String tableName, Algorithm compressionType) {
    // Check whether table already exists
    if (Helper.existsTable(conn, namespace, tableName)) {
        Helper.dropTable(conn, namespace, tableName);
    }
    if (!Helper.existsTable(conn, namespace, tableName)) {
        
        // Create table
        try (Admin admin = conn.getAdmin()) {
            HTableDescriptor table = new HTableDescriptor(TableName.valueOf(namespace, tableName));
            table.addFamily(new HColumnDescriptor(DATA_COLUMN_FAMILY).setCompressionType(compressionType));
            _log.debug("creating table {}", table.toString());
            admin.createTable(table);
        }
        catch (Exception ex) {
            throw new OrcaHBaseException(ex, "Failed to create table - " + tableName);
        }
    }
}
 
Example 2
Source Project: antsdb   Source File: Helper.java    License: GNU Lesser General Public License v3.0
public static void truncateTable(Connection connection, String namespace, String tableName) {
    try {
    
        TableName table = TableName.valueOf(namespace, tableName);
        
        // get compression type
        Table htable = connection.getTable(table);          
        HTableDescriptor tableDesc = htable.getTableDescriptor();
        HColumnDescriptor[] families = tableDesc.getColumnFamilies();
        Algorithm compressionType =  families[0].getCompression();
        
        // drop table
        dropTable(connection, namespace, tableName);
        
        // create table
        createTable(connection, namespace, tableName, compressionType);

    } 
    catch (Exception ex) {
        throw new OrcaHBaseException("Failed to truncate table - " + tableName, ex);
    }
}
 
Example 3
Source Project: hbase   Source File: HBaseTestingUtility.java    License: Apache License 2.0
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    TableName tableName, byte[] columnFamily, Algorithm compression,
    DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
    Durability durability)
        throws IOException {
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
  tableDescriptor.setDurability(durability);
  tableDescriptor.setRegionReplication(regionReplication);
  ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
    new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(columnFamily);
  familyDescriptor.setDataBlockEncoding(dataBlockEncoding);
  familyDescriptor.setCompressionType(compression);
  return createPreSplitLoadTestTable(conf, tableDescriptor, familyDescriptor,
    numRegionsPerServer);
}
 
Example 4
Source Project: hbase   Source File: HBaseTestingUtility.java    License: Apache License 2.0
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    TableName tableName, byte[][] columnFamilies, Algorithm compression,
    DataBlockEncoding dataBlockEncoding, int numRegionsPerServer, int regionReplication,
    Durability durability)
        throws IOException {
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
  tableDescriptor.setDurability(durability);
  tableDescriptor.setRegionReplication(regionReplication);
  ColumnFamilyDescriptor[] hcds = new ColumnFamilyDescriptor[columnFamilies.length];
  for (int i = 0; i < columnFamilies.length; i++) {
    ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(columnFamilies[i]);
    familyDescriptor.setDataBlockEncoding(dataBlockEncoding);
    familyDescriptor.setCompressionType(compression);
    hcds[i] = familyDescriptor;
  }
  return createPreSplitLoadTestTable(conf, tableDescriptor, hcds, numRegionsPerServer);
}
 
Example 5
Source Project: hbase   Source File: HBaseTestingUtility.java    License: Apache License 2.0
/**
 * Create a set of column descriptors with the combination of compression,
 * encoding, bloom codecs available.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<ColumnFamilyDescriptor> generateColumnDescriptors(final String prefix) {
  List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
  long familyId = 0;
  for (Compression.Algorithm compressionType: getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType: DataBlockEncoding.values()) {
      for (BloomType bloomType: BloomType.values()) {
        String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
        ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
        columnFamilyDescriptorBuilder.setCompressionType(compressionType);
        columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
        columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
        columnFamilyDescriptors.add(columnFamilyDescriptorBuilder.build());
        familyId++;
      }
    }
  }
  return columnFamilyDescriptors;
}
 
Example 6
Source Project: hbase   Source File: TestSCVFWithMiniCluster.java    License: Apache License 2.0
private static void create(Admin admin, TableName tableName, byte[]... families)
    throws IOException {
  TableDescriptorBuilder.ModifyableTableDescriptor tableDescriptor =
    new TableDescriptorBuilder.ModifyableTableDescriptor(tableName);
  for (byte[] family : families) {
    ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor familyDescriptor =
      new ColumnFamilyDescriptorBuilder.ModifyableColumnFamilyDescriptor(family);
    familyDescriptor.setMaxVersions(1);
    familyDescriptor.setCompressionType(Algorithm.GZ);
    tableDescriptor.setColumnFamily(familyDescriptor);
  }
  try {
    admin.createTable(tableDescriptor);
  } catch (TableExistsException tee) {
    /* Ignore */
  }
}
 
Example 7
Source Project: hbase   Source File: TestHFileBlock.java    License: Apache License 2.0
static HFileBlock.Writer createTestV2Block(Compression.Algorithm algo,
    boolean includesMemstoreTS, boolean includesTag) throws IOException {
  final BlockType blockType = BlockType.DATA;
  HFileContext meta = new HFileContextBuilder()
                      .withCompression(algo)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTag)
                      .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                      .build();
  HFileBlock.Writer hbw = new HFileBlock.Writer(null, meta);
  DataOutputStream dos = hbw.startWriting(blockType);
  writeTestBlockContents(dos);
  dos.flush();
  hbw.ensureBlockReady();
  assertEquals(1000 * 4, hbw.getUncompressedSizeWithoutHeader());
  hbw.release();
  return hbw;
}
 
Example 8
Source Project: hbase   Source File: TestHFileBlock.java    License: Apache License 2.0
static void assertBuffersEqual(ByteBuff expectedBuffer,
    ByteBuff actualBuffer, Compression.Algorithm compression,
    DataBlockEncoding encoding, boolean pread) {
  if (!actualBuffer.equals(expectedBuffer)) {
    int prefix = 0;
    int minLimit = Math.min(expectedBuffer.limit(), actualBuffer.limit());
    while (prefix < minLimit &&
        expectedBuffer.get(prefix) == actualBuffer.get(prefix)) {
      prefix++;
    }

    fail(String.format(
        "Content mismatch for %s, commonPrefix %d, expected %s, got %s",
        buildMessageDetails(compression, encoding, pread), prefix,
        nextBytesToStr(expectedBuffer, prefix),
        nextBytesToStr(actualBuffer, prefix)));
  }
}
 
Example 9
Source Project: hbase   Source File: TestHFileDataBlockEncoder.java    License: Apache License 2.0
private HFileBlock getSampleHFileBlock(List<KeyValue> kvs, boolean useTag) {
  ByteBuffer keyValues = RedundantKVGenerator.convertKvToByteBuffer(kvs, includesMemstoreTS);
  int size = keyValues.limit();
  ByteBuffer buf = ByteBuffer.allocate(size + HConstants.HFILEBLOCK_HEADER_SIZE);
  buf.position(HConstants.HFILEBLOCK_HEADER_SIZE);
  keyValues.rewind();
  buf.put(keyValues);
  HFileContext meta = new HFileContextBuilder()
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(useTag)
                      .withHBaseCheckSum(true)
                      .withCompression(Algorithm.NONE)
                      .withBlockSize(0)
                      .withChecksumType(ChecksumType.NULL)
                      .build();
  HFileBlock b = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
      HFileBlock.FILL_HEADER, 0, 0, -1, meta, ByteBuffAllocator.HEAP);
  return b;
}
 
Example 10
Source Project: hbase   Source File: TestHFileScannerImplReferenceCount.java    License: Apache License 2.0
private void writeHFile(Configuration conf, FileSystem fs, Path hfilePath, Algorithm compression,
    DataBlockEncoding encoding, int cellCount) throws IOException {
  HFileContext context =
      new HFileContextBuilder().withBlockSize(1).withDataBlockEncoding(DataBlockEncoding.NONE)
          .withCompression(compression).withDataBlockEncoding(encoding).build();
  try (HFile.Writer writer =
      new HFile.WriterFactory(conf, new CacheConfig(conf)).withPath(fs, hfilePath)
          .withFileContext(context).create()) {
    Random rand = new Random(9713312); // Just a fixed seed.
    for (int i = 0; i < cellCount; ++i) {
      byte[] keyBytes = Bytes.add(Bytes.toBytes(i), SUFFIX);

      // A random-length random value.
      byte[] valueBytes = RandomKeyValueUtil.randomValue(rand);
      KeyValue keyValue =
          new KeyValue(keyBytes, FAMILY, QUALIFIER, HConstants.LATEST_TIMESTAMP, valueBytes);
      if (firstCell == null) {
        firstCell = keyValue;
      } else if (secondCell == null) {
        secondCell = keyValue;
      }
      writer.append(keyValue);
    }
  }
}
 
Example 11
Source Project: hbase   Source File: TestHFileScannerImplReferenceCount.java    License: Apache License 2.0
@Test
public void testDisabledBlockCache() throws Exception {
  writeHFile(conf, fs, hfilePath, Algorithm.NONE, DataBlockEncoding.NONE, CELL_COUNT);
  // Set LruBlockCache
  conf.setFloat(HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
  BlockCache defaultBC = BlockCacheFactory.createBlockCache(conf);
  Assert.assertNull(defaultBC);
  CacheConfig cacheConfig = new CacheConfig(conf, null, defaultBC, allocator);
  Assert.assertFalse(cacheConfig.isCombinedBlockCache()); // Must be LruBlockCache.
  HFile.Reader reader = HFile.createReader(fs, hfilePath, cacheConfig, true, conf);
  Assert.assertTrue(reader instanceof HFileReaderImpl);
  // We've built an HFile tree with index = 16.
  Assert.assertEquals(16, reader.getTrailer().getNumDataIndexLevels());

  HFileBlock block1 = reader.getDataBlockIndexReader()
      .loadDataBlockWithScanInfo(firstCell, null, true, true, false,
          DataBlockEncoding.NONE, reader).getHFileBlock();

  Assert.assertTrue(block1.isSharedMem());
  Assert.assertTrue(block1 instanceof SharedMemHFileBlock);
  Assert.assertEquals(1, block1.refCnt());
  Assert.assertTrue(block1.release());
}
 
Example 12
Source Project: hbase   Source File: TestDataBlockEncoders.java    License: Apache License 2.0
/**
 * Test whether the decompression of first key is implemented correctly.
 * @throws IOException
 */
@Test
public void testFirstKeyInBlockOnSample() throws IOException {
  List<KeyValue> sampleKv = generator.generateTestKeyValues(NUMBER_OF_KV, includesTags);

  for (DataBlockEncoding encoding : DataBlockEncoding.values()) {
    if (encoding.getEncoder() == null) {
      continue;
    }
    DataBlockEncoder encoder = encoding.getEncoder();
    ByteBuffer encodedBuffer = encodeKeyValues(encoding, sampleKv,
        getEncodingContext(Compression.Algorithm.NONE, encoding), this.useOffheapData);
    Cell key = encoder.getFirstKeyCellInBlock(new SingleByteBuff(encodedBuffer));
    KeyValue firstKv = sampleKv.get(0);
    if (0 != PrivateCellUtil.compareKeyIgnoresMvcc(CellComparatorImpl.COMPARATOR, key, firstKv)) {
      int commonPrefix = PrivateCellUtil.findCommonPrefixInFlatKey(key, firstKv, false, true);
      fail(String.format("Bug in '%s' commonPrefix %d", encoder.toString(), commonPrefix));
    }
  }
}
 
Example 13
Source Project: hbase   Source File: TestDataBlockEncoders.java    License: Apache License 2.0
@Test
public void testRowIndexWithTagsButNoTagsInCell() throws IOException {
  List<KeyValue> kvList = new ArrayList<>();
  byte[] row = new byte[0];
  byte[] family = new byte[0];
  byte[] qualifier = new byte[0];
  byte[] value = new byte[0];
  KeyValue expectedKV = new KeyValue(row, family, qualifier, 1L, Type.Put, value);
  kvList.add(expectedKV);
  DataBlockEncoding encoding = DataBlockEncoding.ROW_INDEX_V1;
  DataBlockEncoder encoder = encoding.getEncoder();
  ByteBuffer encodedBuffer =
      encodeKeyValues(encoding, kvList, getEncodingContext(Algorithm.NONE, encoding), false);
  HFileContext meta =
      new HFileContextBuilder().withHBaseCheckSum(false).withIncludesMvcc(includesMemstoreTS)
          .withIncludesTags(includesTags).withCompression(Compression.Algorithm.NONE).build();
  DataBlockEncoder.EncodedSeeker seeker =
    encoder.createSeeker(encoder.newDataBlockDecodingContext(meta));
  seeker.setCurrentBuffer(new SingleByteBuff(encodedBuffer));
  Cell cell = seeker.getCell();
  Assert.assertEquals(expectedKV.getLength(), ((KeyValue) cell).getLength());
}
 
Example 14
Source Project: hbase   Source File: TestDataBlockEncoders.java    License: Apache License 2.0
private void testAlgorithm(byte[] encodedData, ByteBuffer unencodedDataBuf,
    DataBlockEncoder encoder) throws IOException {
  // decode
  ByteArrayInputStream bais = new ByteArrayInputStream(encodedData, ENCODED_DATA_OFFSET,
      encodedData.length - ENCODED_DATA_OFFSET);
  DataInputStream dis = new DataInputStream(bais);
  ByteBuffer actualDataset;
  HFileContext meta = new HFileContextBuilder().withHBaseCheckSum(false)
      .withIncludesMvcc(includesMemstoreTS).withIncludesTags(includesTags)
      .withCompression(Compression.Algorithm.NONE).build();
  actualDataset = encoder.decodeKeyValues(dis, encoder.newDataBlockDecodingContext(meta));
  actualDataset.rewind();

  // This is because in case of prefix tree the decoded stream will not have
  // the mvcc in it.
  assertEquals("Encoding -> decoding gives different results for " + encoder,
      Bytes.toStringBinary(unencodedDataBuf), Bytes.toStringBinary(actualDataset));
}
 
Example 15
Source Project: hbase   Source File: TestHFileOutputFormat2.java    License: Apache License 2.0
private void setupMockColumnFamiliesForCompression(Table table,
    Map<String, Compression.Algorithm> familyToCompression) throws IOException {

  TableDescriptorBuilder mockTableDescriptor =
    TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]);
  for (Entry<String, Compression.Algorithm> entry : familyToCompression.entrySet()) {
    ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder
      .newBuilder(Bytes.toBytes(entry.getKey()))
      .setMaxVersions(1)
      .setCompressionType(entry.getValue())
      .setBlockCacheEnabled(false)
      .setTimeToLive(0)
      .build();

    mockTableDescriptor.setColumnFamily(columnFamilyDescriptor);
  }
  Mockito.doReturn(mockTableDescriptor.build()).when(table).getDescriptor();
}
 
Example 16
Source Project: hbase   Source File: TestHFileOutputFormat2.java    License: Apache License 2.0
/**
 * @return a map from column family names to compression algorithms for
 *         testing column family compression. Column family names have special characters
 */
private Map<String, Compression.Algorithm>
    getMockColumnFamiliesForCompression (int numCfs) {
  Map<String, Compression.Algorithm> familyToCompression = new HashMap<>();
  // use column family names having special characters
  if (numCfs-- > 0) {
    familyToCompression.put("Family1!@#!@#&", Compression.Algorithm.LZO);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.SNAPPY);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family2=asdads&!AASD", Compression.Algorithm.GZ);
  }
  if (numCfs-- > 0) {
    familyToCompression.put("Family3", Compression.Algorithm.NONE);
  }
  return familyToCompression;
}
 
Example 17
Source Project: examples   Source File: Create2.java    License: Apache License 2.0
public static void main(String[] args) throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
  Configuration conf = HBaseConfiguration.create();
  HBaseAdmin admin = new HBaseAdmin(conf);
  // tag::CREATE2[]
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("pages"));
  byte[][] splits = {Bytes.toBytes("b"), Bytes.toBytes("f"),
    Bytes.toBytes("k"), Bytes.toBytes("n"), Bytes.toBytes("t")};
  desc.setValue(Bytes.toBytes("comment"), Bytes.toBytes("Create 10012014"));
  HColumnDescriptor family = new HColumnDescriptor("c");
  family.setCompressionType(Algorithm.GZ);
  family.setMaxVersions(52);
  family.setBloomFilterType(BloomType.ROW);
  desc.addFamily(family);
  admin.createTable(desc, splits);
  // end::CREATE2[]
  admin.close();
}
 
Example 18
Source Project: examples   Source File: CreateTable.java    License: Apache License 2.0
public static void main(String[] args) throws MasterNotRunningException,
    ZooKeeperConnectionException, IOException {
  try (Connection connection = ConnectionFactory.createConnection();
      Admin admin = connection.getAdmin();) {
    LOG.info("Starting table creation");
    // tag::CREATE[]
    TableName documents = TableName.valueOf("documents");
    HTableDescriptor desc = new HTableDescriptor(documents);
    HColumnDescriptor family = new HColumnDescriptor("c");
    family.setCompressionType(Algorithm.GZ);
    family.setBloomFilterType(BloomType.NONE);
    desc.addFamily(family);
    UniformSplit uniformSplit = new UniformSplit();
    admin.createTable(desc, uniformSplit.split(8));
    // end::CREATE[]
    LOG.info("Table successfuly created");
  }
}
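Examples 17 and 18 use the HTableDescriptor/HColumnDescriptor API, which is deprecated in HBase 2.x in favor of the TableDescriptorBuilder and ColumnFamilyDescriptorBuilder classes seen in the hbase examples above. As a rough sketch only (reusing the "pages" table, "c" family, and split keys from Example 17, and assuming an open Admin plus the builder imports already shown), the builder-based equivalent would look like this:

// Sketch: builder-style equivalent of Example 17 for HBase 2.x (assumes Admin admin is open).
TableDescriptorBuilder pages = TableDescriptorBuilder.newBuilder(TableName.valueOf("pages"));
pages.setValue("comment", "Create 10012014");
pages.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("c"))
    .setCompressionType(Compression.Algorithm.GZ)
    .setMaxVersions(52)
    .setBloomFilterType(BloomType.ROW)
    .build());
byte[][] splits = {Bytes.toBytes("b"), Bytes.toBytes("f"),
    Bytes.toBytes("k"), Bytes.toBytes("n"), Bytes.toBytes("t")};
admin.createTable(pages.build(), splits);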
 
Example 19
Source Project: kylin-on-parquet-v2   Source File: HFileOutputFormat3.java    License: Apache License 2.0
/**
 * Runs inside the task to deserialize column family to compression algorithm
 * map from the configuration.
 *
 * @param conf to read the serialized values from
 * @return a map from column family to the configured compression algorithm
 */
@VisibleForTesting
static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf, COMPRESSION_FAMILIES_CONF_KEY);
    Map<byte[], Algorithm> compressionMap = new TreeMap<byte[], Algorithm>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
        Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue());
        compressionMap.put(e.getKey(), algorithm);
    }
    return compressionMap;
}
 
Example 20
@Override
public void open(File home, ConfigService antsdbConfig, boolean isMutable) throws Exception {
    this.isMutable = isMutable;
    
    // options used by hbase service
    this.bufferSize = antsdbConfig.getHBaseBufferSize();
    this.maxColumnPerPut = antsdbConfig.getHBaseMaxColumnsPerPut();        
    String compressCodec = antsdbConfig.getHBaseCompressionCodec();
    this.compressionType = Algorithm.valueOf(compressCodec.toUpperCase());
    this.sysns = antsdbConfig.getSystemNamespace();
    _log.info("system namespace: {}", this.sysns);
    this.tnCheckpoint = TableName.valueOf(this.sysns, TABLE_SYNC_PARAM);
    
    // Configuration object, first try to find hbase-site.xml, then the embedded hbase/zookeeper settings
    
    try {
        this.hbaseConfig = getHBaseConfig(antsdbConfig);
        getConnection();
        _log.info("HBase is connected ");

        // Initialize HBase database for antsdb
        init();
    }
    catch (Throwable x) {
        if (this.hbaseConnection != null) {
            this.hbaseConnection.close();
        }
        throw x;
    }
}
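Note that Example 20 resolves the configured codec with Algorithm.valueOf(name.toUpperCase()), which requires the exact enum constant name (NONE, GZ, SNAPPY, LZO, and so on) and throws IllegalArgumentException otherwise, whereas Example 24 below uses Compression.getCompressionAlgorithmByName, which matches the lowercase names HBase reports for each algorithm ("none", "gz", "snappy", ...). A small sketch of the two lookups, using a hypothetical helper name:

// Hypothetical helper contrasting the two lookup styles; both lines assume a valid codec name.
static Algorithm parseCodec(String configured) {
  // Enum lookup: needs the constant name, hence the toUpperCase() in Example 20.
  Algorithm byEnum = Algorithm.valueOf(configured.toUpperCase());
  // Name lookup: matches Algorithm.getName(), e.g. "gz", "snappy", "lzo", "none".
  Algorithm byName = Compression.getCompressionAlgorithmByName(configured.toLowerCase());
  assert byEnum == byName;
  return byEnum;
}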
 
Example 21
Source Project: kylin   Source File: HFileOutputFormat3.java    License: Apache License 2.0
/**
 * Runs inside the task to deserialize column family to compression algorithm
 * map from the configuration.
 *
 * @param conf to read the serialized values from
 * @return a map from column family to the configured compression algorithm
 */
@VisibleForTesting
static Map<byte[], Algorithm> createFamilyCompressionMap(Configuration conf) {
    Map<byte[], String> stringMap = createFamilyConfValueMap(conf, COMPRESSION_FAMILIES_CONF_KEY);
    Map<byte[], Algorithm> compressionMap = new TreeMap<byte[], Algorithm>(Bytes.BYTES_COMPARATOR);
    for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
        Algorithm algorithm = AbstractHFileWriter.compressionByName(e.getValue());
        compressionMap.put(e.getKey(), algorithm);
    }
    return compressionMap;
}
 
Example 22
Source Project: hbase   Source File: HBaseTestingUtility.java    License: Apache License 2.0
/**
 * Create all combinations of Bloom filters and compression algorithms for
 * testing.
 */
private static List<Object[]> bloomAndCompressionCombinations() {
  List<Object[]> configurations = new ArrayList<>();
  for (Compression.Algorithm comprAlgo :
       HBaseCommonTestingUtility.COMPRESSION_ALGORITHMS) {
    for (BloomType bloomType : BloomType.values()) {
      configurations.add(new Object[] { comprAlgo, bloomType });
    }
  }
  return Collections.unmodifiableList(configurations);
}
 
Example 23
Source Project: hbase   Source File: HBaseTestingUtility.java    License: Apache License 2.0
/**
 * Creates a pre-split table for load testing. If the table already exists,
 * logs a warning and continues.
 * @return the number of regions the table was split into
 */
public static int createPreSplitLoadTestTable(Configuration conf,
    TableName tableName, byte[] columnFamily, Algorithm compression,
    DataBlockEncoding dataBlockEncoding) throws IOException {
  return createPreSplitLoadTestTable(conf, tableName,
    columnFamily, compression, dataBlockEncoding, DEFAULT_REGIONS_PER_SERVER, 1,
    Durability.USE_DEFAULT);
}
 
Example 24
Source Project: hbase   Source File: HBaseTestingUtility.java    License: Apache License 2.0
/**
 * Get supported compression algorithms.
 * @return supported compression algorithms.
 */
public static Compression.Algorithm[] getSupportedCompressionAlgorithms() {
  String[] allAlgos = HFile.getSupportedCompressionAlgorithms();
  List<Compression.Algorithm> supportedAlgos = new ArrayList<>();
  for (String algoName : allAlgos) {
    try {
      Compression.Algorithm algo = Compression.getCompressionAlgorithmByName(algoName);
      algo.getCompressor();
      supportedAlgos.add(algo);
    } catch (Throwable t) {
      // this algo is not available
    }
  }
  return supportedAlgos.toArray(new Algorithm[supportedAlgos.size()]);
}
 
Example 25
Source Project: hbase   Source File: TestHFileBlock.java    License: Apache License 2.0
public byte[] createTestV1Block(Compression.Algorithm algo)
    throws IOException {
  Compressor compressor = algo.getCompressor();
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  OutputStream os = algo.createCompressionStream(baos, compressor, 0);
  DataOutputStream dos = new DataOutputStream(os);
  BlockType.META.write(dos); // Let's make this a meta block.
  writeTestBlockContents(dos);
  dos.flush();
  algo.returnCompressor(compressor);
  return baos.toByteArray();
}
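Example 25 exercises only the compression side of the pooled codec API. As a rough sketch (not from the linked project, and assuming the mirror-image getDecompressor/createDecompressionStream/returnDecompressor methods plus the usual java.io and org.apache.hadoop.io.compress.Decompressor imports), the matching read path would look like this:

// Sketch: decompress bytes produced the way createTestV1Block compresses them.
static byte[] decompress(Compression.Algorithm algo, byte[] compressed) throws IOException {
  Decompressor decompressor = algo.getDecompressor();
  try (InputStream in = algo.createDecompressionStream(
      new ByteArrayInputStream(compressed), decompressor, 0)) {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] buf = new byte[4096];
    int n;
    while ((n = in.read(buf)) != -1) {
      out.write(buf, 0, n);
    }
    return out.toByteArray();
  } finally {
    algo.returnDecompressor(decompressor);
  }
}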
 
Example 26
Source Project: hbase   Source File: TestHFileBlock.java    License: Apache License 2.0
public String createTestBlockStr(Compression.Algorithm algo,
    int correctLength, boolean useTag) throws IOException {
  HFileBlock.Writer hbw = createTestV2Block(algo, includesMemstoreTS, useTag);
  byte[] testV2Block = hbw.getHeaderAndDataForTest();
  int osOffset = HConstants.HFILEBLOCK_HEADER_SIZE + 9;
  if (testV2Block.length == correctLength) {
    // Force-set the "OS" field of the gzip header to 3 (Unix) to avoid
    // variations across operating systems.
    // See http://www.gzip.org/zlib/rfc-gzip.html for gzip format.
    // We only make this change when the compressed block length matches.
    // Otherwise, there are obviously other inconsistencies.
    testV2Block[osOffset] = 3;
  }
  return Bytes.toStringBinary(testV2Block);
}
 
Example 27
Source Project: hbase   Source File: TestHFileBlock.java    License: Apache License 2.0
protected void testBlockHeapSizeInternals() {
  if (ClassSize.is32BitJVM()) {
    assertEquals(64, HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE);
  } else {
    assertEquals(80, HFileBlock.MULTI_BYTE_BUFFER_HEAP_SIZE);
  }

  for (int size : new int[] { 100, 256, 12345 }) {
    byte[] byteArr = new byte[HConstants.HFILEBLOCK_HEADER_SIZE + size];
    ByteBuffer buf = ByteBuffer.wrap(byteArr, 0, size);
    HFileContext meta = new HFileContextBuilder()
                        .withIncludesMvcc(includesMemstoreTS)
                        .withIncludesTags(includesTag)
                        .withHBaseCheckSum(false)
                        .withCompression(Algorithm.NONE)
                        .withBytesPerCheckSum(HFile.DEFAULT_BYTES_PER_CHECKSUM)
                        .withChecksumType(ChecksumType.NULL).build();
    HFileBlock block = new HFileBlock(BlockType.DATA, size, size, -1, ByteBuff.wrap(buf),
        HFileBlock.FILL_HEADER, -1, 0, -1, meta, HEAP);
    long byteBufferExpectedSize =
        ClassSize.align(ClassSize.estimateBase(new MultiByteBuff(buf).getClass(), true)
            + HConstants.HFILEBLOCK_HEADER_SIZE + size);
    long hfileMetaSize = ClassSize.align(ClassSize.estimateBase(HFileContext.class, true));
    long hfileBlockExpectedSize = ClassSize.align(ClassSize.estimateBase(HFileBlock.class, true));
    long expected = hfileBlockExpectedSize + byteBufferExpectedSize + hfileMetaSize;
    assertEquals("Block data size: " + size + ", byte buffer expected " +
        "size: " + byteBufferExpectedSize + ", HFileBlock class expected " +
        "size: " + hfileBlockExpectedSize + " HFileContext class expected size: "
            + hfileMetaSize + "; ", expected,
        block.heapSize());
  }
}
 
Example 28
Source Project: hbase   Source File: TestHFileWriterV3.java    License: Apache License 2.0
private void testMidKeyInHFileInternals(boolean useTags) throws IOException {
  Path hfilePath = new Path(TEST_UTIL.getDataTestDir(), "testMidKeyInHFile");
  Compression.Algorithm compressAlgo = Compression.Algorithm.NONE;
  int entryCount = 50000;
  writeDataAndReadFromHFile(hfilePath, compressAlgo, entryCount, true, useTags);
}
 
Example 29
Source Project: hbase   Source File: TestHFileDataBlockEncoder.java    License: Apache License 2.0
/**
 * Test encoding with offheap keyvalue. This test just verifies if the encoders
 * work with DBB and does not use the getXXXArray() API
 * @throws IOException
 */
@Test
public void testEncodingWithOffheapKeyValue() throws IOException {
  // usually we have just block without headers, but don't complicate that
  try {
    List<Cell> kvs = generator.generateTestExtendedOffheapKeyValues(60, true);
    HFileContext meta = new HFileContextBuilder().withIncludesMvcc(includesMemstoreTS)
        .withIncludesTags(true).withHBaseCheckSum(true).withCompression(Algorithm.NONE)
        .withBlockSize(0).withChecksumType(ChecksumType.NULL).build();
    writeBlock(kvs, meta, true);
  } catch (IllegalArgumentException e) {
    fail("No exception should have been thrown");
  }
}
 
Example 30
Source Project: hbase   Source File: TestDataBlockEncoders.java    License: Apache License 2.0
private HFileBlockEncodingContext getEncodingContext(Compression.Algorithm algo,
    DataBlockEncoding encoding) {
  DataBlockEncoder encoder = encoding.getEncoder();
  HFileContext meta = new HFileContextBuilder()
                      .withHBaseCheckSum(false)
                      .withIncludesMvcc(includesMemstoreTS)
                      .withIncludesTags(includesTags)
                      .withCompression(algo).build();
  if (encoder != null) {
    return encoder.newDataBlockEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
  } else {
    return new HFileBlockDefaultEncodingContext(encoding, HFILEBLOCK_DUMMY_HEADER, meta);
  }
}