Java Code Examples for org.apache.hadoop.hbase.HColumnDescriptor.setCompressionType()

The following are Java code examples showing how to use setCompressionType() of the org.apache.hadoop.hbase.HColumnDescriptor class. You can vote up the examples you like; your votes help our system surface more good examples.
Example 1
Project: mumu-hbase   File: HBaseTableOperation.java   Source Code and License Vote up 6 votes
/**
 * 往表中添加列族
 *
 * @param tableName  表名
 * @param familyName 列族名
 */
/**
 * Adds a column family to an existing table.
 *
 * @param tableName  name of the target table
 * @param familyName name of the column family to add
 */
public void addColumn(String tableName, String familyName) {
    HBaseConfiguration configuration = new HBaseConfiguration();
    Admin admin = configuration.admin();
    TableName table = TableName.valueOf(tableName);
    try {
        // Guard clause: nothing to do when the table is missing.
        if (!admin.tableExists(table)) {
            log.info("表名【" + tableName + "】不存在");
            return;
        }
        HColumnDescriptor family = new HColumnDescriptor(familyName);
        // Keep only the most recent version of each cell.
        family.setMaxVersions(1);
        // Use GZ compression for both flushes and compactions.
        family.setCompressionType(Compression.Algorithm.GZ);
        family.setCompactionCompressionType(Compression.Algorithm.GZ);
        admin.addColumn(table, family);
    } catch (IOException e) {
        log.error(e);
    } finally {
        configuration.close();
    }
}
 
Example 2
Project: ditb   File: TestStore.java   Source Code and License Vote up 6 votes
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
/**
 * Verify that compression and data block encoding are respected by the
 * Store.createWriterInTmp() method, used on store flush.
 */
@Test
public void testCreateWriter() throws Exception {
  Configuration config = HBaseConfiguration.create();
  FileSystem fileSystem = FileSystem.get(config);

  // Configure the family with GZ compression and DIFF block encoding.
  HColumnDescriptor columnDesc = new HColumnDescriptor(family);
  columnDesc.setCompressionType(Compression.Algorithm.GZ);
  columnDesc.setDataBlockEncoding(DataBlockEncoding.DIFF);
  init(name.getMethodName(), config, columnDesc);

  // Write a few cells through a tmp writer created by the store.
  StoreFile.Writer storeWriter =
      store.createWriterInTmp(4, columnDesc.getCompression(), false, true, false);
  Path writtenPath = storeWriter.getPath();
  storeWriter.append(new KeyValue(row, family, qf1, Bytes.toBytes(1)));
  storeWriter.append(new KeyValue(row, family, qf2, Bytes.toBytes(2)));
  storeWriter.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3)));
  storeWriter.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4)));
  storeWriter.close();

  // Read the file back and confirm both settings were applied on disk.
  HFile.Reader hfileReader = HFile.createReader(fileSystem, writtenPath, new CacheConfig(config), config);
  Assert.assertEquals(columnDesc.getCompressionType(), hfileReader.getCompressionAlgorithm());
  Assert.assertEquals(columnDesc.getDataBlockEncoding(), hfileReader.getDataBlockEncoding());
  hfileReader.close();
}
 
Example 3
Project: ditb   File: TestSCVFWithMiniCluster.java   Source Code and License Vote up 5 votes
/**
 * Creates a table holding the given column families, each limited to a
 * single version and compressed with GZ. A pre-existing table with the
 * same name is deliberately tolerated.
 *
 * @param admin     cluster admin used to issue the create call
 * @param tableName name of the table to create
 * @param families  one or more column family names
 * @throws IOException if the create call fails for any other reason
 */
private static void create(Admin admin, TableName tableName, byte[]... families)
    throws IOException {
  HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
  for (byte[] familyName : families) {
    HColumnDescriptor family = new HColumnDescriptor(familyName);
    family.setMaxVersions(1);
    family.setCompressionType(Algorithm.GZ);
    tableDescriptor.addFamily(family);
  }
  try {
    admin.createTable(tableDescriptor);
  } catch (TableExistsException ignored) {
    // Table already present (earlier run or concurrent creator) — fine.
  }
}
 
Example 4
Project: ditb   File: IndexTableRelation.java   Source Code and License Vote up 5 votes
/**
 * Builds a descriptor for {@code family} with both data block encoding and
 * compression explicitly set to NONE.
 *
 * @param family column family name
 * @return a plain, uncompressed, unencoded column descriptor
 */
public static HColumnDescriptor getDefaultColumnDescriptor(byte[] family) {
  HColumnDescriptor descriptor = new HColumnDescriptor(family);
  descriptor.setDataBlockEncoding(DataBlockEncoding.NONE);
  descriptor.setCompressionType(Compression.Algorithm.NONE);
  return descriptor;
}
 
Example 5
Project: ditb   File: ChangeCompressionAction.java   Source Code and License Vote up 4 votes
/**
 * Chaos action: switches every column family of the target table to one
 * randomly chosen — but locally usable — compression algorithm. For each
 * family it randomly targets either the regular or the compaction
 * compression setting, then applies the change via modifyTable().
 */
@Override
public void perform() throws Exception {
  HTableDescriptor tableDescriptor = admin.getTableDescriptor(tableName);
  HColumnDescriptor[] columnDescriptors = tableDescriptor.getColumnFamilies();

  if (columnDescriptors == null || columnDescriptors.length == 0) {
    return;
  }

  // modifyTable() does not throw a remote exception for an unsupported
  // codec, so probe locally first and use one algorithm for all families.
  Algorithm algo = pickSupportedAlgorithm();

  LOG.debug("Performing action: Changing compression algorithms on "
    + tableName.getNameAsString() + " to " + algo);
  for (HColumnDescriptor descriptor : columnDescriptors) {
    // Randomly exercise either the compaction or the regular setting.
    if (random.nextBoolean()) {
      descriptor.setCompactionCompressionType(algo);
    } else {
      descriptor.setCompressionType(algo);
    }
  }

  // Don't try the modify if we're stopping
  if (context.isStopping()) {
    return;
  }

  admin.modifyTable(tableName, tableDescriptor);
}

/**
 * Picks a random compression algorithm that is actually usable in this JVM,
 * verified by obtaining (and immediately releasing) a Compressor for it.
 * Retries until a supported algorithm is found.
 */
private Algorithm pickSupportedAlgorithm() {
  Algorithm[] possibleAlgos = Algorithm.values();
  while (true) {
    Algorithm algo = possibleAlgos[random.nextInt(possibleAlgos.length)];
    try {
      // getCompressor() fails when the codec's native support is missing.
      Compressor c = algo.getCompressor();

      // call returnCompressor() to release the Compressor
      algo.returnCompressor(c);
      return algo;
    } catch (Throwable t) {
      LOG.info("Performing action: Changing compression algorithms to " + algo +
              " is not supported, pick another one");
    }
  }
}