Java Code Examples for org.apache.hadoop.hbase.HTableDescriptor#setMaxFileSize()

The following examples show how to use org.apache.hadoop.hbase.HTableDescriptor#setMaxFileSize(). Each example notes the open-source project and source file it was taken from.
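setMaxFileSize() stores a table-level maximum region size in bytes: once the largest store file in a region grows past this value, the region becomes a candidate for splitting, subject to the table's split policy. As a minimal sketch (using the same classic HTableDescriptor API as the examples below; the table name "example", the family "d", and the open HBaseAdmin handle admin are hypothetical):

// Minimal sketch, not taken from the projects below: create a table whose
// regions are split once their largest store file exceeds roughly 10 GB.
HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("example"));
desc.addFamily(new HColumnDescriptor("d"));
desc.setMaxFileSize(10L * 1024 * 1024 * 1024); // value is in bytes
admin.createTable(desc);

Examples 1 and 3 below pair the call with ConstantSizeRegionSplitPolicy, which splits purely on this size; Example 2 relies on the cluster's default split policy.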
Example 1
Source File: App.java    From hadoop-arch-book with Apache License 2.0
private static boolean createTable(byte[] tableName, byte[] columnFamilyName,
    short regionCount, long regionMaxSize, HBaseAdmin admin)
    throws IOException {

  if (admin.tableExists(tableName)) {
    return false;
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor();
  tableDescriptor.setName(tableName);

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);

  columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
  columnDescriptor.setBlocksize(64 * 1024);
  columnDescriptor.setBloomFilterType(BloomType.ROW);
  columnDescriptor.setMaxVersions(10);
  tableDescriptor.addFamily(columnDescriptor);

  tableDescriptor.setMaxFileSize(regionMaxSize);
  // ConstantSizeRegionSplitPolicy splits a region once its largest store file
  // exceeds the maximum file size set above.
  tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY,
      ConstantSizeRegionSplitPolicy.class.getName());

  // Deferred log flush trades some WAL durability for higher write throughput.
  tableDescriptor.setDeferredLogFlush(true);

  regionCount = (short) Math.abs(regionCount);

  int regionRange = Short.MAX_VALUE / regionCount;
  int counter = 0;

  byte[][] splitKeys = new byte[regionCount][];
  for (int i = 0; i < splitKeys.length; i++) {
    counter = counter + regionRange;
    String key = StringUtils.leftPad(Integer.toString(counter), 5, '0');
    splitKeys[i] = Bytes.toBytes(key);
    System.out.println(" - Split: " + key);
  }

  admin.createTable(tableDescriptor, splitKeys);
  return true;
}
 
Example 2
Source File: Create3.java    From examples with Apache License 2.0
public static void main(String[] args) throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
  Configuration conf = HBaseConfiguration.create();
  HBaseAdmin admin = new HBaseAdmin(conf);
  // tag::CREATE3[]
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("crc"));
  desc.setMaxFileSize(20L * 1024 * 1024 * 1024); // split regions once the largest store file exceeds 20 GB
  desc.setConfiguration("hbase.hstore.compaction.min", "5");
  HColumnDescriptor family = new HColumnDescriptor("c");
  family.setInMemory(true);
  desc.addFamily(family);
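  // Pre-split the new table into 64 regions with evenly spaced byte boundaries.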
  UniformSplit uniformSplit = new UniformSplit();
  admin.createTable(desc, uniformSplit.split(64));
  // end::CREATE3[]
  admin.close();
}
 
Example 3
Source File: CreateTable.java    From HBase-ToHDFS with Apache License 2.0
private static void createTable(String tableName, String columnFamilyName,
    short regionCount, long regionMaxSize, HBaseAdmin admin)
    throws IOException {
  System.out.println("Creating Table: " + tableName);
  
  HTableDescriptor tableDescriptor = new HTableDescriptor(); 
  tableDescriptor.setName(Bytes.toBytes(tableName));
  
  HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);
  
  columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
  columnDescriptor.setBlocksize(64 * 1024);
  columnDescriptor.setBloomFilterType(BloomType.ROW);
  
  tableDescriptor.addFamily(columnDescriptor);
  
  tableDescriptor.setMaxFileSize(regionMaxSize);
  tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
  
  tableDescriptor.setDeferredLogFlush(true);
  
  regionCount = (short)Math.abs(regionCount);
  
  int regionRange = Short.MAX_VALUE/regionCount;
  int counter = 0;
  
  byte[][] splitKeys = new byte[regionCount][];
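  // Generate evenly spaced, zero-padded split keys for pre-splitting the table.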
  for (int i = 0 ; i < splitKeys.length; i++) {
    counter = counter + regionRange;
    String key = StringUtils.leftPad(Integer.toString(counter), 5, '0');
    splitKeys[i] = Bytes.toBytes(key); 
    System.out.println(" - Split: " + i + " '" + key + "'");
  }
  
  admin.createTable(tableDescriptor, splitKeys);
}