Java Code Examples for org.apache.hadoop.hbase.HColumnDescriptor#setBlocksize()
The following examples show how to use org.apache.hadoop.hbase.HColumnDescriptor#setBlocksize().
The reference above each example points to the original project and source file.
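Before the project examples, here is a minimal, self-contained sketch of the basic pattern: configure a column family's HFile block size, attach the family to a table descriptor, and create the table. The class, table, and family names are placeholders, and an HBase 1.x-era client API is assumed.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HColumnDescriptor;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SetBlocksizeSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
                Admin admin = connection.getAdmin()) {
            HColumnDescriptor cf = new HColumnDescriptor("d"); // placeholder family name
            cf.setBlocksize(64 * 1024); // HFile block size in bytes; 64 KB is the HBase default
            HTableDescriptor table = new HTableDescriptor(TableName.valueOf("example_table"));
            table.addFamily(cf);
            admin.createTable(table);
        }
    }
}

The block size is a per-column-family setting that controls the unit in which HFile data is read and cached; changing it only affects files written afterwards. In HBase 2.x, HColumnDescriptor is deprecated in favor of ColumnFamilyDescriptorBuilder, which exposes the same setting through its setBlocksize(int) builder method.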
Example 1
Source File: BasicHadoopTest.java from Kylin (Apache License 2.0)
@Test
public void testCreateHtable() throws IOException {
    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf("testhbase"));
    tableDesc.setValue("KYLIN_HOST", "dev01");

    HColumnDescriptor cf = new HColumnDescriptor("f");
    cf.setMaxVersions(1);
    cf.setInMemory(true);
    cf.setBlocksize(4 * 1024 * 1024); // set to 4MB
    tableDesc.addFamily(cf);

    Configuration conf = HBaseConfiguration.create();
    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTable(tableDesc);
    admin.close();
}
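The 4 MB block size here is well above HBase's 64 KB default. Larger HFile blocks shrink the block index and suit the long sequential scans typical of Kylin cube reads, at the price of more I/O per random point lookup.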
Example 2
Source File: GridTableHBaseBenchmark.java from kylin-on-parquet-v2 (Apache License 2.0)
private static void createHTableIfNeeded(Connection conn, String tableName) throws IOException {
    Admin hbase = conn.getAdmin();
    try {
        boolean tableExist = false;
        try {
            hbase.getTableDescriptor(TableName.valueOf(tableName));
            tableExist = true;
        } catch (TableNotFoundException e) {
            // table does not exist yet; fall through and create it
        }

        if (tableExist) {
            logger.info("HTable '{}' already exists", tableName);
            return;
        }

        logger.info("Creating HTable '{}'", tableName);
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
        HColumnDescriptor fd = new HColumnDescriptor(CF);
        fd.setBlocksize(CELL_SIZE);
        desc.addFamily(fd);
        hbase.createTable(desc);
        logger.info("HTable '{}' created", tableName);
    } finally {
        hbase.close();
    }
}
Example 3
Source File: GridTableHBaseBenchmark.java from kylin (Apache License 2.0)
private static void createHTableIfNeeded(Connection conn, String tableName) throws IOException {
    Admin hbase = conn.getAdmin();
    try {
        boolean tableExist = false;
        try {
            hbase.getTableDescriptor(TableName.valueOf(tableName));
            tableExist = true;
        } catch (TableNotFoundException e) {
            // table does not exist yet; fall through and create it
        }

        if (tableExist) {
            logger.info("HTable '{}' already exists", tableName);
            return;
        }

        logger.info("Creating HTable '{}'", tableName);
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
        HColumnDescriptor fd = new HColumnDescriptor(CF);
        fd.setBlocksize(CELL_SIZE);
        desc.addFamily(fd);
        hbase.createTable(desc);
        logger.info("HTable '{}' created", tableName);
    } finally {
        hbase.close();
    }
}
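Examples 2 and 3 are the same benchmark helper carried across two Kylin lineages. The existence check probes with getTableDescriptor and treats TableNotFoundException as absence; Admin#tableExists(TableName) would express the same intent more directly.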
Example 4
Source File: HBaseCreateTable.java from SparkOnALog (Apache License 2.0)
public static void main(String[] args) throws IOException {
    if (args.length < 2) { // both arguments are required
        System.out.println("CreateTable {tableName} {columnFamilyName}");
        return;
    }

    String tableName = args[0];
    String columnFamilyName = args[1];

    HBaseAdmin admin = new HBaseAdmin(new Configuration());

    HTableDescriptor tableDescriptor = new HTableDescriptor();
    tableDescriptor.setName(Bytes.toBytes(tableName));

    HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);
    columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
    columnDescriptor.setBlocksize(64 * 1024);
    columnDescriptor.setBloomFilterType(BloomType.ROW);

    tableDescriptor.addFamily(columnDescriptor);
    //tableDescriptor.setValue(tableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());

    System.out.println("-Creating Table");
    admin.createTable(tableDescriptor);
    admin.close();
    System.out.println("-Done");
}
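The 64 * 1024 passed to setBlocksize matches HBase's default block size, so the call mainly makes the choice explicit next to SNAPPY compression and a ROW bloom filter. Keep in mind that SNAPPY depends on native codec support on the cluster.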
Example 5
Source File: App.java from hadoop-arch-book (Apache License 2.0)
private static boolean createTable(byte[] tableName, byte[] columnFamilyName,
        short regionCount, long regionMaxSize, HBaseAdmin admin) throws IOException {
    if (admin.tableExists(tableName)) {
        return false;
    }

    HTableDescriptor tableDescriptor = new HTableDescriptor();
    tableDescriptor.setName(tableName);

    HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);
    columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
    columnDescriptor.setBlocksize(64 * 1024);
    columnDescriptor.setBloomFilterType(BloomType.ROW);
    columnDescriptor.setMaxVersions(10);
    tableDescriptor.addFamily(columnDescriptor);

    tableDescriptor.setMaxFileSize(regionMaxSize);
    tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
    tableDescriptor.setDeferredLogFlush(true);

    regionCount = (short) Math.abs(regionCount);
    int regionRange = Short.MAX_VALUE / regionCount;
    int counter = 0;
    byte[][] splitKeys = new byte[regionCount][];
    // Populate the split keys with an indexed loop; assigning to the variable
    // of an enhanced for loop would not write the keys back into the array.
    for (int i = 0; i < splitKeys.length; i++) {
        counter = counter + regionRange;
        String key = StringUtils.leftPad(Integer.toString(counter), 5, '0');
        splitKeys[i] = Bytes.toBytes(key);
        System.out.println(" - Split: " + key);
    }
    // create the table pre-split at the computed keys
    admin.createTable(tableDescriptor, splitKeys);
    return true;
}
Example 6
Source File: GridTableHBaseBenchmark.java from Kylin (Apache License 2.0)
private static void createHTableIfNeeded(HConnection conn, String tableName) throws IOException {
    HBaseAdmin hbase = new HBaseAdmin(conn);
    try {
        boolean tableExist = false;
        try {
            hbase.getTableDescriptor(TableName.valueOf(tableName));
            tableExist = true;
        } catch (TableNotFoundException e) {
            // table does not exist yet; fall through and create it
        }

        if (tableExist) {
            System.out.println("HTable '" + tableName + "' already exists");
            return;
        }

        System.out.println("Creating HTable '" + tableName + "'");
        HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(tableName));
        HColumnDescriptor fd = new HColumnDescriptor(CF);
        fd.setBlocksize(CELL_SIZE);
        desc.addFamily(fd);
        hbase.createTable(desc);
        System.out.println("HTable '" + tableName + "' created");
    } finally {
        hbase.close();
    }
}
Example 7
Source File: CreateTable.java from HBase-ToHDFS (Apache License 2.0)
private static void createTable(String tableName, String columnFamilyName,
        short regionCount, long regionMaxSize, HBaseAdmin admin) throws IOException {
    System.out.println("Creating Table: " + tableName);

    HTableDescriptor tableDescriptor = new HTableDescriptor();
    tableDescriptor.setName(Bytes.toBytes(tableName));

    HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);
    columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
    columnDescriptor.setBlocksize(64 * 1024);
    columnDescriptor.setBloomFilterType(BloomType.ROW);

    tableDescriptor.addFamily(columnDescriptor);
    tableDescriptor.setMaxFileSize(regionMaxSize);
    tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
    tableDescriptor.setDeferredLogFlush(true);

    regionCount = (short) Math.abs(regionCount);
    int regionRange = Short.MAX_VALUE / regionCount;
    int counter = 0;
    byte[][] splitKeys = new byte[regionCount][];
    for (int i = 0; i < splitKeys.length; i++) {
        counter = counter + regionRange;
        String key = StringUtils.leftPad(Integer.toString(counter), 5, '0');
        splitKeys[i] = Bytes.toBytes(key);
        System.out.println(" - Split: " + i + " '" + key + "'");
    }
    admin.createTable(tableDescriptor, splitKeys);
}
Example 8
Source File: HBaseCreateTable.java from Kafka-Spark-Hbase-Example (Apache License 2.0)
public static void main(String[] args) throws IOException {
    if (args.length < 2) { // both arguments are required
        System.out.println("CreateTable {tableName} {columnFamilyName}");
        return;
    }

    String tableName = args[0];
    String columnFamilyName = args[1];

    HBaseAdmin admin = new HBaseAdmin(new Configuration());

    HTableDescriptor tableDescriptor = new HTableDescriptor();
    tableDescriptor.setName(Bytes.toBytes(tableName));

    HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);
    columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
    columnDescriptor.setBlocksize(64 * 1024);
    columnDescriptor.setBloomFilterType(BloomType.ROW);

    tableDescriptor.addFamily(columnDescriptor);
    //tableDescriptor.setValue(tableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());

    System.out.println("-Creating Table");
    admin.createTable(tableDescriptor);
    admin.close();
    System.out.println("-Done");
}
Example 9
Source File: CubeHTableUtil.java from kylin-on-parquet-v2 (Apache License 2.0)
public static HColumnDescriptor createColumnFamily(KylinConfig kylinConfig, String cfName, boolean isMemoryHungry) {
    HColumnDescriptor cf = new HColumnDescriptor(cfName);
    cf.setMaxVersions(1);

    if (isMemoryHungry) {
        cf.setBlocksize(kylinConfig.getHbaseDefaultBlockSize());
    } else {
        cf.setBlocksize(kylinConfig.getHbaseSmallFamilyBlockSize());
    }

    String hbaseDefaultCC = kylinConfig.getHbaseDefaultCompressionCodec().toLowerCase(Locale.ROOT);
    switch (hbaseDefaultCC) {
        case "snappy": {
            logger.info("hbase will use snappy to compress data");
            cf.setCompressionType(Algorithm.SNAPPY);
            break;
        }
        case "lzo": {
            logger.info("hbase will use lzo to compress data");
            cf.setCompressionType(Algorithm.LZO);
            break;
        }
        case "gz":
        case "gzip": {
            logger.info("hbase will use gzip to compress data");
            cf.setCompressionType(Algorithm.GZ);
            break;
        }
        case "lz4": {
            logger.info("hbase will use lz4 to compress data");
            cf.setCompressionType(Algorithm.LZ4);
            break;
        }
        case "none":
        default: {
            logger.info("hbase will not use any compression algorithm to compress data");
            cf.setCompressionType(Algorithm.NONE);
        }
    }

    try {
        String encodingStr = kylinConfig.getHbaseDefaultEncoding();
        DataBlockEncoding encoding = DataBlockEncoding.valueOf(encodingStr);
        cf.setDataBlockEncoding(encoding);
    } catch (Exception e) {
        logger.info("hbase will not use any encoding", e);
        cf.setDataBlockEncoding(DataBlockEncoding.NONE);
    }

    cf.setInMemory(false);
    cf.setBloomFilterType(BloomType.NONE);
    cf.setScope(kylinConfig.getHBaseReplicationScope());
    return cf;
}
Example 10
Source File: CubeHTableUtil.java from kylin (Apache License 2.0)
public static HColumnDescriptor createColumnFamily(KylinConfig kylinConfig, String cfName, boolean isMemoryHungry) {
    HColumnDescriptor cf = new HColumnDescriptor(cfName);
    cf.setMaxVersions(1);

    if (isMemoryHungry) {
        cf.setBlocksize(kylinConfig.getHbaseDefaultBlockSize());
    } else {
        cf.setBlocksize(kylinConfig.getHbaseSmallFamilyBlockSize());
    }

    String hbaseDefaultCC = kylinConfig.getHbaseDefaultCompressionCodec().toLowerCase(Locale.ROOT);
    switch (hbaseDefaultCC) {
        case "snappy": {
            logger.info("hbase will use snappy to compress data");
            cf.setCompressionType(Algorithm.SNAPPY);
            break;
        }
        case "lzo": {
            logger.info("hbase will use lzo to compress data");
            cf.setCompressionType(Algorithm.LZO);
            break;
        }
        case "gz":
        case "gzip": {
            logger.info("hbase will use gzip to compress data");
            cf.setCompressionType(Algorithm.GZ);
            break;
        }
        case "lz4": {
            logger.info("hbase will use lz4 to compress data");
            cf.setCompressionType(Algorithm.LZ4);
            break;
        }
        case "none":
        default: {
            logger.info("hbase will not use any compression algorithm to compress data");
            cf.setCompressionType(Algorithm.NONE);
        }
    }

    try {
        String encodingStr = kylinConfig.getHbaseDefaultEncoding();
        DataBlockEncoding encoding = DataBlockEncoding.valueOf(encodingStr);
        cf.setDataBlockEncoding(encoding);
    } catch (Exception e) {
        logger.info("hbase will not use any encoding", e);
        cf.setDataBlockEncoding(DataBlockEncoding.NONE);
    }

    cf.setInMemory(false);
    cf.setBloomFilterType(BloomType.NONE);
    cf.setScope(kylinConfig.getHBaseReplicationScope());
    return cf;
}
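In both Kylin variants the block size comes from KylinConfig rather than being hard-coded: memory-hungry column families get the (larger) default block size, other families a smaller one, and the compression codec and data block encoding fall back to NONE when the configured values are unsupported.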
Example 11
Source File: CreateHTableJob.java from Kylin (Apache License 2.0)
@Override
public int run(String[] args) throws Exception {
    Options options = new Options();
    options.addOption(OPTION_CUBE_NAME);
    options.addOption(OPTION_PARTITION_FILE_PATH);
    options.addOption(OPTION_HTABLE_NAME);
    parseOptions(options, args);

    Path partitionFilePath = new Path(getOptionValue(OPTION_PARTITION_FILE_PATH));

    String cubeName = getOptionValue(OPTION_CUBE_NAME).toUpperCase();
    KylinConfig config = KylinConfig.getInstanceFromEnv();
    CubeManager cubeMgr = CubeManager.getInstance(config);
    CubeInstance cube = cubeMgr.getCube(cubeName);
    CubeDesc cubeDesc = cube.getDescriptor();

    String tableName = getOptionValue(OPTION_HTABLE_NAME).toUpperCase();
    HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName));
    // https://hbase.apache.org/apidocs/org/apache/hadoop/hbase/regionserver/ConstantSizeRegionSplitPolicy.html
    tableDesc.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
    tableDesc.setValue(IRealizationConstants.HTableTag, config.getMetadataUrlPrefix());

    Configuration conf = HBaseConfiguration.create(getConf());
    HBaseAdmin admin = new HBaseAdmin(conf);

    try {
        if (User.isHBaseSecurityEnabled(conf)) {
            // add coprocessor for bulk load
            tableDesc.addCoprocessor("org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint");
        }

        for (HBaseColumnFamilyDesc cfDesc : cubeDesc.getHBaseMapping().getColumnFamily()) {
            HColumnDescriptor cf = new HColumnDescriptor(cfDesc.getName());
            cf.setMaxVersions(1);

            if (LZOSupportnessChecker.getSupportness()) {
                logger.info("hbase will use lzo to compress data");
                cf.setCompressionType(Algorithm.LZO);
            } else {
                logger.info("hbase will not use lzo to compress data");
            }

            cf.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
            cf.setInMemory(false);
            cf.setBlocksize(4 * 1024 * 1024); // set to 4MB
            tableDesc.addFamily(cf);
        }

        byte[][] splitKeys = getSplits(conf, partitionFilePath);

        if (admin.tableExists(tableName)) {
            // admin.disableTable(tableName);
            // admin.deleteTable(tableName);
            throw new RuntimeException("HBase table " + tableName + " exists!");
        }

        DeployCoprocessorCLI.deployCoprocessor(tableDesc);

        admin.createTable(tableDesc, splitKeys);

        logger.info("create hbase table " + tableName + " done.");
        return 0;
    } catch (Exception e) {
        printUsage(options);
        e.printStackTrace(System.err);
        logger.error(e.getLocalizedMessage(), e);
        return 2;
    } finally {
        admin.close();
    }
}
Example 12
Source File: TableCommand.java from pinpoint (Apache License 2.0)
private HColumnDescriptor newColumnDescriptor(ColumnFamilyChange columnFamilyChange) {
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamilyChange.getName());
    ColumnFamilyConfiguration columnFamilyConfiguration = columnFamilyChange.getColumnFamilyConfiguration();

    Boolean blockCacheEnabled = columnFamilyConfiguration.getBlockCacheEnabled();
    if (blockCacheEnabled != null) {
        hcd.setBlockCacheEnabled(blockCacheEnabled);
    }
    Integer replicationScope = columnFamilyConfiguration.getReplicationScope();
    if (replicationScope != null) {
        hcd.setScope(replicationScope);
    }
    Boolean inMemory = columnFamilyConfiguration.getInMemory();
    if (inMemory != null) {
        hcd.setInMemory(inMemory);
    }
    Integer timeToLive = columnFamilyConfiguration.getTimeToLive();
    if (timeToLive != null) {
        hcd.setTimeToLive(timeToLive);
    }
    ColumnFamilyConfiguration.DataBlockEncoding dataBlockEncoding = columnFamilyConfiguration.getDataBlockEncoding();
    if (dataBlockEncoding != null) {
        hcd.setDataBlockEncoding(DataBlockEncoding.valueOf(dataBlockEncoding.name()));
    }
    Integer blockSize = columnFamilyConfiguration.getBlockSize();
    if (blockSize != null) {
        hcd.setBlocksize(blockSize);
    }
    Integer maxVersions = columnFamilyConfiguration.getMaxVersions();
    if (maxVersions != null) {
        hcd.setMaxVersions(maxVersions);
    }
    Integer minVersions = columnFamilyConfiguration.getMinVersions();
    if (minVersions != null) {
        hcd.setMinVersions(minVersions);
    }
    ColumnFamilyConfiguration.BloomFilter bloomFilter = columnFamilyConfiguration.getBloomFilter();
    if (bloomFilter != null) {
        hcd.setBloomFilterType(BloomType.valueOf(bloomFilter.name()));
    }
    if (compressionAlgorithm != Compression.Algorithm.NONE) {
        hcd.setCompressionType(compressionAlgorithm);
    }
    return hcd;
}
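Unlike the earlier examples, Pinpoint applies each setting, including setBlocksize(), only when the corresponding value is present in its configuration, so unset properties keep the HBase column family defaults.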