Java Code Examples for org.apache.hadoop.hbase.regionserver.StoreFile.BloomType

The following examples show how to use org.apache.hadoop.hbase.regionserver.StoreFile.BloomType. The examples are extracted from open source projects; the originating project, source file, and license are listed above each example.
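In the HBase 0.94-era API used throughout these examples, StoreFile.BloomType is an enum with three values: NONE (no Bloom filter), ROW (filter keyed on the row key), and ROWCOL (filter keyed on row key plus column qualifier). A minimal sketch of setting it on a column family, using the same HColumnDescriptor API as the examples below (the family name "f" is hypothetical):

HColumnDescriptor family = new HColumnDescriptor("f"); // hypothetical family name
// Exactly one granularity applies per family; each call overwrites the previous choice.
family.setBloomFilterType(BloomType.NONE);   // no Bloom filter for this family
family.setBloomFilterType(BloomType.ROW);    // filter on row keys only
family.setBloomFilterType(BloomType.ROWCOL); // filter on row key + column qualifier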
Example 1
Source Project: gemfirexd-oss   Source File: HFileSortedOplog.java    License: Apache License 2.0
public HFileSortedOplogWriter() throws IOException {
      writer = HFile.getWriterFactory(hconf, hcache)
          .withPath(fs, path)
          .withBlockSize(sopConfig.getBlockSize())
          .withBytesPerChecksum(sopConfig.getBytesPerChecksum())
          .withChecksumType(HFileSortedOplogFactory.convertChecksum(sopConfig.getChecksum()))
//          .withComparator(sopConfig.getComparator())
          .withCompression(HFileSortedOplogFactory.convertCompression(sopConfig.getCompression()))
          .withDataBlockEncoder(HFileSortedOplogFactory.convertEncoding(sopConfig.getKeyEncoding()))
          .create();
      
      bfw = sopConfig.isBloomFilterEnabled() ?
//          BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
//              0, writer, sopConfig.getComparator())
          BloomFilterFactory.createGeneralBloomAtWrite(hconf, hcache, BloomType.ROW,
              0, writer)
          : null;
    }
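The constructor above hands back a paired HFile writer and Bloom filter writer; the caller must feed both and attach the filter when the file is closed. A minimal sketch of that lifecycle, assuming the 0.94-era HFile.Writer and BloomFilterWriter methods used above (the method bodies are illustrative, not taken from the project):

void append(byte[] key, byte[] value) throws IOException {
  writer.append(key, value);     // keys must be appended in sorted order
  if (bfw != null) {
    bfw.add(key, 0, key.length); // record the row key in the ROW Bloom filter
  }
}

void close() throws IOException {
  if (bfw != null) {
    bfw.compactBloom();                // shrink the final Bloom chunk to its actual size
    writer.addGeneralBloomFilter(bfw); // embed the filter in the HFile's metadata
  }
  writer.close();
}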
 
Example 2
Source Project: gemfirexd-oss   Source File: HFileSortedOplog.java    License: Apache License 2.0
public HFileSortedOplogWriter(int keys) throws IOException {
      try {
        int hfileBlockSize = Integer.getInteger(
            HoplogConfig.HFILE_BLOCK_SIZE_CONF, (1 << 16));

        Algorithm compress = Algorithm.valueOf(System.getProperty(HoplogConfig.COMPRESSION,
            HoplogConfig.COMPRESSION_DEFAULT));

//        ByteComparator bc = new ByteComparator();
        writer = HFile.getWriterFactory(conf, cacheConf)
            .withPath(fsProvider.getFS(), path)
            .withBlockSize(hfileBlockSize)
//            .withComparator(bc)
            .withCompression(compress)
            .create();
        bfw = BloomFilterFactory.createGeneralBloomAtWrite(conf, cacheConf, BloomType.ROW, keys,
            writer);

        logger.fine("Created hoplog writer with compression " + compress);
      } catch (IOException e) {
        logger.fine("IO Error while creating writer");
        throw e;
      }
    }
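Unlike Example 1, this variant pulls its block size and compression from JVM system properties, so both can be overridden per process. A hedged sketch of doing so before constructing the writer (the property keys come from the example; the values shown are illustrative and must name a real Compression.Algorithm constant):

System.setProperty(HoplogConfig.HFILE_BLOCK_SIZE_CONF, String.valueOf(1 << 20)); // 1 MB blocks instead of the 64 KB default
System.setProperty(HoplogConfig.COMPRESSION, Algorithm.GZ.name());               // must match an Algorithm enum name exactly
HFileSortedOplogWriter w = new HFileSortedOplogWriter(1000000);                  // expected key count sizes the Bloom filter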
 
Example 3
Source Project: learning-hadoop   Source File: Configure.java    License: Apache License 2.0
public static void configColumnFamily(HColumnDescriptor desc) {
  desc.setMaxVersions(1);
  // Configure the type of filter this column family uses.
  // setBloomFilterType: enables a Bloom filter, which can improve random-read efficiency. Disabled by default.
  desc.setBloomFilterType(BloomType.ROW);
  // Set the data compression type. No compression by default.
  desc.setCompressionType(COMPRESS_TYPE);
}
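ROW is the common default: it helps any read that is keyed by row. ROWCOL pays off only for Gets that name an exact column qualifier, and it costs more space because every row/qualifier pair enters the filter. The alternative setting, using the same API (shown here only to illustrate the trade-off):

desc.setBloomFilterType(BloomType.ROWCOL); // larger filter; only worthwhile for exact row + column lookups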
 
Example 4
Source Project: learning-hadoop   Source File: TableBuilder.java    License: Apache License 2.0
/**
 * @param args
 */
public static void main(String[] args) {
  Configuration conf = HBaseConfiguration.create();

  byte[] columnFamily = Bytes.toBytes("f");

  String tableName = "t";

  try {
    ZKUtil.applyClusterKeyToConf(conf, "edh1:2181:/hbase");
    HBaseAdmin hba = new HBaseAdmin(conf);
    if (hba.tableExists(tableName)) {
      hba.disableTable(tableName);
      hba.deleteTable(tableName);
    }
    HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
    HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamily);
    columnDescriptor.setMaxVersions(1);
    columnDescriptor.setBloomFilterType(BloomType.ROW);
    tableDescriptor.addFamily(columnDescriptor);
    hba.createTable(tableDescriptor);
    hba.close();
  } catch (IOException e) {
    e.printStackTrace();
  }

}
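ZKUtil.applyClusterKeyToConf expands a host:port:znode cluster key into the individual ZooKeeper settings. If that helper is not available, the same connection can be expressed with the standard configuration keys; the values below mirror the example's "edh1:2181:/hbase" and are environment-specific:

Configuration conf = HBaseConfiguration.create();
conf.set("hbase.zookeeper.quorum", "edh1");              // ZooKeeper host(s)
conf.set("hbase.zookeeper.property.clientPort", "2181"); // ZooKeeper client port
conf.set("zookeeper.znode.parent", "/hbase");            // HBase root znode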
 
Example 5
Source Project: hadoop-arch-book   Source File: App.java    License: Apache License 2.0
private static boolean createTable(byte[] tableName, byte[] columnFamilyName,
    short regionCount, long regionMaxSize, HBaseAdmin admin)
    throws IOException {

  if (admin.tableExists(tableName)) {
    return false;
  }

  HTableDescriptor tableDescriptor = new HTableDescriptor();
  tableDescriptor.setName(tableName);

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);

  columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
  columnDescriptor.setBlocksize(64 * 1024);
  columnDescriptor.setBloomFilterType(BloomType.ROW);
  columnDescriptor.setMaxVersions(10);
  tableDescriptor.addFamily(columnDescriptor);

  tableDescriptor.setMaxFileSize(regionMaxSize);
  tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY,
      ConstantSizeRegionSplitPolicy.class.getName());

  tableDescriptor.setDeferredLogFlush(true);

  regionCount = (short) Math.abs(regionCount);

  int regionRange = Short.MAX_VALUE / regionCount;
  int counter = 0;

  byte[][] splitKeys = new byte[regionCount][];
  for (int i = 0; i < splitKeys.length; i++) {
    counter = counter + regionRange;
    String key = StringUtils.leftPad(Integer.toString(counter), 5, '0');
    splitKeys[i] = Bytes.toBytes(key); // store into the array; a for-each loop variable would discard the key
    System.out.println(" - Split: " + key);
  }

  admin.createTable(tableDescriptor, splitKeys);
  return true;
}
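A hypothetical invocation of the helper above (the table and family names, region count, and region size are made-up values for illustration):

Configuration conf = HBaseConfiguration.create();
HBaseAdmin admin = new HBaseAdmin(conf);
boolean created = createTable(Bytes.toBytes("events"), Bytes.toBytes("f"),
    (short) 16,               // pre-split into 16 regions
    10L * 1024 * 1024 * 1024, // split a region once it reaches ~10 GB
    admin);
System.out.println(created ? "Table created" : "Table already exists");
admin.close();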
 
Example 6
Source Project: HBase-ToHDFS   Source File: CreateTable.java    License: Apache License 2.0
private static void createTable(String tableName, String columnFamilyName,
    short regionCount, long regionMaxSize, HBaseAdmin admin)
    throws IOException {
  System.out.println("Creating Table: " + tableName);
  
  HTableDescriptor tableDescriptor = new HTableDescriptor(); 
  tableDescriptor.setName(Bytes.toBytes(tableName));
  
  HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);
  
  columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
  columnDescriptor.setBlocksize(64 * 1024);
  columnDescriptor.setBloomFilterType(BloomType.ROW);
  
  tableDescriptor.addFamily(columnDescriptor);
  
  tableDescriptor.setMaxFileSize(regionMaxSize);
  tableDescriptor.setValue(HTableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());
  
  tableDescriptor.setDeferredLogFlush(true);
  
  regionCount = (short)Math.abs(regionCount);
  
  int regionRange = Short.MAX_VALUE/regionCount;
  int counter = 0;
  
  byte[][] splitKeys = new byte[regionCount][];
  for (int i = 0 ; i < splitKeys.length; i++) {
    counter = counter + regionRange;
    String key = StringUtils.leftPad(Integer.toString(counter), 5, '0');
    splitKeys[i] = Bytes.toBytes(key); 
    System.out.println(" - Split: " + i + " '" + key + "'");
  }
  
  admin.createTable(tableDescriptor, splitKeys);
}
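After createTable returns, the pre-splits can be sanity-checked through the admin API. A brief sketch, assuming the 0.94-era HBaseAdmin.getTableRegions method; a table pre-split with N keys should report N + 1 regions:

List<HRegionInfo> regions = admin.getTableRegions(Bytes.toBytes(tableName));
System.out.println("Table " + tableName + " has " + regions.size() + " regions"); // regionCount split keys => regionCount + 1 regions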