org.apache.hadoop.hbase.regionserver.BloomType Java Examples
The following examples show how to use
org.apache.hadoop.hbase.regionserver.BloomType.
Each example is taken from an open-source project; the originating project, source file, and license are noted above the example.
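Before the examples, a quick orientation: BloomType selects which Bloom filter, if any, is written into a column family's store files. The values used in the examples below are NONE (no filter), ROW (filter on row keys), and ROWCOL (filter on row-plus-column entries). As a minimal sketch of the API most of these examples revolve around, here is a hedged, self-contained example; the table name "demo" and family "d" are made up for illustration and do not come from any example on this page:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class BloomTypeSketch {
  public static void main(String[] args) throws IOException {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Hypothetical table "demo" with one family "d" that keeps a row-level Bloom filter.
      TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("d"))
          .setBloomFilterType(BloomType.ROW) // NONE, ROW, or ROWCOL
          .build());
      admin.createTable(table.build());
    }
  }
}

The older HTableDescriptor/HColumnDescriptor API sets the same property through setBloomFilterType, as several of the examples below show.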
Example #1
Source File: AccessController.java From hbase with Apache License 2.0

/**
 * Create the ACL table
 * @throws IOException
 */
private static void createACLTable(Admin admin) throws IOException {
  /** Table descriptor for ACL table */
  ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder
      .newBuilder(PermissionStorage.ACL_LIST_FAMILY)
      .setMaxVersions(1)
      .setInMemory(true)
      .setBlockCacheEnabled(true)
      .setBlocksize(8 * 1024)
      .setBloomFilterType(BloomType.NONE)
      .setScope(HConstants.REPLICATION_SCOPE_LOCAL)
      .build();
  TableDescriptor td = TableDescriptorBuilder
      .newBuilder(PermissionStorage.ACL_TABLE_NAME)
      .setColumnFamily(cfd)
      .build();
  admin.createTable(td);
}
Example #2
Source File: HBaseBasedAuditRepository.java From incubator-atlas with Apache License 2.0

private void createTableIfNotExists() throws AtlasException {
  Admin admin = null;
  try {
    admin = connection.getAdmin();
    LOG.info("Checking if table {} exists", tableName.getNameAsString());
    if (!admin.tableExists(tableName)) {
      LOG.info("Creating table {}", tableName.getNameAsString());
      HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
      HColumnDescriptor columnFamily = new HColumnDescriptor(COLUMN_FAMILY);
      columnFamily.setMaxVersions(1);
      columnFamily.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
      columnFamily.setCompressionType(Compression.Algorithm.GZ);
      columnFamily.setBloomFilterType(BloomType.ROW);
      tableDescriptor.addFamily(columnFamily);
      admin.createTable(tableDescriptor);
    } else {
      LOG.info("Table {} exists", tableName.getNameAsString());
    }
  } catch (IOException e) {
    throw new AtlasException(e);
  } finally {
    close(admin);
  }
}
Example #3
Source File: HBaseEntitySchemaManager.java From eagle with Apache License 2.0

private void createTable(EntityDefinition entityDefinition) throws IOException {
  String tableName = entityDefinition.getTable();
  if (admin.tableExists(tableName)) {
    LOG.info("Table {} already exists", tableName);
  } else {
    HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));

    // Adding column families to table descriptor
    HColumnDescriptor columnDescriptor = new HColumnDescriptor(entityDefinition.getColumnFamily());
    columnDescriptor.setBloomFilterType(BloomType.ROW);
    //columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
    columnDescriptor.setMaxVersions(DEFAULT_MAX_VERSIONS);
    tableDescriptor.addFamily(columnDescriptor);

    // Execute the table through admin
    admin.createTable(tableDescriptor);
    LOG.info("Successfully create Table {}", tableName);
  }
}
Example #4
Source File: CreateTable.java From examples with Apache License 2.0

public static void main(String[] args)
    throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
  try (Connection connection = ConnectionFactory.createConnection();
       Admin admin = connection.getAdmin();) {
    LOG.info("Starting table creation");
    // tag::CREATE[]
    TableName documents = TableName.valueOf("documents");
    HTableDescriptor desc = new HTableDescriptor(documents);
    HColumnDescriptor family = new HColumnDescriptor("c");
    family.setCompressionType(Algorithm.GZ);
    family.setBloomFilterType(BloomType.NONE);
    desc.addFamily(family);
    UniformSplit uniformSplit = new UniformSplit();
    admin.createTable(desc, uniformSplit.split(8));
    // end::CREATE[]
    LOG.info("Table successfuly created");
  }
}
Example #5
Source File: TestHFileOutputFormat2.java From hbase with Apache License 2.0

/**
 * @return a map from column family names to bloom filter types for
 *     testing column family bloom filter settings. Column family names have special characters
 */
private Map<String, BloomType> getMockColumnFamiliesForBloomType(int numCfs) {
  Map<String, BloomType> familyToBloomType = new HashMap<>();
  // use column family names having special characters
  if (numCfs-- > 0) {
    familyToBloomType.put("Family1!@#!@#&", BloomType.ROW);
  }
  if (numCfs-- > 0) {
    familyToBloomType.put("Family2=asdads&!AASD", BloomType.ROWCOL);
  }
  if (numCfs-- > 0) {
    familyToBloomType.put("Family3", BloomType.NONE);
  }
  return familyToBloomType;
}
Example #6
Source File: HBaseBasedAuditRepository.java From atlas with Apache License 2.0

private void createTableIfNotExists() throws AtlasException {
  Admin admin = null;
  try {
    admin = connection.getAdmin();
    LOG.info("Checking if table {} exists", tableName.getNameAsString());
    if (!admin.tableExists(tableName)) {
      LOG.info("Creating table {}", tableName.getNameAsString());
      HTableDescriptor tableDescriptor = new HTableDescriptor(tableName);
      HColumnDescriptor columnFamily = new HColumnDescriptor(COLUMN_FAMILY);
      columnFamily.setMaxVersions(1);
      columnFamily.setDataBlockEncoding(DataBlockEncoding.FAST_DIFF);
      columnFamily.setCompressionType(Compression.Algorithm.GZ);
      columnFamily.setBloomFilterType(BloomType.ROW);
      tableDescriptor.addFamily(columnFamily);
      admin.createTable(tableDescriptor);
    } else {
      LOG.info("Table {} exists", tableName.getNameAsString());
    }
  } catch (IOException e) {
    throw new AtlasException(e);
  } finally {
    close(admin);
  }
}
Example #7
Source File: CreateTableIT.java From phoenix with Apache License 2.0

@Test
public void testCreateTableColumnFamilyHBaseAttribs8() throws Exception {
  String tableName = generateUniqueName();
  String ddl = "create table IF NOT EXISTS " + tableName + " ("
      + " id char(1) NOT NULL,"
      + " col1 integer NOT NULL,"
      + " col2 bigint NOT NULL,"
      + " CONSTRAINT NAME_PK PRIMARY KEY (id, col1, col2)"
      + " ) BLOOMFILTER = 'ROW', SALT_BUCKETS = 4";
  Properties props = new Properties();
  Connection conn = DriverManager.getConnection(getUrl(), props);
  conn.createStatement().execute(ddl);
  Admin admin = driver.getConnectionQueryServices(getUrl(), props).getAdmin();
  ColumnFamilyDescriptor[] columnFamilies =
      admin.getDescriptor(TableName.valueOf(tableName)).getColumnFamilies();
  assertEquals(BloomType.ROW, columnFamilies[0].getBloomFilterType());
}
Example #8
Source File: Create2.java From examples with Apache License 2.0

public static void main(String[] args)
    throws MasterNotRunningException, ZooKeeperConnectionException, IOException {
  Configuration conf = HBaseConfiguration.create();
  HBaseAdmin admin = new HBaseAdmin(conf);
  // tag::CREATE2[]
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf("pages"));
  byte[][] splits = {Bytes.toBytes("b"), Bytes.toBytes("f"), Bytes.toBytes("k"),
      Bytes.toBytes("n"), Bytes.toBytes("t")};
  desc.setValue(Bytes.toBytes("comment"), Bytes.toBytes("Create 10012014"));
  HColumnDescriptor family = new HColumnDescriptor("c");
  family.setCompressionType(Algorithm.GZ);
  family.setMaxVersions(52);
  family.setBloomFilterType(BloomType.ROW);
  desc.addFamily(family);
  admin.createTable(desc, splits);
  // end::CREATE2[]
  admin.close();
}
Example #9
Source File: BloomFilterUtil.java From hbase with Apache License 2.0

/**
 * Creates a Bloom filter chunk of the given size.
 *
 * @param byteSizeHint the desired number of bytes for the Bloom filter bit
 *     array. Will be increased so that folding is possible.
 * @param errorRate target false positive rate of the Bloom filter
 * @param hashType Bloom filter hash function type
 * @param foldFactor
 * @param bloomType
 * @return the new Bloom filter of the desired size
 */
public static BloomFilterChunk createBySize(int byteSizeHint, double errorRate,
    int hashType, int foldFactor, BloomType bloomType) {
  BloomFilterChunk bbf = new BloomFilterChunk(hashType, bloomType);

  bbf.byteSize = computeFoldableByteSize(byteSizeHint * 8L, foldFactor);
  long bitSize = bbf.byteSize * 8;
  bbf.maxKeys = (int) idealMaxKeys(bitSize, errorRate);
  bbf.hashCount = optimalFunctionCount(bbf.maxKeys, bitSize);

  // Adjust max keys to bring error rate closer to what was requested,
  // because byteSize was adjusted to allow for folding, and hashCount was
  // rounded.
  bbf.maxKeys = (int) computeMaxKeys(bitSize, errorRate, bbf.hashCount);

  return bbf;
}
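For context, these are the standard Bloom filter sizing relations, which the helpers above presumably implement: for a bit array of m bits and a target false-positive rate e, maxKeys is roughly m * (ln 2)^2 / (-ln e) and hashCount is roughly (m / maxKeys) * ln 2. A hypothetical call is sketched below; the MURMUR_HASH constant from org.apache.hadoop.hbase.util.Hash is an assumption and does not appear in the example above:

// Roughly a 128 KB row Bloom filter targeting a 1% false-positive rate, with no folding.
BloomFilterChunk chunk =
    BloomFilterUtil.createBySize(128 * 1024, 0.01, Hash.MURMUR_HASH, 0, BloomType.ROW);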
Example #10
Source File: MobSnapshotTestingUtils.java From hbase with Apache License 2.0

/**
 * Create a Mob table.
 *
 * @param util
 * @param tableName
 * @param families
 * @return An Table instance for the created table.
 * @throws IOException
 */
public static Table createMobTable(final HBaseTestingUtility util, final TableName tableName,
    final byte[]... families) throws IOException {
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(tableName);
  for (byte[] family : families) {
    // Disable blooms (they are on by default as of 0.95) but we disable them here because
    // tests have hard coded counts of what to expect in block cache, etc., and blooms being
    // on is interfering.
    builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(family)
        .setBloomFilterType(BloomType.NONE)
        .setMobEnabled(true)
        .setMobThreshold(0L)
        .build());
  }
  util.getAdmin().createTable(builder.build());
  // HBaseAdmin only waits for regions to appear in hbase:meta; we should wait
  // until they are assigned
  util.waitUntilAllRegionsAssigned(tableName);
  return ConnectionFactory.createConnection(util.getConfiguration()).getTable(tableName);
}
Example #11
Source File: HBaseTestingUtility.java From hbase with Apache License 2.0

/**
 * Create a set of column descriptors with the combination of compression,
 * encoding, bloom codecs available.
 * @param prefix family names prefix
 * @return the list of column descriptors
 */
public static List<ColumnFamilyDescriptor> generateColumnDescriptors(final String prefix) {
  List<ColumnFamilyDescriptor> columnFamilyDescriptors = new ArrayList<>();
  long familyId = 0;
  for (Compression.Algorithm compressionType : getSupportedCompressionAlgorithms()) {
    for (DataBlockEncoding encodingType : DataBlockEncoding.values()) {
      for (BloomType bloomType : BloomType.values()) {
        String name = String.format("%s-cf-!@#&-%d!@#", prefix, familyId);
        ColumnFamilyDescriptorBuilder columnFamilyDescriptorBuilder =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name));
        columnFamilyDescriptorBuilder.setCompressionType(compressionType);
        columnFamilyDescriptorBuilder.setDataBlockEncoding(encodingType);
        columnFamilyDescriptorBuilder.setBloomFilterType(bloomType);
        columnFamilyDescriptors.add(columnFamilyDescriptorBuilder.build());
        familyId++;
      }
    }
  }
  return columnFamilyDescriptors;
}
Example #12
Source File: HBaseTestingUtility.java From hbase with Apache License 2.0

/**
 * Create a table.
 * @param htd table descriptor
 * @param families array of column families
 * @param splitKeys array of split keys
 * @param type Bloom type
 * @param blockSize block size
 * @param c Configuration to use
 * @return A Table instance for the created table.
 * @throws IOException if getAdmin or createTable fails
 */
public Table createTable(TableDescriptor htd, byte[][] families, byte[][] splitKeys,
    BloomType type, int blockSize, Configuration c) throws IOException {
  TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(htd);
  for (byte[] family : families) {
    ColumnFamilyDescriptorBuilder cfdb = ColumnFamilyDescriptorBuilder.newBuilder(family)
        .setBloomFilterType(type)
        .setBlocksize(blockSize);
    if (isNewVersionBehaviorEnabled()) {
      cfdb.setNewVersionBehavior(true);
    }
    builder.setColumnFamily(cfdb.build());
  }
  TableDescriptor td = builder.build();
  if (splitKeys != null) {
    getAdmin().createTable(td, splitKeys);
  } else {
    getAdmin().createTable(td);
  }
  // HBaseAdmin only waits for regions to appear in hbase:meta;
  // we should wait until they are assigned
  waitUntilAllRegionsAssigned(td.getTableName());
  return getConnection().getTable(td.getTableName());
}
Example #13
Source File: TestBulkLoadHFiles.java From hbase with Apache License 2.0

private void runTest(String testName, BloomType bloomType, byte[][] tableSplitKeys,
    byte[][][] hfileRanges, boolean useMap) throws Exception {
  final byte[] TABLE_NAME = Bytes.toBytes("mytable_" + testName);
  final boolean preCreateTable = tableSplitKeys != null;

  // Run the test bulkloading the table to the default namespace
  final TableName TABLE_WITHOUT_NS = TableName.valueOf(TABLE_NAME);
  runTest(testName, TABLE_WITHOUT_NS, bloomType, preCreateTable, tableSplitKeys, hfileRanges,
    useMap, 2);

  /*
   * Run the test bulkloading the table from a depth of 3; directory structure is now
   * baseDirectory -- regionDir -- familyDir -- storeFileDir
   */
  if (preCreateTable) {
    runTest(testName + 2, TABLE_WITHOUT_NS, bloomType, true, tableSplitKeys, hfileRanges, false,
      3);
  }

  // Run the test bulkloading the table to the specified namespace
  final TableName TABLE_WITH_NS = TableName.valueOf(Bytes.toBytes(NAMESPACE), TABLE_NAME);
  runTest(testName, TABLE_WITH_NS, bloomType, preCreateTable, tableSplitKeys, hfileRanges,
    useMap, 2);
}
Example #14
Source File: BloomFilterChunk.java From hbase with Apache License 2.0

public void add(Cell cell) {
  /*
   * For faster hashing, use combinatorial generation
   * http://www.eecs.harvard.edu/~kirsch/pubs/bbbf/esa06.pdf
   */
  int hash1;
  int hash2;
  HashKey<Cell> hashKey;
  if (this.bloomType == BloomType.ROWCOL) {
    hashKey = new RowColBloomHashKey(cell);
    hash1 = this.hash.hash(hashKey, 0);
    hash2 = this.hash.hash(hashKey, hash1);
  } else {
    hashKey = new RowBloomHashKey(cell);
    hash1 = this.hash.hash(hashKey, 0);
    hash2 = this.hash.hash(hashKey, hash1);
  }
  setHashLoc(hash1, hash2);
}
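The cited Kirsch-Mitzenmacher paper is the reason only two base hashes are computed here: the i-th of hashCount probe positions can be derived as (hash1 + i * hash2) mod bitSize without hurting the Bloom filter's false-positive behavior. The sketch below is a rough, self-contained illustration of that combinatorial-generation idea; it is not the body of setHashLoc, and the class and method names are made up:

import java.util.BitSet;

public class DoubleHashingSketch {
  // Derive hashCount probe positions from two base hashes (combinatorial generation).
  static void setProbes(BitSet bits, long bitSize, int hashCount, int hash1, int hash2) {
    for (int i = 0; i < hashCount; i++) {
      long pos = Math.abs((hash1 + (long) i * hash2) % bitSize); // i-th probe position
      bits.set((int) pos);
    }
  }

  public static void main(String[] args) {
    BitSet bits = new BitSet(1024);
    setProbes(bits, 1024, 4, 0x12345678, 0x1e3779b9);
    System.out.println("probe bits set: " + bits.cardinality());
  }
}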
Example #15
Source File: CompoundBloomFilterWriter.java From hbase with Apache License 2.0

@Override
public void append(Cell cell) throws IOException {
  Objects.requireNonNull(cell);

  enqueueReadyChunk(false);

  if (chunk == null) {
    if (firstKeyInChunk != null) {
      throw new IllegalStateException("First key in chunk already set: "
          + Bytes.toStringBinary(firstKeyInChunk));
    }
    // This will be done only once per chunk
    if (bloomType == BloomType.ROWCOL) {
      firstKeyInChunk = PrivateCellUtil
          .getCellKeySerializedAsKeyValueKey(PrivateCellUtil.createFirstOnRowCol(cell));
    } else {
      firstKeyInChunk = CellUtil.copyRow(cell);
    }
    allocateNewChunk();
  }

  chunk.add(cell);
  this.prevCell = cell;
  ++totalKeyCount;
}
Example #16
Source File: CompoundBloomFilter.java From hbase with Apache License 2.0

@Override
public boolean contains(Cell keyCell, ByteBuff bloom, BloomType type) {
  int block = index.rootBlockContainingKey(keyCell);
  if (block < 0) {
    return false; // This key is not in the file.
  }
  boolean result;
  HFileBlock bloomBlock = getBloomBlock(block);
  try {
    ByteBuff bloomBuf = bloomBlock.getBufferReadOnly();
    result = BloomFilterUtil.contains(keyCell, bloomBuf, bloomBlock.headerSize(),
        bloomBlock.getUncompressedSizeWithoutHeader(), hash, hashCount, type);
  } finally {
    // After the use, should release the block to deallocate the byte buffers.
    bloomBlock.release();
  }
  if (numPositivesPerChunk != null && result) {
    // Update statistics. Only used in unit tests.
    ++numPositivesPerChunk[block];
  }
  return result;
}
Example #17
Source File: TestMobFile.java From hbase with Apache License 2.0

@Test
public void testGetScanner() throws Exception {
  Path testDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = testDir.getFileSystem(conf);
  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  StoreFileWriter writer = new StoreFileWriter.Builder(conf, cacheConf, fs)
      .withOutputDir(testDir)
      .withFileContext(meta)
      .build();
  MobTestUtil.writeStoreFile(writer, testName.getMethodName());

  MobFile mobFile =
      new MobFile(new HStoreFile(fs, writer.getPath(), conf, cacheConf, BloomType.NONE, true));

  assertNotNull(mobFile.getScanner());
  assertTrue(mobFile.getScanner() instanceof StoreFileScanner);
}
Example #18
Source File: TestMobStoreCompaction.java From hbase with Apache License 2.0

private long countMobCellsInMetadata() throws IOException {
  long mobCellsCount = 0;
  Path mobDirPath = MobUtils.getMobFamilyPath(conf, tableDescriptor.getTableName(),
      familyDescriptor.getNameAsString());
  Configuration copyOfConf = new Configuration(conf);
  copyOfConf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0f);
  CacheConfig cacheConfig = new CacheConfig(copyOfConf);
  if (fs.exists(mobDirPath)) {
    FileStatus[] files = UTIL.getTestFileSystem().listStatus(mobDirPath);
    for (FileStatus file : files) {
      HStoreFile sf =
          new HStoreFile(fs, file.getPath(), conf, cacheConfig, BloomType.NONE, true);
      sf.initReader();
      Map<byte[], byte[]> fileInfo = sf.getReader().loadFileInfo();
      byte[] count = fileInfo.get(MOB_CELLS_COUNT);
      assertTrue(count != null);
      mobCellsCount += Bytes.toLong(count);
    }
  }
  return mobCellsCount;
}
Example #19
Source File: HFileOutputFormat2.java From hbase with Apache License 2.0

/**
 * Runs inside the task to deserialize column family to bloom filter type
 * map from the configuration.
 *
 * @param conf to read the serialized values from
 * @return a map from column family to the configured bloom filter type
 */
@VisibleForTesting
static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf) {
  Map<byte[], String> stringMap = createFamilyConfValueMap(conf, BLOOM_TYPE_FAMILIES_CONF_KEY);
  Map<byte[], BloomType> bloomTypeMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
    BloomType bloomType = BloomType.valueOf(e.getValue());
    bloomTypeMap.put(e.getKey(), bloomType);
  }
  return bloomTypeMap;
}
Example #20
Source File: TestHFileOutputFormat2.java From hbase with Apache License 2.0

private void setupMockColumnFamiliesForBloomType(Table table,
    Map<String, BloomType> familyToDataBlockEncoding) throws IOException {
  TableDescriptorBuilder mockTableDescriptor = TableDescriptorBuilder.newBuilder(TABLE_NAMES[0]);
  for (Entry<String, BloomType> entry : familyToDataBlockEncoding.entrySet()) {
    ColumnFamilyDescriptor columnFamilyDescriptor = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes(entry.getKey()))
        .setMaxVersions(1)
        .setBloomFilterType(entry.getValue())
        .setBlockCacheEnabled(false)
        .setTimeToLive(0)
        .build();
    mockTableDescriptor.setColumnFamily(columnFamilyDescriptor);
  }
  Mockito.doReturn(mockTableDescriptor).when(table).getDescriptor();
}
Example #21
Source File: TestCompactor.java From hbase with Apache License 2.0

public static HStoreFile createDummyStoreFile(long maxSequenceId) throws Exception {
  // "Files" are totally unused, it's Scanner class below that gives compactor fake KVs.
  // But compaction depends on everything under the sun, so stub everything with dummies.
  HStoreFile sf = mock(HStoreFile.class);
  StoreFileReader r = mock(StoreFileReader.class);
  when(r.length()).thenReturn(1L);
  when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
  when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
  when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong(), anyLong(),
    anyBoolean())).thenReturn(mock(StoreFileScanner.class));
  when(sf.getReader()).thenReturn(r);
  when(sf.getMaxSequenceId()).thenReturn(maxSequenceId);
  return sf;
}
Example #22
Source File: TestStripeCompactionPolicy.java From hbase with Apache License 2.0

private static HStoreFile createFile(long size) throws Exception {
  HStoreFile sf = mock(HStoreFile.class);
  when(sf.getPath()).thenReturn(new Path("moo"));
  StoreFileReader r = mock(StoreFileReader.class);
  when(r.getEntries()).thenReturn(size);
  when(r.length()).thenReturn(size);
  when(r.getBloomFilterType()).thenReturn(BloomType.NONE);
  when(r.getHFileReader()).thenReturn(mock(HFile.Reader.class));
  when(r.getStoreFileScanner(anyBoolean(), anyBoolean(), anyBoolean(), anyLong(), anyLong(),
    anyBoolean())).thenReturn(mock(StoreFileScanner.class));
  when(sf.getReader()).thenReturn(r);
  when(sf.getBulkLoadTimestamp()).thenReturn(OptionalLong.empty());
  return sf;
}
Example #23
Source File: TestEncodedSeekers.java From hbase with Apache License 2.0

@Test
public void testEncodedSeeker() throws IOException {
  System.err.println("Testing encoded seekers for encoding : " + encoding
      + ", includeTags : " + includeTags + ", compressTags : " + compressTags);
  if (includeTags) {
    testUtil.getConfiguration().setInt(HFile.FORMAT_VERSION_KEY, 3);
  }

  LruBlockCache cache =
      (LruBlockCache) BlockCacheFactory.createBlockCache(testUtil.getConfiguration());
  // Need to disable default row bloom filter for this test to pass.
  ColumnFamilyDescriptor cfd = ColumnFamilyDescriptorBuilder.newBuilder(CF_BYTES)
      .setMaxVersions(MAX_VERSIONS)
      .setDataBlockEncoding(encoding)
      .setBlocksize(BLOCK_SIZE)
      .setBloomFilterType(BloomType.NONE)
      .setCompressTags(compressTags)
      .build();
  HRegion region = testUtil.createTestRegion(TABLE_NAME, cfd, cache);

  // write the data, but leave some in the memstore
  doPuts(region);

  // verify correctness when memstore contains data
  doGets(region);

  // verify correctness again after compacting
  region.compact(false);
  doGets(region);

  Map<DataBlockEncoding, Integer> encodingCounts = cache.getEncodingCountsForTest();

  // Ensure that compactions don't pollute the cache with unencoded blocks
  // in case of in-cache-only encoding.
  System.err.println("encodingCounts=" + encodingCounts);
  assertEquals(1, encodingCounts.size());
  DataBlockEncoding encodingInCache = encodingCounts.keySet().iterator().next();
  assertEquals(encoding, encodingInCache);
  assertTrue(encodingCounts.get(encodingInCache) > 0);
}
Example #24
Source File: TestBulkLoadHFiles.java From hbase with Apache License 2.0

@Test
public void testSplitALot() throws Exception {
  runTest("testSplitALot", BloomType.NONE,
    new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("bbb"), Bytes.toBytes("ccc"),
      Bytes.toBytes("ddd"), Bytes.toBytes("eee"), Bytes.toBytes("fff"), Bytes.toBytes("ggg"),
      Bytes.toBytes("hhh"), Bytes.toBytes("iii"), Bytes.toBytes("lll"), Bytes.toBytes("mmm"),
      Bytes.toBytes("nnn"), Bytes.toBytes("ooo"), Bytes.toBytes("ppp"), Bytes.toBytes("qqq"),
      Bytes.toBytes("rrr"), Bytes.toBytes("sss"), Bytes.toBytes("ttt"), Bytes.toBytes("uuu"),
      Bytes.toBytes("vvv"), Bytes.toBytes("zzz"), },
    new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("zzz") }, });
}
Example #25
Source File: TestBulkLoadHFiles.java From hbase with Apache License 2.0

/**
 * Test case that creates some regions and loads HFiles that have different region boundaries
 * than the table pre-split.
 */
@Test
public void testSimpleHFileSplit() throws Exception {
  runTest("testHFileSplit", BloomType.NONE,
    new byte[][] { Bytes.toBytes("aaa"), Bytes.toBytes("fff"), Bytes.toBytes("jjj"),
      Bytes.toBytes("ppp"), Bytes.toBytes("uuu"), Bytes.toBytes("zzz"), },
    new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("lll") },
      new byte[][] { Bytes.toBytes("mmm"), Bytes.toBytes("zzz") }, });
}
Example #26
Source File: HBaseCreateTable.java From SparkOnALog with Apache License 2.0

public static void main(String[] args) throws IOException {
  if (args.length < 2) {
    // need both a table name and a column family name
    System.out.println("CreateTable {tableName} {columnFamilyName}");
    return;
  }

  String tableName = args[0];
  String columnFamilyName = args[1];

  HBaseAdmin admin = new HBaseAdmin(new Configuration());

  HTableDescriptor tableDescriptor = new HTableDescriptor();
  tableDescriptor.setName(Bytes.toBytes(tableName));

  HColumnDescriptor columnDescriptor = new HColumnDescriptor(columnFamilyName);
  columnDescriptor.setCompressionType(Compression.Algorithm.SNAPPY);
  columnDescriptor.setBlocksize(64 * 1024);
  columnDescriptor.setBloomFilterType(BloomType.ROW);

  tableDescriptor.addFamily(columnDescriptor);
  //tableDescriptor.setValue(tableDescriptor.SPLIT_POLICY, ConstantSizeRegionSplitPolicy.class.getName());

  System.out.println("-Creating Table");
  admin.createTable(tableDescriptor);

  admin.close();
  System.out.println("-Done");
}
Example #27
Source File: HFileOutputFormat3.java From kylin-on-parquet-v2 with Apache License 2.0

/**
 * Runs inside the task to deserialize column family to bloom filter type
 * map from the configuration.
 *
 * @param conf to read the serialized values from
 * @return a map from column family to the configured bloom filter type
 */
@VisibleForTesting
static Map<byte[], BloomType> createFamilyBloomTypeMap(Configuration conf) {
  Map<byte[], String> stringMap = createFamilyConfValueMap(conf, BLOOM_TYPE_FAMILIES_CONF_KEY);
  Map<byte[], BloomType> bloomTypeMap = new TreeMap<byte[], BloomType>(Bytes.BYTES_COMPARATOR);
  for (Map.Entry<byte[], String> e : stringMap.entrySet()) {
    BloomType bloomType = BloomType.valueOf(e.getValue());
    bloomTypeMap.put(e.getKey(), bloomType);
  }
  return bloomTypeMap;
}
Example #28
Source File: TestBulkLoadHFiles.java From hbase with Apache License 2.0

/**
 * Test loading into a column family that has a ROWCOL bloom filter.
 */
@Test
public void testRegionCrossingRowColBloom() throws Exception {
  runTest("testRegionCrossingLoadRowColBloom", BloomType.ROWCOL,
    new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
      new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
}
Example #29
Source File: HBaseConnectionFactory.java From spliceengine with GNU Affero General Public License v3.0

public HTableDescriptor generateTransactionTable() {
  HTableDescriptor desc = new HTableDescriptor(
      TableName.valueOf(namespaceBytes, HConfiguration.TRANSACTION_TABLE_BYTES));
  HColumnDescriptor columnDescriptor = new HColumnDescriptor(DEFAULT_FAMILY_BYTES);
  columnDescriptor.setMaxVersions(5);
  Compression.Algorithm compress =
      Compression.getCompressionAlgorithmByName(config.getCompressionAlgorithm());
  columnDescriptor.setCompressionType(compress);
  columnDescriptor.setInMemory(HConfiguration.DEFAULT_IN_MEMORY);
  columnDescriptor.setBlockCacheEnabled(HConfiguration.DEFAULT_BLOCKCACHE);
  columnDescriptor.setBloomFilterType(
      BloomType.valueOf(HConfiguration.DEFAULT_BLOOMFILTER.toUpperCase()));
  columnDescriptor.setTimeToLive(HConfiguration.DEFAULT_TTL);
  desc.addFamily(columnDescriptor);
  desc.addFamily(new HColumnDescriptor(Bytes.toBytes(SI_PERMISSION_FAMILY)));
  return desc;
}
Example #30
Source File: TestBulkLoadHFiles.java From hbase with Apache License 2.0

/**
 * Test case that creates some regions and loads HFiles that cross the boundaries of those
 * regions.
 */
@Test
public void testRegionCrossingLoad() throws Exception {
  runTest("testRegionCrossingLoad", BloomType.NONE,
    new byte[][][] { new byte[][] { Bytes.toBytes("aaaa"), Bytes.toBytes("eee") },
      new byte[][] { Bytes.toBytes("fff"), Bytes.toBytes("zzz") }, });
}