Java Code Examples for org.apache.hadoop.hbase.HColumnDescriptor#setScope()
The following examples show how to use org.apache.hadoop.hbase.HColumnDescriptor#setScope().
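setScope(int) sets the replication scope of a column family: HConstants.REPLICATION_SCOPE_LOCAL (0, the default) keeps the family's edits on the local cluster, while HConstants.REPLICATION_SCOPE_GLOBAL (1) marks them for cross-cluster replication. Before the project examples below, here is a minimal sketch of the common pattern; it is not taken from any of those projects, and the table name, family name, and Admin handle are placeholders:

// Minimal sketch: enable replication for one column family at table-creation time.
// "example_table" and "cf" are placeholder names; the Admin comes from an existing Connection.
void createReplicatedTable(Admin admin) throws IOException {
    HTableDescriptor table = new HTableDescriptor(TableName.valueOf("example_table"));
    HColumnDescriptor family = new HColumnDescriptor("cf");
    family.setScope(HConstants.REPLICATION_SCOPE_GLOBAL); // 1; REPLICATION_SCOPE_LOCAL (0) is the default
    table.addFamily(family);
    admin.createTable(table);
}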
Example 1
Source File: IndexerIT.java From hbase-indexer with Apache License 2.0
/**
 * Creates a table with one column family, with replication enabled.
 */
private void createTable(String tableName, String familyName) throws Exception {
    HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor familyDescriptor = new HColumnDescriptor(familyName);
    familyDescriptor.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    tableDescriptor.addFamily(familyDescriptor);
    Admin hbaseAdmin = connection.getAdmin();
    hbaseAdmin.createTable(tableDescriptor);
    hbaseAdmin.close();
}
Example 2
Source File: SepConsumerIT.java From hbase-indexer with Apache License 2.0
@BeforeClass
public static void setUpBeforeClass() throws Exception {
    clusterConf = HBaseConfiguration.create();
    // HACK disabled because always on in hbase-2 (see HBASE-16040)
    // clusterConf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
    clusterConf.setLong("replication.source.sleepforretries", 50);
    //clusterConf.set("replication.replicationsource.implementation", SepReplicationSource.class.getName());
    clusterConf.setInt("hbase.master.info.port", -1);
    clusterConf.setInt("hbase.regionserver.info.port", -1);

    hbaseTestUtil = new HBaseTestingUtility(clusterConf);
    hbaseTestUtil.startMiniZKCluster(1);
    hbaseTestUtil.startMiniCluster(1);

    HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf(TABLE_NAME));
    HColumnDescriptor dataColfamDescriptor = new HColumnDescriptor(DATA_COL_FAMILY);
    dataColfamDescriptor.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    HColumnDescriptor payloadColfamDescriptor = new HColumnDescriptor(PAYLOAD_COL_FAMILY);
    payloadColfamDescriptor.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
    tableDescriptor.addFamily(dataColfamDescriptor);
    tableDescriptor.addFamily(payloadColfamDescriptor);

    connection = ConnectionFactory.createConnection(clusterConf);
    connection.getAdmin().createTable(tableDescriptor);
    htable = connection.getTable(TableName.valueOf(TABLE_NAME));
}
Example 3
Source File: DemoSchema.java From hbase-indexer with Apache License 2.0
public static void createSchema(Configuration hbaseConf) throws IOException {
    Admin admin = ConnectionFactory.createConnection(hbaseConf).getAdmin();
    if (!admin.tableExists(TableName.valueOf("sep-user-demo"))) {
        HTableDescriptor tableDescriptor = new HTableDescriptor(TableName.valueOf("sep-user-demo"));
        HColumnDescriptor infoCf = new HColumnDescriptor("info");
        infoCf.setScope(1); // 1 == HConstants.REPLICATION_SCOPE_GLOBAL
        tableDescriptor.addFamily(infoCf);
        admin.createTable(tableDescriptor);
    }
    admin.close();
}
Example 4
Source File: HBaseMetadataProvider.java From kite with Apache License 2.0
private HColumnDescriptor configure(HColumnDescriptor column, DatasetDescriptor descriptor) {
    if (descriptor.hasProperty(REPLICATION_ID_PROP)) {
        String value = descriptor.getProperty(REPLICATION_ID_PROP);
        try {
            column.setScope(Integer.valueOf(value));
        } catch (NumberFormatException e) {
            throw new IllegalArgumentException("Invalid replication scope: " + value, e);
        }
    }
    return column;
}
Example 5
Source File: CubeHTableUtil.java From kylin-on-parquet-v2 with Apache License 2.0
public static HColumnDescriptor createColumnFamily(KylinConfig kylinConfig, String cfName, boolean isMemoryHungry) {
    HColumnDescriptor cf = new HColumnDescriptor(cfName);
    cf.setMaxVersions(1);

    if (isMemoryHungry) {
        cf.setBlocksize(kylinConfig.getHbaseDefaultBlockSize());
    } else {
        cf.setBlocksize(kylinConfig.getHbaseSmallFamilyBlockSize());
    }

    String hbaseDefaultCC = kylinConfig.getHbaseDefaultCompressionCodec().toLowerCase(Locale.ROOT);
    switch (hbaseDefaultCC) {
    case "snappy": {
        logger.info("hbase will use snappy to compress data");
        cf.setCompressionType(Algorithm.SNAPPY);
        break;
    }
    case "lzo": {
        logger.info("hbase will use lzo to compress data");
        cf.setCompressionType(Algorithm.LZO);
        break;
    }
    case "gz":
    case "gzip": {
        logger.info("hbase will use gzip to compress data");
        cf.setCompressionType(Algorithm.GZ);
        break;
    }
    case "lz4": {
        logger.info("hbase will use lz4 to compress data");
        cf.setCompressionType(Algorithm.LZ4);
        break;
    }
    case "none":
    default: {
        logger.info("hbase will not use any compression algorithm to compress data");
        cf.setCompressionType(Algorithm.NONE);
    }
    }

    try {
        String encodingStr = kylinConfig.getHbaseDefaultEncoding();
        DataBlockEncoding encoding = DataBlockEncoding.valueOf(encodingStr);
        cf.setDataBlockEncoding(encoding);
    } catch (Exception e) {
        logger.info("hbase will not use any encoding", e);
        cf.setDataBlockEncoding(DataBlockEncoding.NONE);
    }

    cf.setInMemory(false);
    cf.setBloomFilterType(BloomType.NONE);
    cf.setScope(kylinConfig.getHBaseReplicationScope());
    return cf;
}
Example 6
Source File: MutableIndexReplicationIT.java From phoenix with Apache License 2.0
@Test
public void testReplicationWithMutableIndexes() throws Exception {
    Connection conn = getConnection();

    // create the primary and index tables
    conn.createStatement().execute(
            "CREATE TABLE " + DATA_TABLE_FULL_NAME
                    + " (k VARCHAR NOT NULL PRIMARY KEY, v1 VARCHAR, v2 VARCHAR)");
    conn.createStatement().execute(
            "CREATE INDEX " + INDEX_TABLE_NAME + " ON " + DATA_TABLE_FULL_NAME + " (v1)");

    // make sure that the tables are empty, but reachable
    String query = "SELECT * FROM " + DATA_TABLE_FULL_NAME;
    ResultSet rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());
    // make sure there is no data in the table
    query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertFalse(rs.next());

    // make sure the data tables are created on the remote cluster
    HBaseAdmin admin = utility1.getHBaseAdmin();
    HBaseAdmin admin2 = utility2.getHBaseAdmin();

    List<String> dataTables = new ArrayList<String>();
    dataTables.add(DATA_TABLE_FULL_NAME);
    dataTables.add(INDEX_TABLE_FULL_NAME);
    for (String tableName : dataTables) {
        HTableDescriptor desc = admin.getTableDescriptor(TableName.valueOf(tableName));

        // create it as-is on the remote cluster
        admin2.createTable(desc);

        LOG.info("Enabling replication on source table: " + tableName);
        HColumnDescriptor[] cols = desc.getColumnFamilies();
        assertEquals(1, cols.length);
        // add the replication scope to the column
        HColumnDescriptor col = desc.removeFamily(cols[0].getName());
        col.setScope(HConstants.REPLICATION_SCOPE_GLOBAL);
        desc.addFamily(col);
        // disable/modify/enable table so it has replication enabled
        admin.disableTable(desc.getTableName());
        admin.modifyTable(tableName, desc);
        admin.enableTable(desc.getTableName());
        LOG.info("Replication enabled on source table: " + tableName);
    }

    // load some data into the source cluster table
    PreparedStatement stmt =
            conn.prepareStatement("UPSERT INTO " + DATA_TABLE_FULL_NAME + " VALUES(?,?,?)");
    stmt.setString(1, "a"); // k
    stmt.setString(2, "x"); // v1 <- has index
    stmt.setString(3, "1"); // v2
    stmt.execute();
    conn.commit();

    // make sure the index is working as expected
    query = "SELECT * FROM " + INDEX_TABLE_FULL_NAME;
    rs = conn.createStatement().executeQuery(query);
    assertTrue(rs.next());
    assertEquals("x", rs.getString(1));
    assertFalse(rs.next());
    conn.close();

    /* Validate that we have replicated the rows to the remote cluster */
    // other table can't be reached through Phoenix right now - would need to change how we
    // lookup tables. For right now, we just go through an HTable
    LOG.info("Looking up tables in replication target");
    TableName[] tables = admin2.listTableNames();
    HTable remoteTable = new HTable(utility2.getConfiguration(), tables[0]);
    for (int i = 0; i < REPLICATION_RETRIES; i++) {
        if (i >= REPLICATION_RETRIES - 1) {
            fail("Waited too much time for put replication on table "
                    + remoteTable.getTableDescriptor().getNameAsString());
        }
        if (ensureAnyRows(remoteTable)) {
            break;
        }
        LOG.info("Sleeping for " + REPLICATION_WAIT_TIME_MILLIS + " for edits to get replicated");
        Thread.sleep(REPLICATION_WAIT_TIME_MILLIS);
    }
    remoteTable.close();
}
Example 7
Source File: CubeHTableUtil.java From kylin with Apache License 2.0
public static HColumnDescriptor createColumnFamily(KylinConfig kylinConfig, String cfName, boolean isMemoryHungry) {
    HColumnDescriptor cf = new HColumnDescriptor(cfName);
    cf.setMaxVersions(1);

    if (isMemoryHungry) {
        cf.setBlocksize(kylinConfig.getHbaseDefaultBlockSize());
    } else {
        cf.setBlocksize(kylinConfig.getHbaseSmallFamilyBlockSize());
    }

    String hbaseDefaultCC = kylinConfig.getHbaseDefaultCompressionCodec().toLowerCase(Locale.ROOT);
    switch (hbaseDefaultCC) {
    case "snappy": {
        logger.info("hbase will use snappy to compress data");
        cf.setCompressionType(Algorithm.SNAPPY);
        break;
    }
    case "lzo": {
        logger.info("hbase will use lzo to compress data");
        cf.setCompressionType(Algorithm.LZO);
        break;
    }
    case "gz":
    case "gzip": {
        logger.info("hbase will use gzip to compress data");
        cf.setCompressionType(Algorithm.GZ);
        break;
    }
    case "lz4": {
        logger.info("hbase will use lz4 to compress data");
        cf.setCompressionType(Algorithm.LZ4);
        break;
    }
    case "none":
    default: {
        logger.info("hbase will not use any compression algorithm to compress data");
        cf.setCompressionType(Algorithm.NONE);
    }
    }

    try {
        String encodingStr = kylinConfig.getHbaseDefaultEncoding();
        DataBlockEncoding encoding = DataBlockEncoding.valueOf(encodingStr);
        cf.setDataBlockEncoding(encoding);
    } catch (Exception e) {
        logger.info("hbase will not use any encoding", e);
        cf.setDataBlockEncoding(DataBlockEncoding.NONE);
    }

    cf.setInMemory(false);
    cf.setBloomFilterType(BloomType.NONE);
    cf.setScope(kylinConfig.getHBaseReplicationScope());
    return cf;
}
Example 8
Source File: TableCommand.java From pinpoint with Apache License 2.0
private HColumnDescriptor newColumnDescriptor(ColumnFamilyChange columnFamilyChange) {
    HColumnDescriptor hcd = new HColumnDescriptor(columnFamilyChange.getName());
    ColumnFamilyConfiguration columnFamilyConfiguration = columnFamilyChange.getColumnFamilyConfiguration();
    Boolean blockCacheEnabled = columnFamilyConfiguration.getBlockCacheEnabled();
    if (blockCacheEnabled != null) {
        hcd.setBlockCacheEnabled(blockCacheEnabled);
    }
    Integer replicationScope = columnFamilyConfiguration.getReplicationScope();
    if (replicationScope != null) {
        hcd.setScope(replicationScope);
    }
    Boolean inMemory = columnFamilyConfiguration.getInMemory();
    if (inMemory != null) {
        hcd.setInMemory(inMemory);
    }
    Integer timeToLive = columnFamilyConfiguration.getTimeToLive();
    if (timeToLive != null) {
        hcd.setTimeToLive(timeToLive);
    }
    ColumnFamilyConfiguration.DataBlockEncoding dataBlockEncoding = columnFamilyConfiguration.getDataBlockEncoding();
    if (dataBlockEncoding != null) {
        hcd.setDataBlockEncoding(DataBlockEncoding.valueOf(dataBlockEncoding.name()));
    }
    Integer blockSize = columnFamilyConfiguration.getBlockSize();
    if (blockSize != null) {
        hcd.setBlocksize(blockSize);
    }
    Integer maxVersions = columnFamilyConfiguration.getMaxVersions();
    if (maxVersions != null) {
        hcd.setMaxVersions(maxVersions);
    }
    Integer minVersions = columnFamilyConfiguration.getMinVersions();
    if (minVersions != null) {
        hcd.setMinVersions(minVersions);
    }
    ColumnFamilyConfiguration.BloomFilter bloomFilter = columnFamilyConfiguration.getBloomFilter();
    if (bloomFilter != null) {
        hcd.setBloomFilterType(BloomType.valueOf(bloomFilter.name()));
    }
    if (compressionAlgorithm != Compression.Algorithm.NONE) {
        hcd.setCompressionType(compressionAlgorithm);
    }
    return hcd;
}